https://gitweb.dealii.org/ - dealii-svn.git/commitdiff
Add TBB 3.0 update 3 (commercially aligned open source release).
author    bangerth <bangerth@0785d39b-7218-0410-832d-ea1e28bc413d>
Fri, 19 Nov 2010 01:21:29 +0000 (01:21 +0000)
committer bangerth <bangerth@0785d39b-7218-0410-832d-ea1e28bc413d>
Fri, 19 Nov 2010 01:21:29 +0000 (01:21 +0000)
git-svn-id: https://svn.dealii.org/trunk@22813 0785d39b-7218-0410-832d-ea1e28bc413d

625 files changed:
deal.II/contrib/tbb/tbb30_104oss/CHANGES [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/COPYING [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/Makefile [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/README [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/AIX.gcc.inc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/AIX.inc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/FreeBSD.gcc.inc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/FreeBSD.inc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/Makefile.rml [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/Makefile.tbb [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/Makefile.tbbmalloc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/Makefile.test [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/SunOS.gcc.inc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/SunOS.inc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/SunOS.suncc.inc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/codecov.txt [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/common.inc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/common_rules.inc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/detect.js [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/generate_tbbvars.bat [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/generate_tbbvars.sh [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/index.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/linux.gcc.inc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/linux.icc.inc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/linux.inc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/macos.gcc.inc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/macos.icc.inc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/macos.inc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/suncc.map.pause [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/test_launcher.bat [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/test_launcher.sh [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/version_info_aix.sh [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/version_info_linux.sh [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/version_info_macos.sh [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/version_info_sunos.sh [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/version_info_windows.js [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/vsproject/index.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/vsproject/makefile.sln [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/vsproject/tbb.vcproj [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/vsproject/tbbmalloc.vcproj [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/vsproject/tbbmalloc_proxy.vcproj [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/vsproject/version_string.tmp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/windows.cl.inc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/windows.gcc.inc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/windows.icl.inc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/windows.inc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/xbox360.cl.inc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/build/xbox360.inc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/Release_Notes.txt [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00001.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00002.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00003.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00004.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00005.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00006.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00007.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00009.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00011.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00012.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00013.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00015.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00016.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00017.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00018.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00019.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00021.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00026.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00031.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00034.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00035.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00036.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00037.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00038.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00039.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00040.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00041.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00042.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00043.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00044.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00046.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00048.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00049.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00050.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00051.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00052.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00053.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00054.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00055.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00056.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00057.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00058.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00059.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00060.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00061.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00062.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00063.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00065.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00067.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00068.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00069.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00070.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00072.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00073.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00075.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00076.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00077.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00084.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00086.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00088.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00090.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00092.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00094.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00095.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00096.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00097.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00098.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00099.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00100.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00101.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00102.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00109.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00115.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00116.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00145.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00146.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00147.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00148.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00149.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00150.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00151.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00152.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00153.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00154.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00155.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00156.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00157.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00157.png [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00158.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00159.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00159.png [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00160.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00161.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00161.png [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00162.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00163.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00163.png [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00164.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00165.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00165.png [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00166.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00167.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00167.png [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00168.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00169.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00169.png [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00170.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00171.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00172.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00173.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00174.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00175.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00176.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00176.png [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00177.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00178.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00179.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00180.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00181.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00182.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00183.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00184.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00185.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00186.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00187.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00188.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00189.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00190.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00191.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00192.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00193.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00194.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00195.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00196.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00197.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00198.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00199.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00200.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00201.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00202.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00203.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00204.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00204.png [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00205.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00206.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00207.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00208.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00209.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00210.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00211.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00211.png [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00212.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00213.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00214.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00214.png [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00215.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00216.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00217.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00218.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00219.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00253.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00272.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00279.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00280.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00281.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00282.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00283.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00284.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00285.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00286.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00304.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00314.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00316.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00317.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00325.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00326.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00327.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00328.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00330.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00331.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00341.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00342.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00347.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00356.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00359.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00372.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00374.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00375.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00376.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00385.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00389.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00391.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00397.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00401.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00406.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00410.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00414.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00419.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00431.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00432.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00433.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00434.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00435.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00436.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00437.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00438.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00447.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00454.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00455.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00457.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00458.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00462.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00463.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00464.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00465.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00470.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00471.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00478.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00486.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/a00488.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/annotated.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/concepts.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/deprecated.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/doxygen.css [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/doxygen.png [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/files.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/functions.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/functions_enum.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/functions_eval.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/functions_func.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/functions_rela.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/functions_type.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/functions_vars.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/globals.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/globals_func.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/hierarchy.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/index.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/modules.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/namespacemembers.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/namespacemembers_enum.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/namespacemembers_eval.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/namespacemembers_func.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/namespacemembers_type.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/namespaces.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/pages.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/parallel_do_body_req.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/parallel_for_body_req.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/parallel_reduce_body_req.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/parallel_reduce_lambda_req.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/parallel_scan_body_req.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/parallel_sort_iter_req.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/range_req.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/tab_b.gif [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/tab_l.gif [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/tab_r.gif [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/doc/html/tabs.css [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/index.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/_concurrent_queue_internal.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/_concurrent_unordered_internal.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/_tbb_windef.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/aligned_space.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/atomic.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/blocked_range.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/blocked_range2d.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/blocked_range3d.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/cache_aligned_allocator.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/combinable.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/compat/condition_variable [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/compat/ppl.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/compat/thread [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/concurrent_hash_map.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/concurrent_queue.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/concurrent_unordered_map.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/concurrent_vector.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/critical_section.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/enumerable_thread_specific.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/index.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/ibm_aix51.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/linux_common.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/linux_ia32.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/linux_ia64.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/linux_intel64.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/mac_ppc.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/macos_common.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/sunos_sparc.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/windows_api.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/windows_ia32.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/windows_intel64.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/xbox360_ppc.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/mutex.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/null_mutex.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/null_rw_mutex.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_do.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_for.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_for_each.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_invoke.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_reduce.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_scan.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_sort.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_while.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/partitioner.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/pipeline.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/queuing_mutex.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/queuing_rw_mutex.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/reader_writer_lock.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/recursive_mutex.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/scalable_allocator.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/spin_mutex.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/spin_rw_mutex.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/task.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/task_group.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/task_scheduler_init.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/task_scheduler_observer.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb_allocator.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb_config.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb_exception.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb_machine.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb_profiling.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb_stddef.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb_thread.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbbmalloc_proxy.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/include/tbb/tick_count.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/index.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/Makefile [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/index.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/old/concurrent_queue_v2.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/old/concurrent_queue_v2.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/old/concurrent_vector_v2.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/old/concurrent_vector_v2.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/old/spin_rw_mutex_v2.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/old/spin_rw_mutex_v2.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/old/task_v2.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/old/test_concurrent_queue_v2.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/old/test_concurrent_vector_v2.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/old/test_mutex_v2.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/perf/fibonacci_cutoff.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/perf/fibonacci_impl_tbb.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/perf/perf.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/perf/perf.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/perf/perf_sched.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/perf/run_statistics.sh [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/perf/statistics.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/perf/statistics.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/perf/statistics_xml.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/perf/time_framework.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/perf/time_hash_map.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/perf/time_hash_map_fill.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/perf/time_locked_work.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/perf/time_sandbox.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/perf/time_vector.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/client/index.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/client/library_assert.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/client/omp_dynamic_link.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/client/omp_dynamic_link.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/client/rml_factory.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/client/rml_omp.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/client/rml_tbb.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/include/index.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/include/rml_base.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/include/rml_omp.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/include/rml_tbb.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/index.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/perfor/omp_nested.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/perfor/omp_simple.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/perfor/tbb_multi_omp.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/perfor/tbb_simple.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/perfor/thread_level.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/server/index.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/server/irml.rc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/server/job_automaton.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/server/lin-rml-export.def [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/server/rml_server.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/server/thread_monitor.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/server/wait_counter.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/server/win32-rml-export.def [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/server/win64-rml-export.def [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/test/rml_omp_stub.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/test/test_job_automaton.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/test/test_rml_mixed.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/test/test_rml_omp.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/test/test_rml_omp_c_linkage.c [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/test/test_rml_tbb.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/test/test_server.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/rml/test/test_thread_monitor.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/arena.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/arena.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/cache_aligned_allocator.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/cilk-tbb-interop.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/concurrent_hash_map.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/concurrent_monitor.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/concurrent_monitor.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/concurrent_queue.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/concurrent_vector.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/condition_variable.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/critical_section.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/custom_scheduler.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/dynamic_link.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/dynamic_link.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/governor.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/governor.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/ia32-masm/atomic_support.asm [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/ia32-masm/lock_byte.asm [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/ia64-gas/atomic_support.s [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/ia64-gas/ia64_misc.s [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/ia64-gas/lock_byte.s [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/ia64-gas/log2.s [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/ia64-gas/pause.s [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/ibm_aix51/atomic_support.c [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/index.html [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/intel64-masm/atomic_support.asm [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/intrusive_list.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/itt_notify.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/itt_notify.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/lin32-tbb-export.def [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/lin64-tbb-export.def [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/lin64ipf-tbb-export.def [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/mac32-tbb-export.def [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/mac64-tbb-export.def [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/mailbox.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/market.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/market.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/mutex.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/observer_proxy.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/observer_proxy.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/pipeline.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/private_server.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/queuing_mutex.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/queuing_rw_mutex.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/reader_writer_lock.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/recursive_mutex.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/scheduler.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/scheduler.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/scheduler_common.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/scheduler_utility.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/semaphore.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/spin_mutex.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/spin_rw_mutex.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/task.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/task_group_context.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/task_stream.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_assert_impl.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_main.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_main.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_misc.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_misc.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_resource.rc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_statistics.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_statistics.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_thread.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_version.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/tls.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/disable_warnings.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/internal/ittnotify.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/ittnotify.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/ittnotify_config.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/ittnotify_static.c [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/ittnotify_static.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/ittnotify_types.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/legacy/ittnotify.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/prototype/ittnotify.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/win32-tbb-export.def [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/win64-gcc-tbb-export.def [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/win64-tbb-export.def [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbb/xbox360-tbb-export.def [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/Customize.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/LifoList.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/MapMemory.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/Statistics.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/TypeDefinitions.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/backend.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/backref.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/frontend.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/large_objects.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/lin-tbbmalloc-export.def [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/lin32-proxy-export.def [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/lin64-proxy-export.def [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/lin64ipf-proxy-export.def [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/mac32-tbbmalloc-export.def [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/mac64-tbbmalloc-export.def [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/proxy.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/proxy.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/tbb_function_replacement.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/tbb_function_replacement.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/tbbmalloc.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/tbbmalloc.rc [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/tbbmalloc_internal.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/win-gcc-tbbmalloc-export.def [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/win32-tbbmalloc-export.def [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/win64-tbbmalloc-export.def [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/xbox360-tbbmalloc-export.def [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/harness.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/harness_allocator.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/harness_assert.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/harness_bad_expr.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/harness_barrier.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/harness_concurrency_tracker.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/harness_cpu.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/harness_eh.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/harness_inject_scheduler.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/harness_iterator.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/harness_m128.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/harness_memory.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/harness_report.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_ScalableAllocator.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_ScalableAllocator_STL.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_aligned_space.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_allocator.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_allocator_STL.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_assembly.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_atomic.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_blocked_range.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_blocked_range2d.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_blocked_range3d.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_cache_aligned_allocator.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_cache_aligned_allocator_STL.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_cilk_interop.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_combinable.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_concurrent_hash_map.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_concurrent_monitor.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_concurrent_queue.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_concurrent_unordered.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_concurrent_vector.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_condition_variable.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_critical_section.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_eh_algorithms.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_eh_tasks.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_enumerable_thread_specific.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_fast_random.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_halt.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_handle_perror.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_inits_loop.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_intrusive_list.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_ittnotify.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_lambda.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_atexit.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_compliance.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_init_shutdown.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_lib_unload.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_overload.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_pure_c.c [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_regression.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_whitebox.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_model_plugin.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_mutex.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_mutex_native_threads.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_openmp.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_do.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_for.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_for_each.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_invoke.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_pipeline.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_reduce.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_scan.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_sort.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_while.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_pipeline.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_pipeline_with_tbf.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_reader_writer_lock.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_rwm_upgrade_downgrade.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_semaphore.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_std_thread.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_task.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_task_assertions.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_task_auto_init.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_task_group.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_task_leaks.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_task_scheduler_init.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_task_scheduler_observer.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_tbb_condition_variable.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_tbb_header.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_tbb_thread.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_tbb_version.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_thread.h [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_tick_count.cpp [new file with mode: 0644]
deal.II/contrib/tbb/tbb30_104oss/src/test/test_yield.cpp [new file with mode: 0644]

diff --git a/deal.II/contrib/tbb/tbb30_104oss/CHANGES b/deal.II/contrib/tbb/tbb30_104oss/CHANGES
new file mode 100644 (file)
index 0000000..4a17d93
--- /dev/null
@@ -0,0 +1,973 @@
+TBB 3.0 Update 3 commercial-aligned release
+
+Changes (w.r.t. TBB 3.0 Update 2 commercial-aligned release):
+
+- cache_aligned_allocator class reworked to use scalable_aligned_malloc.
+- Improved performance of count() and equal_range() methods
+    in concurrent_unordered_map.
+- Improved implementation of 64-bit atomic loads and stores on 32-bit
+    platforms, including compilation with VC 7.1 (see the sketch after
+    this list).
+- Added implementation of atomic operations on top of OSAtomic API
+    provided by Mac OS* X.
+- Fixed a data race in task scheduler destruction that on rare occasion
+  could result in memory corruption.
+- Removed gratuitous try/catch blocks surrounding thread function calls
+  in tbb_thread.
+- Xcode* projects were added for sudoku and game_of_life examples.
+- Xcode* projects were updated to work without TBB framework.
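
A minimal sketch of how the 64-bit atomics mentioned above are typically used, assuming only the tbb/atomic.h header added in this commit; the counter name and values are arbitrary:

    #include "tbb/atomic.h"
    #include <cstdio>

    int main() {
        // tbb::atomic supports 64-bit integral types; per the note above,
        // plain loads and stores of such values are atomic even when the
        // library is built for a 32-bit platform.
        tbb::atomic<unsigned long long> counter;
        counter = 0;                           // atomic store
        counter.fetch_and_add(5);              // atomic read-modify-write
        unsigned long long snapshot = counter; // atomic load
        std::printf("counter = %llu\n", snapshot);
        return 0;
    }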
+
+Open-source contributions integrated:
+
+- MinGW-64 basic support by brsomoza (partially).
+- Patch for atomic.h by Andrey Semashev.
+- Support for AIX & GCC on PowerPC by Giannis Papadopoulos.
+- Various improvements by Raf Schietekat.
+
+------------------------------------------------------------------------
+TBB 3.0 Update 2 commercial-aligned release
+
+Changes (w.r.t. TBB 3.0 Update 1 commercial-aligned release):
+
+- Destructor of the tbb::task_group class throws a missing_wait exception
+    if there are tasks running when it is invoked (see the sketch after
+    this list).
+- Cilk-TBB interop layer added to protect TBB TLS in case of
+    "Cilk-TBB-Cilk nesting" usage model.
+- Compilation fix for dependent template names in concurrent_queue.
+- Memory allocator code refactored to ease development and maintenance.
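
A minimal sketch of the task_group behavior described in the first item above, assuming the tbb/task_group.h and tbb/tbb_exception.h headers added in this commit and a C++0x lambda for brevity; whether the exception actually fires in a given run depends on the task still being pending when the group is destroyed:

    #include "tbb/task_group.h"
    #include "tbb/tbb_exception.h"   // declares tbb::missing_wait
    #include <cstdio>

    int main() {
        try {
            tbb::task_group g;
            g.run([]{ for (volatile int i = 0; i < 100000000; ++i) {} });
            // No g.wait() before g goes out of scope: per the note above,
            // the destructor throws tbb::missing_wait if tasks are still
            // running at that point.
        } catch (const tbb::missing_wait& e) {
            std::printf("caught: %s\n", e.what());
        }
        return 0;
    }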
+
+Bug Fixes:
+- Improved interoperability with other Intel software tools on Linux in
+    case of dynamic replacement of the memory allocator (1700).
+- Fixed install issues that prevented installation on 
+    Mac OS* X 10.6.4 (1711).
+
+------------------------------------------------------------------------
+TBB 3.0 Update 1 commercial-aligned release
+
+Changes (w.r.t. TBB 3.0 commercial-aligned release):
+
+- Decreased memory fragmentation caused by allocations bigger than 8K.
+- Lazily allocate worker threads, to avoid creating unnecessary stacks.
+
+Bugs fixed: 
+
+- TBB allocator used much more memory than malloc (1703) - see above.
+- Deadlocks happened in some specific initialization scenarios 
+    of the TBB allocator (1701, 1704).
+- Regression in enumerable_thread_specific: excessive requirements
+    for object constructors.
+- A bug in construction of parallel_pipeline filters when body instance 
+    was a temporary object.
+- Incorrect usage of memory fences on PowerPC and XBOX360 platforms.
+- A subtle issue in task group context binding that could result
+    in the cancellation signal being missed by nested task groups.
+- Incorrect construction of concurrent_unordered_map if the specified
+    number of buckets is not a power of two.
+- Broken count() and equal_range() of concurrent_unordered_map.
+- Return type of postfix form of operator++ for hash map's iterators.
+
+------------------------------------------------------------------------
+TBB 3.0 commercial-aligned release
+
+Changes (w.r.t. TBB 2.2 Update 3 commercial-aligned release):
+
+- All open-source-release changes down to TBB 2.2 U3 below
+    were incorporated into this release.
+
+------------------------------------------------------------------------
+20100406 open-source release
+
+Changes (w.r.t. 20100310 open-source release):
+
+- Added support for Microsoft* Visual Studio* 2010, including binaries.
+- Added a PDF file with recommended Design Patterns for TBB.
+- Added parallel_pipeline function and companion classes and functions
+    that provide a strongly typed lambda-friendly pipeline interface;
+    see the sketch after this list.
+- Reworked enumerable_thread_specific to use a custom implementation of
+    hash map that is more efficient for ETS usage models.
+- Added example for class task_group; see examples/task_group/sudoku.
+- Removed two examples, as they were long outdated and superseded:
+    pipeline/text_filter (use pipeline/square);
+    parallel_while/parallel_preorder (use parallel_do/parallel_preorder).
+- PDF documentation updated.
+- Other fixes and changes in code, tests, and examples.
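+
+    The parallel_pipeline sketch referenced above, assuming the
+    tbb/pipeline.h header and a C++0x compiler for the lambdas:
+
+        #include "tbb/pipeline.h"
+        #include <cstdio>
+
+        int main() {
+            int i = 0;
+            tbb::parallel_pipeline( /*max_number_of_live_tokens=*/ 4,
+                // Serial input stage: produce the integers 0..9.
+                tbb::make_filter<void,int>( tbb::filter::serial_in_order,
+                    [&]( tbb::flow_control& fc ) -> int {
+                        if( i == 10 ) { fc.stop(); return 0; }
+                        return i++;
+                    } ) &
+                // Parallel output stage: consume each token.
+                tbb::make_filter<int,void>( tbb::filter::parallel,
+                    []( int x ) { std::printf( "%d\n", x * x ); } ) );
+            return 0;
+        }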
+
+Bugs fixed: 
+
+- Eliminated build errors with MinGW32.
+- Fixed post-build step and other issues in VS projects for examples.
+- Fixed discrepancy between scalable_realloc and scalable_msize that 
+    caused crashes with malloc replacement on Windows.
+
+------------------------------------------------------------------------
+20100310 open-source release
+
+Changes (w.r.t. TBB 2.2 Update 3 commercial-aligned release):
+
+- Version macros changed in anticipation of a future release.
+- Directory structure aligned with Intel(R) C++ Compiler;
+    now TBB binaries reside in <arch>/<os_key>/[bin|lib]
+    (in TBB 2.x, it was [bin|lib]/<arch>/<os_key>).
+- Visual Studio projects changed for examples: instead of separate set
+    of files for each VS version, now there is a single 'msvs' directory
+    that contains workspaces for MS C++ compiler (<example>_cl.sln) and
+    Intel C++ compiler (<example>_icl.sln). Works with VS 2005 and above.
+- The name versioning scheme for backward compatibility was improved;
+    now compatibility-breaking changes are done in a separate namespace.
+- Added concurrent_unordered_map implementation based on a prototype
+    developed at Microsoft for a future version of PPL.
+- Added PPL-compatible writer-preference RW lock (reader_writer_lock).
+- Added TBB_IMPLEMENT_CPP0X macro to control injection of C++0x names
+    implemented in TBB into namespace std.
+- Added almost-C++0x-compatible std::condition_variable, plus a bunch
+    of other C++0x classes required by condition_variable.
+- With TBB_IMPLEMENT_CPP0X, tbb_thread can also be used as std::thread;
+    a tbb_thread usage sketch follows this list.
+- task.cpp was split into several translation units to structure the
+    TBB scheduler source layout. Static data layout and library
+    initialization logic were also updated.
+- TBB scheduler reworked to prevent master threads from stealing
+    work belonging to other masters.
+- Class task was extended with an enqueue() method, and the semantics
+    of the spawn() and destroy() methods were slightly changed. For exact
+    semantics, refer to the TBB Reference manual.
+- task_group_context now allows for destruction by non-owner threads.
+- Added TBB_USE_EXCEPTIONS macro to control use of exceptions in TBB
+    headers. It turns off (i.e. is set to 0) automatically if the
+    specified compiler options disable exception handling.
+- TBB is enabled to run on top of Microsoft's Concurrency Runtime
+    on Windows* 7 (via our worker dispatcher known as RML).
+- Removed old unused busy-waiting code in concurrent_queue.
+- Described the advanced build & test options in src/index.html.
+- Warning level for GCC raised with -Wextra and a few other options.
+- Multiple fixes and improvements in code, tests, examples, and docs.
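+
+    A minimal tbb_thread sketch referenced above, assuming the
+    tbb/tbb_thread.h header:
+
+        #include "tbb/tbb_thread.h"
+        #include <cstdio>
+
+        void hello() { std::printf( "hello from a tbb_thread\n" ); }
+
+        int main() {
+            // The interface follows the C++0x std::thread draft; with
+            // TBB_IMPLEMENT_CPP0X the class is also visible as std::thread.
+            tbb::tbb_thread t( hello );
+            t.join();
+            return 0;
+        }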
+
+Open-source contributions integrated:
+
+- Xbox support by Roman Lut (Deep Shadows), though further changes are
+    required to make it work; e.g. post-2.1 entry points are missing.
+- "Eventcount" by Dmitry Vyukov evolved into concurrent_monitor,
+    an internal class used in the implementation of concurrent_queue.
+
+------------------------------------------------------------------------
+TBB 2.2 Update 3 commercial-aligned release
+
+Changes (w.r.t. TBB 2.2 Update 2 commercial-aligned release):
+
+- PDF documentation updated.
+
+Bugs fixed: 
+
+- concurrent_hash_map compatibility issue exposed on Linux in case
+    two versions of the container were used by different modules.
+- enforced 16-byte stack alignment for consistency with GCC; required
+    to work correctly with 128-bit variables processed by SSE.
+- construct() methods of allocator classes now use global operator new.
+
+------------------------------------------------------------------------
+TBB 2.2 Update 2 commercial-aligned release
+
+Changes (w.r.t. TBB 2.2 Update 1 commercial-aligned release):
+
+- parallel_invoke and parallel_for_each now take function objects
+    by const reference, not by value.
+- Building TBB with /MT is supported, to avoid dependency on particular
+    versions of Visual C++* runtime DLLs. TBB DLLs built with /MT 
+    are located in vc_mt directory.
+- Class critical_section introduced.
+- Improvements in exception support: new exception classes introduced,
+    all exceptions are thrown via an out-of-line internal method.
+- Improvements and fixes in the TBB allocator and malloc replacement,
+    including robust memory identification, and more reliable dynamic
+    function substitution on Windows*.
+- Method swap() added to class tbb_thread.
+- Methods rehash() and bucket_count() added to concurrent_hash_map.
+- Added support for Visual Studio* 2010 Beta2. No special binaries 
+    provided, but CRT-independent DLLs (vc_mt) should work.
+- Other fixes and improvements in code, tests, examples, and docs.
+
+Open-source contributions integrated:
+
+- The fix to build 32-bit TBB on Mac OS* X 10.6.
+- GCC-based port for SPARC Solaris by Michailo Matijkiw, with use of
+    earlier work by Raf Schietekat.
+
+Bugs fixed: 
+
+- 159 - TBB build for PowerPC* running Mac OS* X.
+- 160 - IBM* Java segfault if used with TBB allocator.
+- crash in concurrent_queue<char> (1616).
+
+------------------------------------------------------------------------
+TBB 2.2 Update 1 commercial-aligned release
+
+Changes (w.r.t. TBB 2.2 commercial-aligned release):
+
+- Incorporates all changes from open-source releases below.
+- Documentation was updated.
+- TBB scheduler auto-initialization now covers all possible use cases.
+- concurrent_queue: made the argument types of sizeof used in paddings
+    consistent with those actually used.
+- Memory allocator was improved: supported the corner case of a user's
+    malloc calling scalable_malloc (non-Windows), and corrected the
+    processing of memory allocation requests during TBB memory allocator
+    startup (Linux).
+- Windows malloc replacement now has better support for static objects.
+- In pipeline setups that do not allow actual parallelism, execution 
+    by a single thread is guaranteed, idle spinning eliminated, and 
+    performance improved.
+- RML refactoring and clean-up.
+- New constructor for concurrent_hash_map allows reserving space for 
+    a number of items.
+- Operator delete() added to the TBB exception classes.
+- Lambda support was improved in parallel_reduce.
+- gcc 4.3 warnings were fixed for concurrent_queue.
+- Fixed possible initialization deadlock in modules using TBB entities
+    during construction of global static objects.
+- Copy constructor in concurrent_hash_map was fixed.
+- Fixed a couple of rare scheduler crashes that could previously occur
+    in very specific use cases.
+- Fixed a rare crash in the TBB allocator when running out of memory.
+- New tests were implemented, including test_lambda.cpp that checks 
+    support for lambda expressions.
+- A few other small changes in code, tests, and documentation.
+
+------------------------------------------------------------------------
+20090809 open-source release
+
+Changes (w.r.t. TBB 2.2 commercial-aligned release):
+
+- Fixed known exception safety issues in concurrent_vector.
+- Better concurrency of simultaneous grow requests in concurrent_vector.
+- TBB allocator further improves performance of large object allocation.
+- Fixed a problem with a source of text relocations on Linux.
+- Fixed bugs related to malloc replacement under Windows.
+- A few other small changes in code and documentation.
+
+------------------------------------------------------------------------
+TBB 2.2 commercial-aligned release
+
+Changes (w.r.t. TBB 2.1 U4 commercial-aligned release):
+
+- Incorporates all changes from open-source releases below.
+- Architecture folders renamed from em64t to intel64 and from itanium
+    to ia64.
+- Major Interface version changed from 3 to 4. Deprecated interfaces 
+    might be removed in future releases.
+- Parallel algorithms that use partitioners have switched to use 
+    the auto_partitioner by default.
+- Improved memory allocator performance for allocations bigger than 8K.
+- Added new thread-bound filters functionality for pipeline.
+- New implementation of concurrent_hash_map that improves performance 
+    significantly.
+- A few other small changes in code and documentation.
+
+------------------------------------------------------------------------
+20090511 open-source release
+
+Changes (w.r.t. previous open-source release):
+
+- Basic support for MinGW32 development kit.
+- Added tbb::zero_allocator class that initializes memory with zeros.
+    It can be used as an adaptor to any STL-compatible allocator class.
+- Added tbb::parallel_for_each template function as an alias to
+    parallel_do; a sketch using both additions follows this list.
+- Added more overloads for tbb::parallel_for.
+- Added support for exact exception propagation (can only be used with 
+    compilers that support C++0x std::exception_ptr).
+- tbb::atomic template class can be used with enumerations.
+- mutex, recursive_mutex, spin_mutex, spin_rw_mutex classes extended
+    with explicit lock/unlock methods.
+- Fixed size() and grow_to_at_least() methods of tbb::concurrent_vector
+    to provide space allocation guarantees. More methods added for
+    compatibility with std::vector, including some from C++0x.
+- Preview of a lambda-friendly interface for low-level use of tasks.
+- scalable_msize function added to the scalable allocator (Windows only).
+- Rationalized internal auxiliary functions for spin-waiting and backoff.
+- Several tests underwent substantial refactoring.
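+
+    The sketch referenced above that uses both tbb::zero_allocator and
+    tbb::parallel_for_each, assuming the tbb/tbb_allocator.h and
+    tbb/parallel_for_each.h headers:
+
+        #include "tbb/tbb_allocator.h"      // tbb::zero_allocator
+        #include "tbb/parallel_for_each.h"  // tbb::parallel_for_each
+        #include <vector>
+        #include <cstdio>
+
+        void print_square( int x ) { std::printf( "%d\n", x * x ); }
+
+        int main() {
+            // zero_allocator adapts another STL-compatible allocator
+            // and hands out zero-filled memory.
+            std::vector<int, tbb::zero_allocator<int> > v( 8 );
+            for( int i = 0; i < 8; ++i )
+                v[i] = i;
+            // parallel_for_each applies the function to each element,
+            // like parallel_do over [begin, end).
+            tbb::parallel_for_each( v.begin(), v.end(), print_square );
+            return 0;
+        }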
+
+Changes affecting backward compatibility:
+
+- Improvements in concurrent_queue, including limited API changes. 
+    The previous version is deprecated; its functionality is accessible 
+    via methods of the new tbb::concurrent_bounded_queue class.
+- grow* and push_back methods of concurrent_vector changed to return
+    iterators; old semantics is deprecated. 
+
+------------------------------------------------------------------------
+TBB 2.1 Update 4 commercial-aligned release
+
+Changes (w.r.t. TBB 2.1 U3 commercial-aligned release):
+
+- Added tests for aligned memory allocations and malloc replacement.
+- Several improvements for better bundling with Intel(R) C++ Compiler.
+- A few other small changes in code and documentation.
+
+Bugs fixed: 
+
+- 150 - request to build TBB examples with debug info in release mode.
+- backward compatibility issue with concurrent_queue on Windows.
+- dependency on VS 2005 SP1 runtime libraries removed.
+- compilation of GUI examples under Xcode* 3.1 (1577).
+- On Windows, TBB allocator classes can be instantiated with const types 
+    for compatibility with MS implementation of STL containers (1566).
+
+------------------------------------------------------------------------
+20090313 open-source release
+
+Changes (w.r.t. 20081109 open-source release):
+
+- Includes all changes introduced in TBB 2.1 Update 2 & Update 3
+    commercial-aligned releases (see below for details).
+- Added tbb::parallel_invoke template function. It runs up to 10
+    user-defined functions in parallel and waits for them to complete;
+    see the sketch after this list.
+- Added a special library providing the ability to replace the standard
+    memory allocation routines in Microsoft* C/C++ RTL (malloc/free,
+    global new/delete, etc.) with the TBB memory allocator. 
+    Usage details are described in include/tbb/tbbmalloc_proxy.h file.
+- Task scheduler switched to use new implementation of its core 
+    functionality (deque based task pool, new structure of arena slots).
+- Preview of Microsoft* Visual Studio* 2005 project files for 
+    building the library is available in build/vsproject folder.
+- Added tests for aligned memory allocations and malloc replacement.
+- Added parallel_for/game_of_life.net example (for Windows only) 
+    showing TBB usage in a .NET application.
+- A number of other fixes and improvements to code, tests, makefiles,
+    examples and documents.
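+
+    The parallel_invoke sketch referenced above, assuming the
+    tbb/parallel_invoke.h header:
+
+        #include "tbb/parallel_invoke.h"
+        #include <cstdio>
+
+        void task_a() { std::printf( "A\n" ); }
+        void task_b() { std::printf( "B\n" ); }
+        void task_c() { std::printf( "C\n" ); }
+
+        int main() {
+            // Runs the three functions, potentially in parallel, and
+            // waits for all of them to complete.
+            tbb::parallel_invoke( task_a, task_b, task_c );
+            return 0;
+        }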
+
+Bugs fixed: 
+
+- The same list as in TBB 2.1 Update 4 right above.
+
+------------------------------------------------------------------------
+TBB 2.1 Update 3 commercial-aligned release
+
+Changes (w.r.t. TBB 2.1 U2 commercial-aligned release):
+
+- Added support for aligned allocations to the TBB memory allocator.
+- Added a special library to use with LD_PRELOAD on Linux* in order to 
+    replace the standard memory allocation routines in C/C++ with the 
+    TBB memory allocator.
+- Added null_mutex and null_rw_mutex: no-op classes interface-compliant 
+    to other TBB mutexes.
+- Improved performance of parallel_sort, to close most of the serial gap
+    with std::sort, and to beat it on 2 or more cores.
+- A few other small changes.
+
+Bugs fixed:
+
+- the problem where parallel_for hung after an exception was thrown
+    if affinity_partitioner was used (1556).
+- got rid of VS warnings about mbstowcs deprecation (1560),
+    as well as some other warnings.
+- operator== for concurrent_vector::iterator fixed to work correctly
+    with different vector instances.
+
+------------------------------------------------------------------------
+TBB 2.1 Update 2 commercial-aligned release
+
+Changes (w.r.t. TBB 2.1 U1 commercial-aligned release):
+
+- Incorporates all open-source-release changes down to TBB 2.1 U1,
+    except for:
+    - 20081019 addition of enumerable_thread_specific;
+- Warning level for Microsoft* Visual C++* compiler raised to /W4 /Wp64;
+    warnings found on this level were cleaned or suppressed.
+- Added TBB_runtime_interface_version API function.
+- Added new example: pipeline/square.
+- Added exception handling and cancellation support
+    for parallel_do and pipeline.
+- Added copy constructor and [begin,end) constructor to concurrent_queue.
+- Added some support for beta version of Intel(R) Parallel Amplifier.
+- Added scripts to set environment for cross-compilation of 32-bit 
+    applications on 64-bit Linux with Intel(R) C++ Compiler.
+- Fixed semantics of concurrent_vector::clear() to not deallocate
+    internal arrays. Fixed compact() to perform such deallocation later.
+- Fixed the issue with atomic<T*> when T is an incomplete type.
+- Improved support for PowerPC* Macintosh*, including the fix 
+    for a bug in masked compare-and-swap reported by a customer.
+- As usual, a number of other improvements everywhere.
+
+------------------------------------------------------------------------
+20081109 open-source release
+
+Changes (w.r.t. previous open-source release):
+
+- Added a new serial out-of-order filter for tbb::pipeline.
+- Fixed the issue with atomic<T*>::operator= reported at the forum.
+- Fixed the issue with using tbb::task::self() in task destructor 
+    reported at the forum.
+- A number of other improvements to code, tests, makefiles, examples 
+    and documents.
+
+Open-source contributions integrated:
+
+- Changes in the memory allocator were partially integrated.
+
+------------------------------------------------------------------------
+20081019 open-source release
+
+Changes (w.r.t. previous open-source release):
+
+- Introduced enumerable_thread_specific<T>. This new class provides a
+    wrapper around native thread local storage as well as iterators and
+    ranges for accessing the thread local copies (1533); see the sketch
+    after this list.
+- Improved support for Intel(R) Threading Analysis Tools
+    on Intel(R) 64 architecture.
+- Dependency on the Microsoft* CRT was integrated into the libraries
+    using manifests, to avoid issues if called from code that uses a
+    different version of the Visual C++* runtime than the library.
+- Introduced new defines TBB_USE_ASSERT, TBB_USE_DEBUG, 
+    TBB_USE_PERFORMANCE_WARNINGS, TBB_USE_THREADING_TOOLS.
+- A number of other improvements to code, tests, makefiles, examples 
+    and documents.
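+
+    The enumerable_thread_specific sketch referenced above, assuming the
+    tbb/enumerable_thread_specific.h, tbb/parallel_for.h, and
+    tbb/blocked_range.h headers:
+
+        #include "tbb/enumerable_thread_specific.h"
+        #include "tbb/parallel_for.h"
+        #include "tbb/blocked_range.h"
+        #include <cstdio>
+
+        typedef tbb::enumerable_thread_specific<int> CounterType;
+        CounterType counters( 0 );   // each thread's copy starts at 0
+
+        struct Count {
+            void operator()( const tbb::blocked_range<int>& r ) const {
+                int& local = counters.local();   // this thread's copy
+                for( int i = r.begin(); i != r.end(); ++i )
+                    ++local;
+            }
+        };
+
+        int main() {
+            tbb::parallel_for( tbb::blocked_range<int>( 0, 1000 ), Count() );
+            int total = 0;
+            // Iterate over the per-thread copies to combine them.
+            for( CounterType::iterator it = counters.begin();
+                 it != counters.end(); ++it )
+                total += *it;
+            std::printf( "total = %d\n", total );   // expect 1000
+            return 0;
+        }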
+
+Open-source contributions integrated:
+
+- linker optimization: /incremental:no .
+
+------------------------------------------------------------------------
+20080925 open-source release
+
+Changes (w.r.t. previous open-source release):
+
+- Same fix for a memory leak in the memory allocator as in TBB 2.1 U1.
+- Improved support for lambda functions.
+- Fixed more concurrent_queue issues reported at the forum.
+- A number of other improvements to code, tests, makefiles, examples 
+    and documents.
+
+------------------------------------------------------------------------
+TBB 2.1 Update 1 commercial-aligned release
+
+Changes (w.r.t. TBB 2.1 commercial-aligned release):
+
+- Fixed small memory leak in the memory allocator.
+- Incorporates all open-source-release changes since TBB 2.1, except for:
+    - 20080825 changes for parallel_do;
+
+------------------------------------------------------------------------
+20080825 open-source release
+
+Changes (w.r.t. previous open-source release):
+
+- Added exception handling and cancellation support for parallel_do.
+- Added default HashCompare template argument for concurrent_hash_map.
+- Fixed concurrent_queue.clear() issues due to an incorrect assumption
+    about clear() being a private method.
+- Added the possibility to use TBB in applications that change
+    default calling conventions (Windows* only).
+- Many improvements to code, tests, examples, makefiles and documents.
+
+Bugs fixed:
+
+- 120, 130 - memset declaration missing in concurrent_hash_map.h.
+
+------------------------------------------------------------------------
+20080724 open-source release
+
+Changes (w.r.t. previous open-source release):
+
+- Inline assembly for atomic operations improved for gcc 4.3.
+- A few more improvements to the code.
+
+------------------------------------------------------------------------
+20080709 open-source release
+
+Changes (w.r.t. previous open-source release):
+
+- operator=() was added to the tbb_thread class according to
+    the current working draft for std::thread.
+- Recognizing SPARC* in makefiles for Linux* and Sun Solaris*.
+
+Bugs fixed:
+
+- 127 - concurrent_hash_map::range fixed to split correctly.
+
+Open-source contributions integrated:
+
+- fix_set_midpoint.diff by jyasskin.
+- SPARC* support in makefiles by Raf Schietekat.
+
+------------------------------------------------------------------------
+20080622 open-source release
+
+Changes (w.r.t. previous open-source release):
+
+- Fixed a hang that rarely happened on Linux
+    during deinitialization of the TBB scheduler.
+- Improved support for Intel(R) Thread Checker.
+- A few more improvements to the code.
+
+------------------------------------------------------------------------
+TBB 2.1 commercial-aligned release
+
+Changes (w.r.t. TBB 2.0 U3 commercial-aligned release):
+
+- All open-source-release changes down to, and including, TBB 2.0 below,
+    were incorporated into this release.
+
+------------------------------------------------------------------------
+20080605 open-source release
+
+Changes (w.r.t. previous open-source release):
+
+- Explicit control of exported symbols by version scripts added on Linux.
+- Interfaces polished for exception handling & algorithm cancellation.
+- Cache behavior improvements in the scalable allocator.
+- Improvements in text_filter, polygon_overlay, and other examples.
+- A lot of other stability improvements in code, tests, and makefiles.
+- First release where binary packages include headers/docs/examples, so
+    binary packages are now self-sufficient for using TBB.
+
+Open-source contributions integrated:
+
+- atomics patch (partially).
+- tick_count warning patch.
+
+Bugs fixed:
+
+- 118 - fix for boost compatibility.
+- 123 - fix for tbb_machine.h.
+
+------------------------------------------------------------------------
+20080512 open-source release
+
+Changes (w.r.t. previous open-source release):
+
+- Fixed a problem with backward binary compatibility
+    of debug Linux builds.
+- Sun* Studio* support added.
+- soname support added on Linux via linker script. To restore backward 
+    binary compatibility, *.so -> *.so.2 softlinks should be created.
+- concurrent_hash_map improvements - added a few new forms of the
+    insert() method and fixed preconditions and guarantees of the erase()
+    methods. Added runtime warning reporting about a bad hash function
+    used for the container. Various improvements for performance and
+    concurrency.
+- Cancellation mechanism reworked so that it does not hurt scalability.
+- Algorithm parallel_do reworked. Requirement for Body::argument_type 
+    definition removed, and work item argument type can be arbitrarily 
+    cv-qualified.
+- polygon_overlay example added.
+- A few more improvements to code, tests, examples and Makefiles.
+
+Open-source contributions integrated:
+
+- Soname support patch for Bugzilla #112.
+
+Bugs fixed:
+
+- 112 - fix for soname support.
+
+------------------------------------------------------------------------
+TBB 2.0 U3 commercial-aligned release (package 017, April 20, 2008)
+
+Corresponds to commercial 019 (for Linux*, 020; for Mac OS* X, 018)
+packages.
+
+Changes (w.r.t. TBB 2.0 U2 commercial-aligned release):
+
+- Does not contain open-source-release changes below; this release is
+    only a minor update of TBB 2.0 U2.
+- Removed spin-waiting in pipeline and concurrent_queue.
+- A few more small bug fixes from open-source releases below.
+
+------------------------------------------------------------------------
+20080408 open-source release
+
+Changes (w.r.t. previous open-source release):
+
+- count_strings example reworked: new word generator implemented, hash 
+    function replaced, and tbb_allocator is used with std::string class.
+- Static methods of spin_rw_mutex were replaced by normal member 
+    functions, and the class name was versioned.
+- tacheon example was renamed to tachyon.
+- Improved support for Intel(R) Thread Checker.
+- A few more minor improvements.
+
+Open-source contributions integrated:
+
+- Two sets of Sun patches for IA Solaris support.
+
+------------------------------------------------------------------------
+20080402 open-source release
+
+Changes (w.r.t. previous open-source release):
+
+- Exception handling and cancellation support for tasks and algorithms 
+    fully enabled.
+- Exception safety guarantees defined and fixed for all concurrent
+    containers.
+- User-defined memory allocator support added to all concurrent 
+    containers.
+- Performance improvements for concurrent_hash_map and spin_rw_mutex.
+- Critical fix for a rare race condition during scheduler 
+    initialization/de-initialization.
+- New methods added for concurrent containers to be closer to STL,
+    as well as automatic filters removal from pipeline
+    and __TBB_AtomicAND function.
+- The volatile keyword dropped from where it is not really needed.
+- A few more minor improvements.
+
+------------------------------------------------------------------------
+20080319 open-source release
+
+Changes (w.r.t. previous open-source release):
+
+- Support for gcc version 4.3 was added.
+- tbb_thread class, near compatible with std::thread expected in C++0x, 
+    was added.
+
+Bugs fixed:
+
+- 116 - fix for compilation issues with gcc version 4.2.1.
+- 120 - fix for compilation issues with gcc version 4.3.
+
+------------------------------------------------------------------------
+20080311 open-source release
+
+Changes (w.r.t. previous open-source release):
+
+- An enumerator added for pipeline filter types (serial vs. parallel).
+- New task_scheduler_observer class introduced, to observe when
+    threads start and finish interacting with the TBB task scheduler.
+- task_scheduler_init reverted to not use internal versioned class;
+    binary compatibility guaranteed with stable releases only.
+- Various improvements to code, tests, examples and Makefiles.
+
+------------------------------------------------------------------------
+20080304 open-source release
+
+Changes (w.r.t. previous open-source release):
+
+- Task-to-thread affinity support, previously kept under a macro,
+    now fully legalized.
+- Work-in-progress on cache_aligned_allocator improvements.
+- Pipeline now really supports a parallel input stage; it is no longer
+    serialized.
+- Various improvements to code, tests, examples and Makefiles.
+
+Bugs fixed:
+
+- 119 - fix for scalable_malloc sometimes failing to return a big block.
+- TR575 - fixed a deadlock occurring on Windows in startup/shutdown
+    under some conditions.
+
+------------------------------------------------------------------------
+20080226 open-source release
+
+Changes (w.r.t. previous open-source release):
+
+- Introduced tbb_allocator to select between standard allocator and
+    tbb::scalable_allocator when available.
+- Removed spin-waiting in pipeline and concurrent_queue.
+- Improved performance of concurrent_hash_map by using tbb_allocator.
+- Improved support for Intel(R) Thread Checker.
+- Various improvements to code, tests, examples and Makefiles.
+
+------------------------------------------------------------------------
+TBB 2.0 U2 commercial-aligned release (package 017, February 14, 2008)
+
+Corresponds to commercial 017 (for Linux*, 018; for Mac OS* X, 016)
+packages.
+
+Changes (w.r.t. TBB 2.0 U1 commercial-aligned release):
+
+- Does not contain open-source-release changes below; this release is
+    only a minor update of TBB 2.0 U1.
+- Added support for Microsoft* Visual Studio* 2008, including binary
+    libraries and VS2008 projects for examples.
+- Use SwitchToThread() not Sleep() to yield threads on Windows*.
+- Enhancements to Doxygen-readable comments in source code.
+- A few more small bug fixes from open-source releases below.
+
+Bugs fixed:
+
+- TR569 - Memory leak in concurrent_queue.
+
+------------------------------------------------------------------------
+20080207 open-source release
+
+Changes (w.r.t. previous open-source release):
+
+- Improvements and minor fixes in VS2008 projects for examples.
+- Improvements in code for gating worker threads that wait for work,
+  previously consolidated under #if IMPROVED_GATING, now legalized.
+- Cosmetic changes in code, examples, tests.
+
+Bugs fixed:
+
+- 113 - Iterators and ranges should be convertible to their const
+    counterparts.
+- TR569 - Memory leak in concurrent_queue.
+
+------------------------------------------------------------------------
+20080122 open-source release
+
+Changes (w.r.t. previous open-source release):
+
+- Updated examples/parallel_for/seismic to improve the visuals and to
+    use the affinity_partitioner (20071127 and forward) for better
+    performance.
+- Minor improvements to unittests and performance tests.
+
+------------------------------------------------------------------------
+20080115 open-source release
+
+Changes (w.r.t. previous open-source release):
+
+- Cleanup, simplifications and enhancements to the Makefiles for
+    building the libraries (see build/index.html for high-level
+    changes) and the examples.
+- Use SwitchToThread() not Sleep() to yield threads on Windows*.
+- Engineering work-in-progress on exception safety/support.
+- Engineering work-in-progress on affinity_partitioner for
+    parallel_reduce.
+- Engineering work-in-progress on improved gating for worker threads
+    (idle workers now block in the OS instead of spinning).
+- Enhancements to Doxygen-readable comments in source code.
+
+Bugs fixed:
+
+- 102 - Support for parallel build with gmake -j
+- 114 - /Wp64 build warning on Windows*.
+
+------------------------------------------------------------------------
+20071218 open-source release
+
+Changes (w.r.t. previous open-source release):
+
+- Full support for Microsoft* Visual Studio* 2008 in open-source.
+    Binaries for vc9/ will be available in future stable releases.
+- New recursive_mutex class.
+- Full support for 32-bit PowerMac including export files for builds.
+- Improvements to parallel_do.
+
+------------------------------------------------------------------------
+20071206 open-source release
+
+Changes (w.r.t. previous open-source release):
+
+- Support for Microsoft* Visual Studio* 2008 in building libraries
+    from source as well as in vc9/ projects for examples.
+- Small fixes to the affinity_partitioner first introduced in 20071127.
+- Small fixes to the thread-stack size hook first introduced in 20071127.
+- Engineering work in progress on concurrent_vector.
+- Engineering work in progress on exception behavior.
+- Unittest improvements.
+
+------------------------------------------------------------------------
+20071127 open-source release
+
+Changes (w.r.t. previous open-source release):
+
+- Task-to-thread affinity support (affinity partitioner) first appears.
+- More work on concurrent_vector.
+- New parallel_do algorithm (function-style version of parallel_while)
+    and parallel_do/parallel_preorder example.
+- New task_scheduler_init() hooks for getting default_num_threads() and
+    for setting thread stack size.
+- Support for weak memory consistency models in the code base.
+- Futex usage in the task scheduler (Linux).
+- Started adding 32-bit PowerMac support.
+- Intel(R) 9.1 compilers are now the base supported Intel(R) compiler
+    version.
+- TBB libraries added to link line automatically on Microsoft Windows*
+    systems via #pragma comment linker directives.
+
+Open-source contributions integrated:
+
+- FreeBSD platform support patches.
+- AIX weak memory model patch.
+
+Bugs fixed:
+
+- 108 - Removed broken affinity.h reference.
+- 101 - Does not build on Debian Lenny (replaced arch with uname -m).
+
+------------------------------------------------------------------------
+20071030 open-source release
+
+Changes (w.r.t. previous open-source release):
+
+- More work on concurrent_vector.
+- Better support for building with -Wall -Werror (or not) as desired.
+- A few fixes to eliminate extraneous warnings.
+- Begin introduction of versioning hooks so that the internal/API
+    version is tracked via TBB_INTERFACE_VERSION.  The newest binary
+    libraries should always work with previously-compiled code
+    whenever possible.
+- Engineering work in progress on using futex inside the mutexes (Linux).
+- Engineering work in progress on exception behavior.
+- Engineering work in progress on a new parallel_do algorithm.
+- Unittest improvements.
+
+------------------------------------------------------------------------
+20070927 open-source release
+
+Changes (w.r.t. TBB 2.0 U1 commercial-aligned release):
+
+- Minor update to TBB 2.0 U1 below.
+- Begin introduction of new concurrent_vector interfaces not released
+    with TBB 2.0 U1.
+
+------------------------------------------------------------------------
+TBB 2.0 U1 commercial-aligned release (package 014, October 1, 2007)
+
+Corresponds to commercial 014 (for Linux*, 016) packages.
+
+Changes (w.r.t. TBB 2.0 commercial-aligned release):
+
+- All open-source-release changes down to, and including, TBB 2.0 below,
+    were incorporated into this release.
+- Made a number of changes to the officially supported OS list:
+    Added Linux* OSs:
+       Asianux* 3, Debian* 4.0, Fedora Core* 6, Fedora* 7,
+       Turbo Linux* 11, Ubuntu* 7.04;
+    Dropped Linux* OSs:
+       Asianux* 2, Fedora Core* 4, Haansoft* Linux 2006 Server,
+       Mandriva/Mandrake* 10.1, Miracle Linux* 4.0,
+       Red Flag* DC Server 5.0;
+    Only Mac OS* X 10.4.9 (and forward) and Xcode* tool suite 2.4.1 (and
+       forward) are now supported.
+- Commercial installers on Linux* fixed to recommend the correct
+    binaries to use in more cases, with fewer unnecessary warnings.
+- Changes to eliminate spurious build warnings.
+
+Open-source contributions integrated:
+
+- Two small header guard macro patches; these also fixed bug #94.
+- New blocked_range3d class.
+
+Bugs fixed:
+
+- 93 - Removed misleading comments in task.h.
+- 94 - See above.
+
+------------------------------------------------------------------------
+20070815 open-source release
+
+Changes:
+
+- Changes to eliminate spurious build warnings.
+- Engineering work in progress on concurrent_vector allocator behavior.
+- Added hooks to use the Intel(R) compiler code coverage tools.
+
+Open-source contributions integrated:
+
+- Mac OS* X build warning patch.
+
+Bugs fixed:
+
+- 88 - Fixed TBB compilation errors if both VS2005 and Windows SDK are
+    installed.
+
+------------------------------------------------------------------------
+20070719 open-source release
+
+Changes:
+
+- Minor update to TBB 2.0 commercial-aligned release below.
+- Changes to eliminate spurious build warnings.
+
+------------------------------------------------------------------------
+TBB 2.0 commercial-aligned release (package 010, July 19, 2007)
+
+Corresponds to commercial 010 (for Linux*, 012) packages.
+
+- TBB open-source debut release.
+
+------------------------------------------------------------------------
+TBB 1.1 commercial release (April 10, 2007)
+
+Changes (w.r.t. TBB 1.0 commercial release):
+
+- Added auto_partitioner, which offers an automatic alternative to
+    specifying a grain size parameter to estimate the best granularity
+    for tasks.
+- The release was added to the Intel(R) C++ Compiler 10.0 Pro.
+
+------------------------------------------------------------------------
+TBB 1.0 Update 2 commercial release
+
+Changes (w.r.t. TBB 1.0 Update 1 commercial release):
+
+- Mac OS* X 64-bit support added.
+- Source packages for commercial releases introduced.
+
+------------------------------------------------------------------------
+TBB 1.0 Update 1 commercial-aligned release
+
+Changes (w.r.t. TBB 1.0 commercial release):
+
+- Fix for critical package issue on Mac OS* X.
+
+------------------------------------------------------------------------
+TBB 1.0 commercial release (August 29, 2006)
+
+Changes (w.r.t. TBB 1.0 beta commercial release):
+
+- New namespace (and compatibility headers for old namespace).  
+    Namespaces are tbb and tbb::internal and all classes are in the 
+    underscore_style not the WindowsStyle.
+- New class: scalable_allocator (and cache_aligned_allocator using that 
+    if it exists).
+- Added parallel_for/tacheon example.
+- Removed C-style casts from headers for better C++ compliance.
+- Bug fixes.
+- Documentation improvements.
+- Improved performance of the concurrent_hash_map class.
+- Upgraded parallel_sort() to support STL-style random-access iterators 
+    instead of just pointers.
+- The Windows vs7_1 directories renamed to vs7.1 in examples.
+- New class: spin version of reader-writer lock.
+- Added push_back() interface to concurrent_vector.
+
+------------------------------------------------------------------------
+TBB 1.0 beta commercial release
+
+Initial release.
+
+Features / APIs:
+
+- Concurrent containers: ConcurrentHashTable, ConcurrentVector, 
+    ConcurrentQueue.
+- Parallel algorithms: ParallelFor, ParallelReduce, ParallelScan, 
+    ParallelWhile, Pipeline, ParallelSort.
+- Support: AlignedSpace, BlockedRange (i.e., 1D), BlockedRange2D.
+- Task scheduler with multi-master support.
+- Atomics: read, write, fetch-and-store, fetch-and-add, compare-and-swap.
+- Locks: spin, reader-writer, queuing, OS-wrapper.
+- Memory allocation: STL-style memory allocator that avoids false 
+    sharing.
+- Timers.
+
+Tools Support: 
+- Thread Checker 3.0.
+- Thread Profiler 3.0.
+
+Documentation:
+- First Use Documents: README.txt, INSTALL.txt, Release_Notes.txt, 
+    Doc_Index.html, Getting_Started.pdf, Tutorial.pdf, Reference.pdf.
+- Class hierarchy HTML pages (Doxygen).
+- Tree of index.html pages for navigating the installed package, esp. 
+    for the examples.
+
+Examples:
+- One for each of these TBB features: ConcurrentHashTable, ParallelFor, 
+    ParallelReduce, ParallelWhile, Pipeline, Task.
+- Live copies of examples from Getting_Started.pdf.
+- TestAll example that exercises every class and header in the package 
+    (i.e., a "liveness test").
+- Compilers: see Release_Notes.txt.
+- APIs: OpenMP, WinThreads, Pthreads.
+
+Packaging:
+- Package for Windows installs IA-32 and EM64T bits.
+- Package for Linux installs IA-32, EM64T and IPF bits.
+- Package for Mac OS* X installs IA-32 bits.
+- All packages support Intel(R) software setup assistant (ISSA) and 
+    install-time FLEXlm license checking.
+- ISSA support allows a license file to be specified directly in case of
+    no Internet connection or problems with IRC or serial numbers.
+- Linux installer allows root or non-root, RPM or non-RPM installs.
+- FLEXlm license servers (for those who need floating/counted licenses) 
+    are provided separately on Intel(R) Premier.
+
+------------------------------------------------------------------------
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/COPYING b/deal.II/contrib/tbb/tbb30_104oss/COPYING
new file mode 100644 (file)
index 0000000..5af6ed8
--- /dev/null
@@ -0,0 +1,353 @@
+                   GNU GENERAL PUBLIC LICENSE
+                      Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                           Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                   GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+                           NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+                    END OF TERMS AND CONDITIONS
+
+           How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
+----------------     END OF Gnu General Public License     ----------------
+
+The source code of Threading Building Blocks is distributed under version 2
+of the GNU General Public License, with the so-called "runtime exception,"
+as follows (or see any header or implementation file):
+
+   As a special exception, you may use this file as part of a free software
+   library without restriction.  Specifically, if other files instantiate
+   templates or use macros or inline functions from this file, or you compile
+   this file and link it with other files to produce an executable, this
+   file does not by itself cause the resulting executable to be covered by
+   the GNU General Public License.  This exception does not however
+   invalidate any other reasons why the executable file might be covered by
+   the GNU General Public License.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/Makefile b/deal.II/contrib/tbb/tbb30_104oss/Makefile
new file mode 100644 (file)
index 0000000..f8faec0
--- /dev/null
@@ -0,0 +1,84 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+tbb_root?=.
+include $(tbb_root)/build/common.inc
+.PHONY: default all tbb tbbmalloc test examples
+
+# Workaround: the otherwise independent targets tbb and tbbmalloc both depend on
+# version_string.tmp, so keep them serialized here; per the documentation, sub-makes still run in parallel.
+.NOTPARALLEL: tbb tbbmalloc
+
+default: tbb tbbmalloc
+
+all: tbb tbbmalloc test examples
+
+tbb: mkdir
+       $(MAKE) -C "$(work_dir)_debug"  -r -f $(tbb_root)/build/Makefile.tbb cfg=debug tbb_root=$(tbb_root)
+       $(MAKE) -C "$(work_dir)_release"  -r -f $(tbb_root)/build/Makefile.tbb cfg=release tbb_root=$(tbb_root)
+
+tbbmalloc: mkdir
+       $(MAKE) -C "$(work_dir)_debug"  -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=debug malloc tbb_root=$(tbb_root)
+       $(MAKE) -C "$(work_dir)_release"  -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=release malloc tbb_root=$(tbb_root)
+
+test: tbb tbbmalloc
+       -$(MAKE) -C "$(work_dir)_debug"  -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=debug malloc_test tbb_root=$(tbb_root)
+       -$(MAKE) -C "$(work_dir)_debug"  -r -f $(tbb_root)/build/Makefile.test cfg=debug tbb_root=$(tbb_root)
+       -$(MAKE) -C "$(work_dir)_release"  -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=release malloc_test tbb_root=$(tbb_root)
+       -$(MAKE) -C "$(work_dir)_release"  -r -f $(tbb_root)/build/Makefile.test cfg=release tbb_root=$(tbb_root) 
+
+rml: mkdir
+       $(MAKE) -C "$(work_dir)_debug"  -r -f $(tbb_root)/build/Makefile.rml cfg=debug tbb_root=$(tbb_root)
+       $(MAKE) -C "$(work_dir)_release"  -r -f $(tbb_root)/build/Makefile.rml cfg=release tbb_root=$(tbb_root)
+
+
+examples: tbb tbbmalloc
+       $(MAKE) -C examples -r -f Makefile tbb_root=.. release test
+
+.PHONY: clean clean_examples mkdir info
+
+clean: clean_examples
+       $(shell $(RM) $(work_dir)_release$(SLASH)*.* >$(NUL) 2>$(NUL))
+       $(shell $(RD) $(work_dir)_release >$(NUL) 2>$(NUL))
+       $(shell $(RM) $(work_dir)_debug$(SLASH)*.* >$(NUL) 2>$(NUL))
+       $(shell $(RD) $(work_dir)_debug >$(NUL) 2>$(NUL))
+       @echo clean done
+
+clean_examples:
+       $(shell $(MAKE) -s -i -r -C examples -f Makefile tbb_root=.. clean >$(NUL) 2>$(NUL))
+
+mkdir:
+       $(shell $(MD) "$(work_dir)_release" >$(NUL) 2>$(NUL))
+       $(shell $(MD) "$(work_dir)_debug" >$(NUL) 2>$(NUL))
+       @echo Created $(work_dir)_release and ..._debug directories
+
+info:
+       @echo OS: $(tbb_os)
+       @echo arch=$(arch)
+       @echo compiler=$(compiler)
+       @echo runtime=$(runtime)
+       @echo tbb_build_prefix=$(tbb_build_prefix)
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/README b/deal.II/contrib/tbb/tbb30_104oss/README
new file mode 100644 (file)
index 0000000..67ab8ad
--- /dev/null
@@ -0,0 +1,11 @@
+Threading Building Blocks - README
+
+See index.html for directions and documentation.
+
+If source is present (./Makefile and src/ directories),
+type 'gmake' in this directory to build and test.
+
+See examples/index.html for runnable examples and directions.
+
+See http://threadingbuildingblocks.org for full documentation
+and software information.
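
Taken together with the top-level Makefile added above, the README's build instructions boil down to a handful of gmake invocations. A minimal sketch (run from this directory; 'gmake' is GNU make, and the ..._debug/..._release output directory names depend on the detected tbb_build_prefix, so they are only indicative here):

    gmake                # default: build libtbb and libtbbmalloc, debug and release
    gmake test           # build the libraries, then run the unit tests
    gmake examples       # build the examples in release mode and run their tests
    gmake info           # print detected OS, arch, compiler, runtime, tbb_build_prefix
    gmake clean          # remove the ..._release and ..._debug build directories
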
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/AIX.gcc.inc b/deal.II/contrib/tbb/tbb30_104oss/build/AIX.gcc.inc
new file mode 100644 (file)
index 0000000..c248205
--- /dev/null
@@ -0,0 +1,85 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+COMPILE_ONLY = -c -MMD
+PREPROC_ONLY = -E -x c
+INCLUDE_KEY = -I
+DEFINE_KEY = -D
+OUTPUT_KEY = -o #
+OUTPUTOBJ_KEY = -o #
+PIC_KEY = -fPIC
+WARNING_AS_ERROR_KEY = -Werror
+WARNING_KEY = -Wall
+DYLIB_KEY = -shared
+LIBDL = -ldl
+
+TBB_NOSTRICT = 1
+
+CPLUS = g++ 
+CONLY = gcc
+LIB_LINK_FLAGS = -shared
+LIBS = -lpthread -ldl 
+C_FLAGS = $(CPLUS_FLAGS) -x c
+
+ifeq ($(cfg), release)
+        CPLUS_FLAGS = -O2 -DUSE_PTHREAD -pthread
+endif
+ifeq ($(cfg), debug)
+        CPLUS_FLAGS = -DTBB_USE_DEBUG -g -O0 -DUSE_PTHREAD -pthread
+endif
+
+ASM=
+ASM_FLAGS=
+
+TBB_ASM.OBJ=
+
+ifeq (powerpc,$(arch))
+    CPLUS_FLAGS    += -maix64 -Wl,-G
+    LIB_LINK_FLAGS += -maix64 -Wl,-b64 -Wl,-brtl -Wl,-G
+endif 
+
+#------------------------------------------------------------------------------
+# Setting assembler data.
+#------------------------------------------------------------------------------
+
+ASSEMBLY_SOURCE=ibm_aix51
+ifeq (powerpc,$(arch))
+    TBB_ASM.OBJ = atomic_support.o
+endif
+
+#------------------------------------------------------------------------------
+# End of setting assembler data.
+#------------------------------------------------------------------------------
+
+#------------------------------------------------------------------------------
+# Setting tbbmalloc data.
+#------------------------------------------------------------------------------
+
+M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions -fno-schedule-insns2
+
+#------------------------------------------------------------------------------
+# End of setting tbbmalloc data.
+#------------------------------------------------------------------------------
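
A small GNU make detail in this and the other *.gcc.inc files: OUTPUT_KEY and OUTPUTOBJ_KEY are written as "-o #" so that the comment sign terminates the assignment and the trailing space stays part of the value. A sketch of why that matters (the rule below is illustrative only):

    OUTPUT_KEY = -o #            value is "-o " including the trailing space
    # a link line written as
    #   $(CPLUS) $(OUTPUT_KEY)$@ ...
    # therefore expands with the flag and the file name as two separate words,
    # while a toolchain whose output option is glued to the file name can
    # define OUTPUT_KEY without the trailing-comment trick.
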
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/AIX.inc b/deal.II/contrib/tbb/tbb30_104oss/build/AIX.inc
new file mode 100644 (file)
index 0000000..0d1c561
--- /dev/null
@@ -0,0 +1,74 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+ifndef arch
+        arch:=$(shell uname -p)
+        export arch
+endif
+
+ifndef runtime
+        gcc_version:=$(shell gcc -v 2>&1 | grep 'gcc version' | sed -e 's/^gcc version //' | sed -e 's/ .*$$//')
+        os_version:=$(shell uname -r)
+        os_kernel_version:=$(shell uname -r | sed -e 's/-.*$$//')
+        export runtime:=cc$(gcc_version)_kernel$(os_kernel_version)
+endif
+
+native_compiler := gcc
+export compiler ?= gcc
+debugger ?= gdb
+
+CMD=$(SHELL) -c
+CWD=$(shell pwd)
+RM?=rm -f
+RD?=rmdir
+MD?=mkdir -p
+NUL= /dev/null
+SLASH=/
+MAKE_VERSIONS=sh $(tbb_root)/build/version_info_aix.sh $(CPLUS) $(CPLUS_FLAGS) $(INCLUDES) >version_string.tmp
+MAKE_TBBVARS=sh $(tbb_root)/build/generate_tbbvars.sh
+
+ifdef LIBPATH
+        export LIBPATH := .:$(LIBPATH)
+else
+        export LIBPATH := .
+endif
+
+####### Build settings ########################################################
+
+OBJ = o
+DLL = so
+
+TBB.DEF =
+TBB.DLL = libtbb$(DEBUG_SUFFIX).$(DLL)
+TBB.LIB = $(TBB.DLL)
+LINK_TBB.LIB = $(TBB.LIB)
+
+MALLOC.DLL = libtbbmalloc$(DEBUG_SUFFIX).$(DLL)
+MALLOC.LIB = $(MALLOC.DLL)
+
+TBB_NOSTRICT=1
+
+TEST_LAUNCHER=sh $(tbb_root)/build/test_launcher.sh
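
For orientation, the arch/runtime probing at the top of AIX.inc produces strings of the following shape; the concrete values below are purely hypothetical and depend on what 'uname -p', 'gcc -v' and 'uname -r' report on the build host:

    arch    = powerpc
    runtime = cc4.2.1_kernel3    # cc<gcc version>_kernel<uname -r, any trailing "-..." stripped>
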
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/FreeBSD.gcc.inc b/deal.II/contrib/tbb/tbb30_104oss/build/FreeBSD.gcc.inc
new file mode 100644 (file)
index 0000000..7c65a71
--- /dev/null
@@ -0,0 +1,93 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+COMPILE_ONLY = -c -MMD
+PREPROC_ONLY = -E -x c
+INCLUDE_KEY = -I
+DEFINE_KEY = -D
+OUTPUT_KEY = -o #
+OUTPUTOBJ_KEY = -o #
+PIC_KEY = -fPIC
+WARNING_AS_ERROR_KEY = -Werror
+WARNING_KEY = -Wall
+DYLIB_KEY = -shared
+
+TBB_NOSTRICT = 1
+
+CPLUS = g++ 
+CONLY = gcc
+LIB_LINK_FLAGS = -shared
+LIBS = -lpthread 
+C_FLAGS = $(CPLUS_FLAGS)
+
+ifeq ($(cfg), release)
+        CPLUS_FLAGS = -O2 -DUSE_PTHREAD
+endif
+ifeq ($(cfg), debug)
+        CPLUS_FLAGS = -DTBB_USE_DEBUG -g -O0 -DUSE_PTHREAD
+endif
+
+ASM=
+ASM_FLAGS=
+
+TBB_ASM.OBJ=
+
+ifeq (ia64,$(arch))
+# Position-independent code (PIC) is a must on IA-64, even for regular (not shared) executables
+    CPLUS_FLAGS += $(PIC_KEY)
+endif 
+
+ifeq (intel64,$(arch))
+    CPLUS_FLAGS += -m64
+    LIB_LINK_FLAGS += -m64
+endif 
+
+ifeq (ia32,$(arch))
+    CPLUS_FLAGS += -m32
+    LIB_LINK_FLAGS += -m32
+endif 
+
+#------------------------------------------------------------------------------
+# Setting assembler data.
+#------------------------------------------------------------------------------
+ASSEMBLY_SOURCE=$(arch)-gas
+ifeq (ia64,$(arch))
+    ASM=as
+    TBB_ASM.OBJ = atomic_support.o lock_byte.o log2.o pause.o
+endif 
+#------------------------------------------------------------------------------
+# End of setting assembler data.
+#------------------------------------------------------------------------------
+
+#------------------------------------------------------------------------------
+# Setting tbbmalloc data.
+#------------------------------------------------------------------------------
+
+M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions -fno-schedule-insns2
+
+#------------------------------------------------------------------------------
+# End of setting tbbmalloc data.
+#------------------------------------------------------------------------------
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/FreeBSD.inc b/deal.II/contrib/tbb/tbb30_104oss/build/FreeBSD.inc
new file mode 100644 (file)
index 0000000..8f2d8c8
--- /dev/null
@@ -0,0 +1,82 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+ifndef arch
+        ifeq ($(shell uname -m),i386)
+                export arch:=ia32
+        endif
+        ifeq ($(shell uname -m),ia64)
+                export arch:=ia64
+        endif
+        ifeq ($(shell uname -m),amd64)
+                export arch:=intel64
+        endif
+endif
+
+ifndef runtime
+        gcc_version:=$(shell gcc -v 2>&1 | grep 'gcc version' | sed -e 's/^gcc version //' | sed -e 's/ .*$$//')
+        os_version:=$(shell uname -r)
+        os_kernel_version:=$(shell uname -r | sed -e 's/-.*$$//')
+        export runtime:=cc$(gcc_version)_kernel$(os_kernel_version)
+endif
+
+native_compiler := gcc
+export compiler ?= gcc
+debugger ?= gdb
+
+CMD=$(SHELL) -c
+CWD=$(shell pwd)
+RM?=rm -f
+RD?=rmdir
+MD?=mkdir -p
+NUL= /dev/null
+SLASH=/
+MAKE_VERSIONS=sh $(tbb_root)/build/version_info_linux.sh $(CPLUS) $(CPLUS_FLAGS) $(INCLUDES) >version_string.tmp
+MAKE_TBBVARS=sh $(tbb_root)/build/generate_tbbvars.sh
+
+ifdef LD_LIBRARY_PATH
+        export LD_LIBRARY_PATH := .:$(LD_LIBRARY_PATH)
+else
+        export LD_LIBRARY_PATH := .
+endif
+
+####### Build settings ########################################################
+
+OBJ = o
+DLL = so
+LIBEXT=so
+
+TBB.DEF = 
+TBB.DLL = libtbb$(DEBUG_SUFFIX).$(DLL)
+TBB.LIB = $(TBB.DLL)
+LINK_TBB.LIB = $(TBB.LIB)
+
+MALLOC.DLL = libtbbmalloc$(DEBUG_SUFFIX).$(DLL)
+MALLOC.LIB = $(MALLOC.DLL)
+
+TBB_NOSTRICT=1
+
+TEST_LAUNCHER=sh $(tbb_root)/build/test_launcher.sh
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/Makefile.rml b/deal.II/contrib/tbb/tbb30_104oss/build/Makefile.rml
new file mode 100644 (file)
index 0000000..aa211e9
--- /dev/null
@@ -0,0 +1,156 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+tbb_root ?= $(TBB30_INSTALL_DIR)
+BUILDING_PHASE=1
+TEST_RESOURCE = $(RML.RES)
+include $(tbb_root)/build/common.inc
+DEBUG_SUFFIX=$(findstring _debug,_$(cfg))
+
+# default target
+default_rml: rml rml_test
+
+RML_ROOT ?= $(tbb_root)/src/rml
+RML_SERVER_ROOT = $(RML_ROOT)/server
+
+VPATH = $(tbb_root)/src/tbb $(tbb_root)/src/tbb/$(ASSEMBLY_SOURCE)
+VPATH += $(RML_ROOT)/server $(RML_ROOT)/client $(RML_ROOT)/test $(tbb_root)/src/test
+
+include $(tbb_root)/build/common_rules.inc
+
+#--------------------------------------------------------------------------
+# Define rules for making the RML server shared library and client objects.
+#--------------------------------------------------------------------------
+
+# Object files that make up RML server 
+RML_SERVER.OBJ = rml_server.$(OBJ)
+
+# Object files that RML clients need
+RML_TBB_CLIENT.OBJ = rml_tbb.$(OBJ) dynamic_link.$(OBJ)
+RML_OMP_CLIENT.OBJ = rml_omp.$(OBJ) omp_dynamic_link.$(OBJ)
+
+RML.OBJ = $(RML_SERVER.OBJ) $(RML_TBB_CLIENT.OBJ) $(RML_OMP_CLIENT.OBJ)
+ifeq (windows,$(tbb_os))
+RML_ASM.OBJ = $(if $(findstring intel64,$(arch)),$(TBB_ASM.OBJ))
+endif
+ifeq (linux,$(tbb_os))
+RML_ASM.OBJ = $(if $(findstring ia64,$(arch)),$(TBB_ASM.OBJ))
+endif
+
+RML_TBB_DEP= cache_aligned_allocator_rml.$(OBJ) dynamic_link_rml.$(OBJ) concurrent_vector_rml.$(OBJ) tbb_misc_rml.$(OBJ)
+TBB_DEP_NON_RML_TEST= cache_aligned_allocator_rml.$(OBJ) dynamic_link_rml.$(OBJ) $(RML_ASM.OBJ) tbb_misc_rml.$(OBJ)
+TBB_DEP_RML_TEST= $(RML_ASM.OBJ)
+ifeq ($(cfg),debug)
+RML_TBB_DEP+= spin_mutex_rml.$(OBJ) 
+TBB_DEP_RML_TEST+= tbb_misc_rml.$(OBJ) 
+endif
+LIBS += $(LIBDL)
+
+INCLUDES += $(INCLUDE_KEY)$(RML_ROOT)/include $(INCLUDE_KEY).
+T_INCLUDES = $(INCLUDES) $(INCLUDE_KEY)$(tbb_root)/src/test $(INCLUDE_KEY)$(RML_SERVER_ROOT)
+
+# Suppress superfluous warnings for RML compilation
+R_CPLUS_FLAGS =  $(subst DO_ITT_NOTIFY,DO_ITT_NOTIFY=0,$(CPLUS_FLAGS)) $(WARNING_SUPPRESS) \
+                $(DEFINE_KEY)TBB_USE_THREADING_TOOLS=0 $(DEFINE_KEY)__TBB_RML_STATIC=1 $(DEFINE_KEY)__TBB_NO_IMPLICIT_LINKAGE=1
+
+%.$(OBJ): %.cpp
+       $(CPLUS) $(COMPILE_ONLY) $(R_CPLUS_FLAGS) $(PIC_KEY) $(INCLUDES) $<
+
+tbb_misc_rml.$(OBJ): version_string.tmp
+
+RML_TEST.OBJ = test_job_automaton.$(OBJ) test_thread_monitor.$(OBJ) test_rml_tbb.$(OBJ) test_rml_omp.$(OBJ) test_rml_mixed.$(OBJ)
+
+$(RML_TBB_DEP): %_rml.$(OBJ): %.cpp
+       $(CPLUS) $(COMPILE_ONLY) $(OUTPUTOBJ_KEY)$@ $(R_CPLUS_FLAGS) $(PIC_KEY) $(INCLUDES) $< 
+
+$(RML_TEST.OBJ): %.$(OBJ): %.cpp
+       $(CPLUS) $(COMPILE_ONLY) $(R_CPLUS_FLAGS) $(PIC_KEY) $(T_INCLUDES) $<
+
+ifneq (,$(RML.DEF))
+rml.def: $(RML.DEF)
+       $(CMD) "$(CPLUS) $(PREPROC_ONLY) $(RML.DEF) $(filter $(DEFINE_KEY)%,$(CPLUS_FLAGS)) >rml.def 2>$(NUL) || exit 0"
+
+LIB_LINK_FLAGS += $(EXPORT_KEY)rml.def
+$(RML.DLL): rml.def
+endif
+
+$(RML.DLL): BUILDING_LIBRARY = $(RML.DLL)
+$(RML.DLL): $(RML_TBB_DEP) $(RML_SERVER.OBJ) $(RML.RES) $(RML_NO_VERSION.DLL) $(RML_ASM.OBJ)
+       $(LIB_LINK_CMD) $(LIB_OUTPUT_KEY)$(RML.DLL) $(RML_SERVER.OBJ) $(RML_TBB_DEP) $(RML_ASM.OBJ) $(RML.RES) $(LIB_LINK_LIBS) $(LIB_LINK_FLAGS)
+
+ifneq (,$(RML_NO_VERSION.DLL))
+$(RML_NO_VERSION.DLL):
+       echo "INPUT ($(RML.DLL))" > $(RML_NO_VERSION.DLL)
+endif
+
+rml: $(RML.DLL) $(RML_TBB_CLIENT.OBJ) $(RML_OMP_CLIENT.OBJ)
+
+#------------------------------------------------------
+# End of rules for making the RML server shared library
+#------------------------------------------------------
+
+#------------------------------------------------------
+# Define rules for making the RML unit tests
+#------------------------------------------------------
+
+add_debug=$(basename $(1))_debug$(suffix $(1))
+cross_suffix=$(if $(crosstest),$(if $(DEBUG_SUFFIX),$(subst _debug,,$(1)),$(call add_debug,$(1))),$(1))
+
+RML_TESTS = test_job_automaton.$(TEST_EXT) test_thread_monitor.$(TEST_EXT) test_rml_tbb.$(TEST_EXT) test_rml_omp.$(TEST_EXT) test_rml_mixed.$(TEST_EXT) test_rml_omp_c_linkage.$(TEST_EXT)
+
+test_rml_tbb.$(TEST_EXT): test_rml_tbb.$(OBJ) $(RML_TBB_CLIENT.OBJ) $(TBB_DEP_RML_TEST)
+       $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) test_rml_tbb.$(OBJ) $(RML_TBB_CLIENT.OBJ) $(TBB_DEP_RML_TEST) $(LIBS) $(LINK_FLAGS)
+
+test_rml_omp.$(TEST_EXT): test_rml_omp.$(OBJ) $(RML_OMP_CLIENT.OBJ) $(TBB_DEP_NON_RML_TEST)
+       $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) test_rml_omp.$(OBJ) $(RML_OMP_CLIENT.OBJ) $(TBB_DEP_NON_RML_TEST) $(LIBS) $(LINK_FLAGS) 
+
+test_rml_mixed.$(TEST_EXT): test_rml_mixed.$(OBJ) $(RML_TBB_CLIENT.OBJ) $(RML_OMP_CLIENT.OBJ) $(TBB_DEP_RML_TEST)
+       $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) test_rml_mixed.$(OBJ) $(RML_TBB_CLIENT.OBJ) $(RML_OMP_CLIENT.OBJ) $(TBB_DEP_RML_TEST) $(LIBS) $(LINK_FLAGS) 
+
+rml_omp_stub.$(OBJ): rml_omp_stub.cpp
+       $(CPLUS) $(COMPILE_ONLY) $(M_CPLUS_FLAGS) $(WARNING_SUPPRESS) $(T_INCLUDES) $(PIC_KEY) $<
+
+test_rml_omp_c_linkage.$(TEST_EXT): test_rml_omp_c_linkage.$(OBJ) rml_omp_stub.$(OBJ)
+       $(CONLY) $(C_FLAGS) $(OUTPUT_KEY)$@ test_rml_omp_c_linkage.$(OBJ) rml_omp_stub.$(OBJ) $(LIBS) $(LINK_FLAGS)
+
+test_%.$(TEST_EXT): test_%.$(OBJ) $(TBB_DEP_NON_RML_TEST)
+       $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) $< $(TBB_DEP_NON_RML_TEST) $(LIBS) $(LINK_FLAGS)
+
+### run_cmd is usually empty
+rml_test: $(call cross_suffix,$(RML.DLL)) $(TEST_PREREQUISITE) $(RML_TESTS)
+       $(run_cmd) ./test_job_automaton.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_thread_monitor.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_rml_tbb.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_rml_omp.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_rml_mixed.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_rml_omp_c_linkage.$(TEST_EXT) $(args)
+
+#------------------------------------------------------
+# End of rules for making the RML unit tests
+#------------------------------------------------------
+
+# Include automatically generated dependences
+-include *.d
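
The add_debug and cross_suffix helpers defined in this makefile (and again in Makefile.tbbmalloc below) pick which flavour of a library the tests use when crosstest is set, apparently so that tests built for one configuration can be run against the library of the other. A worked expansion, assuming a library name of libtbb.so:

    $(call add_debug,libtbb.so)          ->  libtbb_debug.so
    # crosstest unset:
    $(call cross_suffix,libtbb.so)       ->  libtbb.so
    # crosstest set, cfg=release (DEBUG_SUFFIX empty):
    $(call cross_suffix,libtbb.so)       ->  libtbb_debug.so
    # crosstest set, cfg=debug (DEBUG_SUFFIX=_debug):
    $(call cross_suffix,libtbb_debug.so) ->  libtbb.so
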
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/Makefile.tbb b/deal.II/contrib/tbb/tbb30_104oss/build/Makefile.tbb
new file mode 100644 (file)
index 0000000..2f49eab
--- /dev/null
@@ -0,0 +1,127 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+#------------------------------------------------------------------------------
+# Define rules for making the TBB shared library.
+#------------------------------------------------------------------------------
+
+tbb_root ?= "$(TBB30_INSTALL_DIR)"
+BUILDING_PHASE=1
+include $(tbb_root)/build/common.inc
+DEBUG_SUFFIX=$(findstring _debug,_$(cfg))
+
+#------------------------------------------------------------
+# Define static pattern rules dealing with .cpp source files
+#------------------------------------------------------------
+$(warning CONFIG: cfg=$(cfg) arch=$(arch) compiler=$(compiler) os=$(tbb_os) runtime=$(runtime))
+
+default_tbb: $(TBB.DLL)
+.PHONY: default_tbb tbbvars clean
+.PRECIOUS: %.$(OBJ)
+
+VPATH = $(tbb_root)/src/tbb/$(ASSEMBLY_SOURCE) $(tbb_root)/src/tbb $(tbb_root)/src/old $(tbb_root)/src/rml/client
+
+CPLUS_FLAGS += $(PIC_KEY) $(DEFINE_KEY)__TBB_BUILD=1
+
+# A template to switch off strict-ansi for certain compilation units
+# ifeq (1,$(TBB_NOSTRICT))
+# KNOWN_NOSTRICT = <object-file-name>
+# endif
+
+# Object files (that were compiled from C++ code) that make up TBB
+TBB_CPLUS.OBJ = concurrent_hash_map.$(OBJ) \
+               concurrent_queue.$(OBJ) \
+               concurrent_vector.$(OBJ) \
+               dynamic_link.$(OBJ) \
+               itt_notify.$(OBJ) \
+               cache_aligned_allocator.$(OBJ) \
+               pipeline.$(OBJ) \
+               queuing_mutex.$(OBJ) \
+               queuing_rw_mutex.$(OBJ) \
+               reader_writer_lock.$(OBJ) \
+               spin_rw_mutex.$(OBJ) \
+               spin_mutex.$(OBJ) \
+               critical_section.$(OBJ) \
+               task.$(OBJ) \
+               tbb_misc.$(OBJ) \
+               mutex.$(OBJ) \
+               recursive_mutex.$(OBJ) \
+               condition_variable.$(OBJ) \
+               tbb_thread.$(OBJ) \
+               concurrent_monitor.$(OBJ) \
+               private_server.$(OBJ) \
+               rml_tbb.$(OBJ) \
+               task_group_context.$(OBJ) \
+               governor.$(OBJ) \
+               market.$(OBJ) \
+               arena.$(OBJ) \
+               scheduler.$(OBJ) \
+               observer_proxy.$(OBJ) \
+               tbb_statistics.$(OBJ) \
+               tbb_main.$(OBJ)
+
+# OLD/Legacy object files for backward binary compatibility
+ifeq (,$(findstring $(DEFINE_KEY)TBB_NO_LEGACY,$(CPLUS_FLAGS)))
+TBB_CPLUS_OLD.OBJ = \
+               concurrent_vector_v2.$(OBJ) \
+               concurrent_queue_v2.$(OBJ) \
+               spin_rw_mutex_v2.$(OBJ) \
+               task_v2.$(OBJ)
+endif
+
+# Object files that make up TBB (TBB_ASM.OBJ is platform-specific)
+TBB.OBJ = $(TBB_CPLUS.OBJ) $(TBB_CPLUS_OLD.OBJ) $(TBB_ASM.OBJ)
+
+# Suppress superfluous warnings for TBB compilation
+WARNING_KEY += $(WARNING_SUPPRESS)
+
+include $(tbb_root)/build/common_rules.inc
+
+ifneq (,$(TBB.DEF))
+tbb.def: $(TBB.DEF)
+       $(CMD) "$(CPLUS) $(PREPROC_ONLY) $(TBB.DEF) $(INCLUDES) $(CPLUS_FLAGS) >tbb.def 2>$(NUL) || exit 0"
+
+LIB_LINK_FLAGS += $(EXPORT_KEY)tbb.def
+$(TBB.DLL): tbb.def
+endif
+
+tbbvars.sh:
+       $(MAKE_TBBVARS)
+
+$(TBB.DLL): BUILDING_LIBRARY = $(TBB.DLL)
+$(TBB.DLL): $(TBB.OBJ) $(TBB.RES) tbbvars.sh $(TBB_NO_VERSION.DLL)
+       $(LIB_LINK_CMD) $(LIB_OUTPUT_KEY)$(TBB.DLL) $(TBB.OBJ) $(TBB.RES) $(LIB_LINK_LIBS) $(LIB_LINK_FLAGS)
+
+ifneq (,$(TBB_NO_VERSION.DLL))
+$(TBB_NO_VERSION.DLL):
+       echo "INPUT ($(TBB.DLL))" > $(TBB_NO_VERSION.DLL)
+endif
+
+#clean:
+#      $(RM) *.$(OBJ) *.$(DLL) *.res *.map *.ilk *.pdb *.exp *.manifest *.tmp *.d core core.*[0-9][0-9]
+
+# Include automatically generated dependences
+-include *.d
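
When TBB.DEF is non-empty (the AIX and FreeBSD configurations above deliberately leave it blank), the tbb.def rule generates the linker export list by running the compiler in preprocess-only mode over that file. With the gcc settings shown earlier (PREPROC_ONLY = -E -x c) the recipe is effectively the following sketch; the concrete $(TBB.DEF) path comes from the platform's .inc file and is not spelled out here:

    g++ -E -x c $(TBB.DEF) $(INCLUDES) $(CPLUS_FLAGS) > tbb.def
    # the result is then handed to the linker via LIB_LINK_FLAGS += $(EXPORT_KEY)tbb.def
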
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/Makefile.tbbmalloc b/deal.II/contrib/tbb/tbb30_104oss/build/Makefile.tbbmalloc
new file mode 100644 (file)
index 0000000..19b85f5
--- /dev/null
@@ -0,0 +1,200 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+# default target
+default_malloc: malloc malloc_test
+
+tbb_root ?= $(TBB30_INSTALL_DIR)
+BUILDING_PHASE=1
+TEST_RESOURCE = $(MALLOC.RES)
+include $(tbb_root)/build/common.inc
+DEBUG_SUFFIX=$(findstring _debug,_$(cfg))
+
+MALLOC_ROOT ?= $(tbb_root)/src/tbbmalloc
+MALLOC_SOURCE_ROOT ?= $(MALLOC_ROOT)
+
+VPATH = $(tbb_root)/src/tbb/$(ASSEMBLY_SOURCE) $(tbb_root)/src/tbb $(tbb_root)/src/test
+VPATH += $(MALLOC_ROOT) $(MALLOC_SOURCE_ROOT)
+
+CPLUS_FLAGS += $(if $(crosstest),$(DEFINE_KEY)__TBBMALLOC_NO_IMPLICIT_LINKAGE=1)
+
+include $(tbb_root)/build/common_rules.inc
+
+#------------------------------------------------------
+# Define rules for making the TBBMalloc shared library.
+#------------------------------------------------------
+
+# Object files that make up TBBMalloc
+MALLOC_CPLUS.OBJ = tbbmalloc.$(OBJ) dynamic_link.$(OBJ)
+MALLOC_CUSTOM.OBJ += tbb_misc_malloc.$(OBJ)
+MALLOC_ASM.OBJ = $(TBB_ASM.OBJ)
+
+# MALLOC_CPLUS.OBJ is built in two steps due to Intel Compiler Tracker # C69574
+MALLOC_CPLUS.OBJ += frontend.$(OBJ) backend.$(OBJ) large_objects.$(OBJ) backref.$(OBJ)
+MALLOC.OBJ := $(MALLOC_CPLUS.OBJ) $(MALLOC_ASM.OBJ) $(MALLOC_CUSTOM.OBJ) itt_notify.$(OBJ)
+PROXY.OBJ := proxy.$(OBJ) tbb_function_replacement.$(OBJ)
+M_CPLUS_FLAGS := $(subst $(WARNING_KEY),,$(M_CPLUS_FLAGS)) $(DEFINE_KEY)__TBB_BUILD=1
+M_INCLUDES = $(INCLUDES) $(INCLUDE_KEY)$(MALLOC_ROOT) $(INCLUDE_KEY)$(MALLOC_SOURCE_ROOT)
+
+# Suppress superfluous warnings for TBBmalloc compilation
+$(MALLOC.OBJ): M_CPLUS_FLAGS += $(WARNING_SUPPRESS)
+
+itt_notify.$(OBJ): CPLUS_FLAGS += $(PIC_KEY)
+
+$(PROXY.OBJ): %.$(OBJ): %.cpp
+       $(CPLUS) $(COMPILE_ONLY) $(CPLUS_FLAGS) $(PIC_KEY) $(M_INCLUDES) $<
+
+$(MALLOC_CPLUS.OBJ): %.$(OBJ): %.cpp
+       $(CPLUS) $(COMPILE_ONLY) $(M_CPLUS_FLAGS) $(PIC_KEY) $(M_INCLUDES) $<
+
+tbb_misc_malloc.$(OBJ): tbb_misc.cpp version_string.tmp
+       $(CPLUS) $(COMPILE_ONLY) $(M_CPLUS_FLAGS) $(PIC_KEY) $(OUTPUTOBJ_KEY)$@ $(INCLUDE_KEY). $(INCLUDES) $<
+
+MALLOC_LINK_FLAGS = $(LIB_LINK_FLAGS)
+PROXY_LINK_FLAGS = $(LIB_LINK_FLAGS) 
+
+ifneq (,$(MALLOC.DEF))
+tbbmalloc.def: $(MALLOC.DEF)
+       $(CMD) "$(CPLUS) $(PREPROC_ONLY) $(MALLOC.DEF) $(CPLUS_FLAGS) >tbbmalloc.def 2>$(NUL) || exit 0"
+
+MALLOC_LINK_FLAGS += $(EXPORT_KEY)tbbmalloc.def
+$(MALLOC.DLL): tbbmalloc.def
+endif
+
+$(MALLOC.DLL): BUILDING_LIBRARY = $(MALLOC.DLL)
+$(MALLOC.DLL): $(MALLOC.OBJ) $(MALLOC.RES) $(MALLOC_NO_VERSION.DLL)
+       $(LIB_LINK_CMD) $(LIB_OUTPUT_KEY)$(MALLOC.DLL) $(MALLOC.OBJ) $(MALLOC.RES) $(LIB_LINK_LIBS) $(MALLOC_LINK_FLAGS)
+
+ifneq (,$(MALLOCPROXY.DEF))
+tbbmallocproxy.def: $(MALLOCPROXY.DEF)
+       $(CMD) "$(CPLUS) $(PREPROC_ONLY) $(MALLOCPROXY.DEF) $(CPLUS_FLAGS) >tbbmallocproxy.def 2>$(NUL) || exit 0"
+
+PROXY_LINK_FLAGS += $(EXPORT_KEY)tbbmallocproxy.def
+$(MALLOCPROXY.DLL): tbbmallocproxy.def
+endif
+
+ifneq (,$(MALLOCPROXY.DLL))
+$(MALLOCPROXY.DLL): BUILDING_LIBRARY = $(MALLOCPROXY.DLL)
+$(MALLOCPROXY.DLL): $(PROXY.OBJ) $(MALLOCPROXY_NO_VERSION.DLL) $(MALLOC.DLL) $(MALLOC.RES)
+       $(LIB_LINK_CMD) $(LIB_OUTPUT_KEY)$(MALLOCPROXY.DLL) $(PROXY.OBJ) $(MALLOC.RES) $(LIB_LINK_LIBS) $(LINK_MALLOC.LIB) $(PROXY_LINK_FLAGS)
+
+malloc: $(MALLOCPROXY.DLL)
+endif
+
+ifneq (,$(MALLOC_NO_VERSION.DLL))
+$(MALLOC_NO_VERSION.DLL):
+       echo "INPUT ($(MALLOC.DLL))" > $(MALLOC_NO_VERSION.DLL)
+endif
+
+ifneq (,$(MALLOCPROXY_NO_VERSION.DLL))
+$(MALLOCPROXY_NO_VERSION.DLL):
+       echo "INPUT ($(MALLOCPROXY.DLL))" > $(MALLOCPROXY_NO_VERSION.DLL)
+endif
+
+malloc: $(MALLOC.DLL) $(MALLOCPROXY.DLL)
+
+malloc_dll: $(MALLOC.DLL) 
+
+malloc_proxy_dll: $(MALLOCPROXY.DLL)
+
+.PHONY: malloc malloc_dll malloc_proxy_dll
+
+#------------------------------------------------------
+# End of rules for making the TBBMalloc shared library
+#------------------------------------------------------
+
+#------------------------------------------------------
+# Define rules for making the TBBMalloc unit tests
+#------------------------------------------------------
+
+add_debug=$(basename $(1))_debug$(suffix $(1))
+cross_suffix=$(if $(crosstest),$(if $(DEBUG_SUFFIX),$(subst _debug,,$(1)),$(call add_debug,$(1))),$(1))
+
+MALLOC_MAIN_TESTS = test_ScalableAllocator.$(TEST_EXT) \
+                    test_ScalableAllocator_STL.$(TEST_EXT) \
+                    test_malloc_compliance.$(TEST_EXT) \
+                    test_malloc_regression.$(TEST_EXT) \
+                    test_malloc_init_shutdown.$(TEST_EXT)
+MALLOC_OVERLOAD_TESTS =  test_malloc_overload.$(TEST_EXT) test_malloc_overload_proxy.$(TEST_EXT) test_malloc_atexit.$(TEST_EXT)
+
+MALLOC_LIB = $(call cross_suffix,$(MALLOC.LIB))
+MALLOC_PROXY_LIB = $(call cross_suffix,$(MALLOCPROXY.LIB))
+
+ifeq (windows.gcc,$(tbb_os).$(compiler))
+test_malloc_overload.$(TEST_EXT): LIBS += $(MALLOC_PROXY_LIB)
+endif
+
+test_malloc_overload.$(TEST_EXT): test_malloc_overload.cpp
+       $(CPLUS) $(OUTPUT_KEY)$@ $(subst /MT,/MD,$(M_CPLUS_FLAGS)) $(M_INCLUDES) $< $(LIBDL) $(LIBS) $(LINK_FLAGS)
+test_malloc_overload_proxy.$(TEST_EXT): test_malloc_overload.cpp $(MALLOC_PROXY_LIB)
+       $(CPLUS) $(OUTPUT_KEY)$@ $(subst /MT,/MD,$(M_CPLUS_FLAGS)) $(M_INCLUDES) $< $(LIBDL) $(MALLOC_PROXY_LIB) $(LIBS) $(LINK_FLAGS)
+
+test_malloc_whitebox.$(TEST_EXT): test_malloc_whitebox.cpp $(MALLOC_ASM.OBJ) tbb_misc_malloc.$(OBJ)
+       $(CPLUS) $(OUTPUT_KEY)$@ $(M_CPLUS_FLAGS) $(M_INCLUDES) $^ $(LIBS) $(LIBDL) $(LINK_FLAGS)
+
+test_malloc_lib_unload.$(TEST_EXT): test_malloc_lib_unload.cpp
+       $(CPLUS) $(OUTPUT_KEY)$@ $(M_CPLUS_FLAGS) $(M_INCLUDES) $^ $(LIBS) $(LIBDL) $(LINK_FLAGS)
+
+$(MALLOC_MAIN_TESTS): %.$(TEST_EXT): %.$(OBJ) $(MALLOC_LIB)
+       $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) $< $(MALLOC_LIB) $(LIBS) $(LINK_FLAGS)
+
+ifeq (,$(NO_C_TESTS))
+MALLOC_C_TESTS = test_malloc_pure_c.$(TEST_EXT)
+
+$(MALLOC_C_TESTS): %.$(TEST_EXT): %.$(OBJ) $(MALLOC_LIB)
+       $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) $^ $(LIBS) $(LINK_FLAGS)
+endif
+
+# Rules for generating a test DLL
+%_dll.$(DLL): %_dll.$(OBJ)
+       $(LIB_LINK_CMD) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) $(PIC_KEY) $< $(LIBS) $(DYLIB_KEY) $(LIB_LINK_FLAGS)
+
+test_malloc_atexit.$(TEST_EXT): test_malloc_atexit.$(OBJ) test_malloc_atexit_dll.$(DLL)
+       $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) $< $(MALLOC_PROXY_LIB) $(MALLOC_LIB) test_malloc_atexit_dll.$(LIBEXT) $(LIBS) $(LINK_FLAGS)
+
+MALLOC_TESTS = $(MALLOC_MAIN_TESTS) $(MALLOC_OVERLOAD_TESTS) $(MALLOC_C_TESTS) test_malloc_whitebox.$(TEST_EXT) test_malloc_lib_unload.$(TEST_EXT)
+# run_cmd is usually empty
+malloc_test: $(call cross_suffix,$(MALLOC.DLL)) $(TEST_PREREQUISITE) $(MALLOC_TESTS)
+       $(run_cmd) ./test_malloc_atexit.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_malloc_lib_unload.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_malloc_whitebox.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) $(TEST_LAUNCHER) -l $(call cross_suffix,$(MALLOCPROXY.DLL)) test_malloc_overload.$(TEST_EXT) $(args)
+       $(run_cmd) $(TEST_LAUNCHER) test_malloc_overload_proxy.$(TEST_EXT) $(args)
+       $(run_cmd) $(TEST_LAUNCHER) test_malloc_compliance.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_ScalableAllocator.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_ScalableAllocator_STL.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_malloc_regression.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_malloc_init_shutdown.$(TEST_EXT) $(args)
+ifeq (,$(NO_C_TESTS))
+       $(run_cmd) ./test_malloc_pure_c.$(TEST_EXT) $(args)
+endif
+
+#------------------------------------------------------
+# End of rules for making the TBBMalloc unit tests
+#------------------------------------------------------
+
+# Include automatically generated dependences
+-include *.d
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/Makefile.test b/deal.II/contrib/tbb/tbb30_104oss/build/Makefile.test
new file mode 100644 (file)
index 0000000..90f51a4
--- /dev/null
@@ -0,0 +1,290 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+#------------------------------------------------------------------------------
+# Define rules for making the TBB tests.
+#------------------------------------------------------------------------------
+.PHONY: default test_tbb_plain test_tbb_old clean
+
+default: test_tbb_plain test_tbb_old
+
+tbb_root ?= $(TBB30_INSTALL_DIR)
+BUILDING_PHASE=1
+TEST_RESOURCE = $(TBB.RES)
+include $(tbb_root)/build/common.inc
+DEBUG_SUFFIX=$(findstring _debug,$(call cross_cfg,_$(cfg)))
+
+#------------------------------------------------------------
+# Define static pattern rules dealing with .cpp source files
+#------------------------------------------------------------
+
+VPATH = $(tbb_root)/src/tbb/$(ASSEMBLY_SOURCE) $(tbb_root)/src/tbb $(tbb_root)/src/rml/client $(tbb_root)/src/old $(tbb_root)/src/test $(tbb_root)/src/perf
+
+CPLUS_FLAGS += $(if $(crosstest),$(DEFINE_KEY)__TBB_NO_IMPLICIT_LINKAGE=1)
+
+include $(tbb_root)/build/common_rules.inc
+
+# Rule for generating an executable test
+%.$(TEST_EXT): %.$(OBJ) $(TBB.LIB)
+       $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) $< $(LINK_TBB.LIB) $(LIBS) $(AUX_LIBS) $(LINK_FLAGS)
+
+# Rules for generating a test DLL
+%_dll.$(DLL): %_dll.$(OBJ) $(TBB.LIB)
+       $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) $(PIC_KEY) $< $(LINK_TBB.LIB) $(LIBS) $(LINK_FLAGS) $(DYLIB_KEY)
+.PRECIOUS: %_dll.$(OBJ) %_dll.$(DLL)
+
+# Rules for the tests, which use TBB in a dynamically loadable library
+test_model_plugin.$(TEST_EXT): test_model_plugin.$(OBJ) test_model_plugin_dll.$(DLL)
+       $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) $< $(LIBDL) $(LIBS) $(LINK_FLAGS)
+
+# tbb_misc.$(OBJ) has to be specified here (instead of harness_inject_scheduler.h) because it carries a dependency on version_string.tmp
+TASK_CPP_DEPENDENCIES = $(TBB_ASM.OBJ) tbb_misc.$(OBJ)
+ifeq (,$(codecov))
+    TASK_CPP_DEPENDENCIES += itt_notify.$(OBJ)
+endif
+
+# These executables don't depend on the TBB library, but include core .cpp files directly
+TASK_CPP_DIRECTLY_INCLUDED = test_eh_tasks.$(TEST_EXT) \
+ test_task_leaks.$(TEST_EXT) \
+ test_task_assertions.$(TEST_EXT) \
+ test_fast_random.$(TEST_EXT)
+
+# Necessary to locate version_string.tmp referenced from directly included tbb_misc.cpp
+INCLUDES += $(INCLUDE_KEY).
+
+$(TASK_CPP_DIRECTLY_INCLUDED): WARNING_KEY += $(WARNING_SUPPRESS)
+
+$(TASK_CPP_DIRECTLY_INCLUDED): %.$(TEST_EXT) : %.$(OBJ) $(TASK_CPP_DEPENDENCIES)
+       $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) $^ $(LIBDL) $(LIBS) $(LINK_FLAGS)
+
+test_tbb_header2.$(OBJ): test_tbb_header.cpp
+       $(CPLUS) $(COMPILE_ONLY) $(CPLUS_FLAGS) $(CXX_ONLY_FLAGS) $(CXX_WARN_SUPPRESS) $(INCLUDES) $(DEFINE_KEY)__TBB_TEST_SECONDARY=1 $< $(OUTPUTOBJ_KEY)$@
+
+# Detecting "multiple definition" linker error using the test that covers the whole library
+test_tbb_header.$(TEST_EXT): test_tbb_header.$(OBJ) test_tbb_header2.$(OBJ) $(TBB.LIB)
+       $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) test_tbb_header.$(OBJ) test_tbb_header2.$(OBJ) $(LINK_TBB.LIB) $(LIBS) $(LINK_FLAGS)
+
+# The main list of TBB tests
+TEST_TBB_PLAIN.EXE = test_assembly.$(TEST_EXT)   \
+       test_aligned_space.$(TEST_EXT)               \
+       test_atomic.$(TEST_EXT)                      \
+       test_blocked_range.$(TEST_EXT)               \
+       test_blocked_range2d.$(TEST_EXT)             \
+       test_blocked_range3d.$(TEST_EXT)             \
+       test_concurrent_queue.$(TEST_EXT)            \
+       test_concurrent_vector.$(TEST_EXT)           \
+       test_concurrent_unordered.$(TEST_EXT)        \
+       test_concurrent_hash_map.$(TEST_EXT)         \
+       test_enumerable_thread_specific.$(TEST_EXT)  \
+       test_handle_perror.$(TEST_EXT)               \
+       test_halt.$(TEST_EXT)                        \
+       test_lambda.$(TEST_EXT)                      \
+       test_model_plugin.$(TEST_EXT)                \
+       test_mutex.$(TEST_EXT)                       \
+       test_mutex_native_threads.$(TEST_EXT)        \
+       test_rwm_upgrade_downgrade.$(TEST_EXT)       \
+       test_cache_aligned_allocator_STL.$(TEST_EXT) \
+       test_cache_aligned_allocator.$(TEST_EXT)     \
+       test_parallel_for.$(TEST_EXT)                \
+       test_parallel_reduce.$(TEST_EXT)             \
+       test_parallel_sort.$(TEST_EXT)               \
+       test_parallel_scan.$(TEST_EXT)               \
+       test_parallel_while.$(TEST_EXT)              \
+       test_parallel_do.$(TEST_EXT)                 \
+       test_pipeline.$(TEST_EXT)                    \
+       test_pipeline_with_tbf.$(TEST_EXT)           \
+       test_parallel_pipeline.$(TEST_EXT)           \
+       test_task_scheduler_init.$(TEST_EXT)         \
+       test_task_scheduler_observer.$(TEST_EXT)     \
+       test_task.$(TEST_EXT)                        \
+       test_tbb_thread.$(TEST_EXT)                  \
+       test_std_thread.$(TEST_EXT)                  \
+       test_tick_count.$(TEST_EXT)                  \
+       test_inits_loop.$(TEST_EXT)                  \
+       test_yield.$(TEST_EXT)                       \
+       test_eh_algorithms.$(TEST_EXT)               \
+       test_parallel_invoke.$(TEST_EXT)             \
+       test_task_group.$(TEST_EXT)                  \
+       test_ittnotify.$(TEST_EXT)                   \
+       test_parallel_for_each.$(TEST_EXT)           \
+       test_tbb_header.$(TEST_EXT)                  \
+       test_combinable.$(TEST_EXT)                  \
+       test_task_auto_init.$(TEST_EXT)              \
+       test_concurrent_monitor.$(TEST_EXT)          \
+       test_critical_section.$(TEST_EXT)            \
+       test_semaphore.$(TEST_EXT)            \
+       test_reader_writer_lock.$(TEST_EXT)          \
+       test_tbb_condition_variable.$(TEST_EXT)      \
+       test_intrusive_list.$(TEST_EXT)              \
+       test_cilk_interop.$(TEST_EXT)              \
+       test_tbb_version.$(TEST_EXT)                 # insert new files right above
+
+ifdef OPENMP_FLAG
+       TEST_TBB_PLAIN.EXE += test_tbb_openmp
+test_openmp.$(TEST_EXT): test_openmp.cpp
+       $(CPLUS) $(OPENMP_FLAG) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) $(INCLUDES) $< $(LIBS) $(LINK_TBB.LIB) $(LINK_FLAGS)
+.PHONY: test_tbb_openmp
+test_tbb_openmp: $(TEST_PREREQUISITE) test_openmp.$(TEST_EXT)
+       $(run_cmd) ./test_openmp.$(TEST_EXT) 1:4
+
+endif
+
+$(TEST_TBB_PLAIN.EXE): WARNING_KEY += $(TEST_WARNING_KEY)
+
+# Run tests that are in TASK_CPP_DIRECTLY_INCLUDED and TEST_TBB_PLAIN.EXE
+# The tests are ordered so that simpler components are tested first.
+# If a component Y uses component X, then tests for Y should come after tests for X.
+# Note that usually run_cmd is empty, and tests run directly
+test_tbb_plain: $(TEST_PREREQUISITE) $(TASK_CPP_DIRECTLY_INCLUDED) $(TEST_TBB_PLAIN.EXE)
+       $(run_cmd) ./test_tbb_version.$(TEST_EXT) $(args)
+        # Checking TBB version first to make sure the following testing has anything in it
+       $(run_cmd) ./test_assembly.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_atomic.$(TEST_EXT) $(args)
+        # Yes, 4:8 is intended on the next line. 
+       $(run_cmd) ./test_yield.$(TEST_EXT) $(args) 4:8
+       $(run_cmd) ./test_handle_perror.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_task_auto_init.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_task_scheduler_init.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_task_scheduler_observer.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_task_assertions.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_task.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_task_leaks.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_cache_aligned_allocator.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_cache_aligned_allocator_STL.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_blocked_range.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_blocked_range2d.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_blocked_range3d.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_parallel_for.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_parallel_sort.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_aligned_space.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_parallel_reduce.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_parallel_scan.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_parallel_while.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_parallel_do.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_inits_loop.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_lambda.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_mutex.$(TEST_EXT) $(args) 1:3
+       $(run_cmd) ./test_mutex_native_threads.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_rwm_upgrade_downgrade.$(TEST_EXT) $(args) 4
+        # Yes, 4:8 is intended on the next line. 
+       $(run_cmd) ./test_halt.$(TEST_EXT) $(args) 4:8
+       $(run_cmd) ./test_pipeline.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_pipeline_with_tbf.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_parallel_pipeline.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_tick_count.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_concurrent_queue.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_concurrent_vector.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_concurrent_unordered.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_concurrent_hash_map.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_enumerable_thread_specific.$(TEST_EXT) $(args) 0:4
+       $(run_cmd) ./test_combinable.$(TEST_EXT) $(args) 0:4
+    #  $(run_cmd) ./test_model_plugin.$(TEST_EXT) $(args) 4
+       $(run_cmd) ./test_eh_tasks.$(TEST_EXT) $(args) 2:4
+       $(run_cmd) ./test_eh_algorithms.$(TEST_EXT) $(args) 2:4
+       $(run_cmd) ./test_tbb_thread.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_std_thread.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_parallel_invoke.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_task_group.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_ittnotify.$(TEST_EXT) $(args) 2:2
+       $(run_cmd) ./test_parallel_for_each.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_tbb_header.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_concurrent_monitor.$(TEST_EXT) $(args) 6:8
+       $(run_cmd) ./test_critical_section.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_semaphore.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_reader_writer_lock.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_tbb_condition_variable.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_intrusive_list.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_cilk_interop.$(TEST_EXT) $(args)
+       $(run_cmd) ./test_fast_random.$(TEST_EXT) $(args) 1:16
+
+CPLUS_FLAGS_DEPRECATED = $(DEFINE_KEY)TBB_DEPRECATED=1 $(subst $(WARNING_KEY),,$(CPLUS_FLAGS)) $(WARNING_SUPPRESS)
+
+TEST_TBB_OLD.OBJ = test_concurrent_vector_v2.$(OBJ) test_concurrent_queue_v2.$(OBJ) test_mutex_v2.$(OBJ)
+
+TEST_TBB_DEPRECATED.OBJ = test_concurrent_queue_deprecated.$(OBJ) \
+       test_concurrent_vector_deprecated.$(OBJ) \
+
+
+# For deprecated files, we don't mind warnings etc., thus compilation rules are most relaxed
+$(TEST_TBB_OLD.OBJ): %.$(OBJ): %.cpp
+       $(CPLUS) $(COMPILE_ONLY) $(CPLUS_FLAGS_DEPRECATED) $(INCLUDES) $<
+
+%_deprecated.$(OBJ): %.cpp
+       $(CPLUS) $(COMPILE_ONLY) $(OUTPUTOBJ_KEY)$@ $(CPLUS_FLAGS_DEPRECATED) $(INCLUDES) $<
+
+TEST_TBB_OLD.EXE = $(subst .$(OBJ),.$(TEST_EXT),$(TEST_TBB_OLD.OBJ) $(TEST_TBB_DEPRECATED.OBJ))
+
+ifeq (,$(NO_LEGACY_TESTS))
+test_tbb_old: $(TEST_PREREQUISITE) $(TEST_TBB_OLD.EXE)
+       $(run_cmd) ./test_concurrent_vector_v2.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_concurrent_vector_deprecated.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_concurrent_queue_v2.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_concurrent_queue_deprecated.$(TEST_EXT) $(args) 1:4
+       $(run_cmd) ./test_mutex_v2.$(TEST_EXT) $(args) 1
+       $(run_cmd) ./test_mutex_v2.$(TEST_EXT) $(args) 2
+       $(run_cmd) ./test_mutex_v2.$(TEST_EXT) $(args) 4
+else
+test_tbb_old:
+       @echo Legacy tests skipped
+endif
+
+ifneq (,$(codecov))
+codecov_gen:
+       profmerge
+       codecov $(if $(findstring -,$(codecov)),$(codecov),) -demang -comp $(tbb_root)/build/codecov.txt
+endif
+
+test_% debug_%: test_%.$(TEST_EXT) $(TEST_PREREQUISITE)
+ifeq (,$(repeat))
+       $(run_cmd) ./$< $(args)
+else
+ifeq (windows,$(tbb_os))
+       for /L %%i in (1,1,$(repeat)) do echo %%i of $(repeat): && $(run_cmd) $< $(args)
+else
+       for ((i=1;i<=$(repeat);++i)); do echo $$i of $(repeat): && $(run_cmd) ./$< $(args); done
+endif
+endif # repeat
+ifneq (,$(codecov))
+       profmerge
+       codecov $(if $(findstring -,$(codecov)),$(codecov),) -demang -comp $(tbb_root)/build/codecov.txt
+endif
+
+time_%: time_%.$(TEST_EXT) $(TEST_PREREQUISITE)
+       $(run_cmd) ./$< $(args)
+
+
+perf_%: AUX_LIBS = perf_dll.$(LIBEXT)
+perf_%: perf_dll.$(DLL) perf_%.$(TEST_EXT)
+       $(run_cmd) ./$@.$(TEST_EXT) $(args)
+
+clean_%: 
+       $(RM) $*.$(OBJ) $*.exe $*.$(DLL) $*.$(LIBEXT) $*.res $*.map $*.ilk $*.pdb $*.exp $*.*manifest $*.tmp $*.d
+
+clean:
+       $(RM) *.$(OBJ) *.exe *.$(DLL) *.$(LIBEXT) *.res *.map *.ilk *.pdb *.exp *.manifest *.tmp *.d pgopti.* *.dyn core core.*[0-9][0-9]
+
+# Include automatically generated dependences
+-include *.d
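
The generic test_%/debug_% rule near the end lets a single unit test be rebuilt and run on its own, optionally several times via the repeat variable. As a hedged sketch, for the test_parallel_for target from the list above on a non-Windows host with repeat=2 and args="1:4" (run_cmd empty, TEST_EXT set by the platform configuration), the recipe expands to roughly:

    for ((i=1;i<=2;++i)); do
        echo $i of 2: && ./test_parallel_for.$(TEST_EXT) 1:4
    done
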
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/SunOS.gcc.inc b/deal.II/contrib/tbb/tbb30_104oss/build/SunOS.gcc.inc
new file mode 100644 (file)
index 0000000..83e612e
--- /dev/null
@@ -0,0 +1,100 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+COMPILE_ONLY = -c -MMD
+PREPROC_ONLY = -E -x c
+INCLUDE_KEY = -I
+DEFINE_KEY = -D
+OUTPUT_KEY = -o #
+OUTPUTOBJ_KEY = -o #
+PIC_KEY = -fPIC
+WARNING_AS_ERROR_KEY = -Werror
+WARNING_KEY = -Wall
+DYLIB_KEY = -shared
+LIBDL = -ldl
+
+TBB_NOSTRICT = 1
+
+CPLUS = g++ 
+CONLY = gcc
+LIB_LINK_FLAGS = -shared
+LIBS = -lpthread -lrt -ldl 
+C_FLAGS = $(CPLUS_FLAGS) -x c
+
+ifeq ($(cfg), release)
+        CPLUS_FLAGS = -O2 -DUSE_PTHREAD
+endif
+ifeq ($(cfg), debug)
+        CPLUS_FLAGS = -DTBB_USE_DEBUG -g -O0 -DUSE_PTHREAD
+endif
+
+ASM=
+ASM_FLAGS=
+
+TBB_ASM.OBJ=
+
+ifeq (ia64,$(arch))
+# Position-independent code (PIC) is a must for IA-64
+    CPLUS_FLAGS += $(PIC_KEY)
+endif 
+
+ifeq (intel64,$(arch))
+    CPLUS_FLAGS += -m64
+    LIB_LINK_FLAGS += -m64
+endif 
+
+ifeq (ia32,$(arch))
+    CPLUS_FLAGS += -m32
+    LIB_LINK_FLAGS += -m32
+endif 
+
+# for some gcc versions on Solaris, -m64 may imply V9, but perhaps not everywhere (TODO: verify)
+ifeq (sparc,$(arch))
+    CPLUS_FLAGS    += -mcpu=v9 -m64
+    LIB_LINK_FLAGS += -mcpu=v9 -m64
+endif 
+
+#------------------------------------------------------------------------------
+# Setting assembler data.
+#------------------------------------------------------------------------------
+ASSEMBLY_SOURCE=$(arch)-gas
+ifeq (ia64,$(arch))
+    ASM=ias
+    TBB_ASM.OBJ = atomic_support.o lock_byte.o log2.o pause.o
+endif 
+#------------------------------------------------------------------------------
+# End of setting assembler data.
+#------------------------------------------------------------------------------
+
+#------------------------------------------------------------------------------
+# Setting tbbmalloc data.
+#------------------------------------------------------------------------------
+
+M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions -fno-schedule-insns2
+
+#------------------------------------------------------------------------------
+# End of setting tbbmalloc data.
+#------------------------------------------------------------------------------
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/SunOS.inc b/deal.II/contrib/tbb/tbb30_104oss/build/SunOS.inc
new file mode 100644 (file)
index 0000000..60cc44f
--- /dev/null
@@ -0,0 +1,91 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+ifndef arch
+        arch:=$(shell uname -p)
+        ifeq ($(arch),i386)
+            ifeq ($(shell isainfo -b),64)
+                arch:=intel64
+            else
+                arch:=ia32
+            endif
+        endif
+        export arch
+# For non-IA systems running Sun OS, 'arch' will contain whatever is printed by uname -p.
+# In particular, for SPARC architecture it will contain "sparc".
+endif
+
+ifndef runtime
+        gcc_version:=$(shell gcc -v 2>&1 | grep 'gcc version' | sed -e 's/^gcc version //' | sed -e 's/ .*$$//')
+        os_version:=$(shell uname -r)
+        os_kernel_version:=$(shell uname -r | sed -e 's/-.*$$//')
+        export runtime:=cc$(gcc_version)_kernel$(os_kernel_version)
+endif
+
+ifeq ($(arch),sparc)
+        native_compiler := gcc
+        export compiler ?= gcc
+else
+        native_compiler := suncc
+        export compiler ?= suncc
+endif
+# debugger ?= gdb
+
+CMD=$(SHELL) -c
+CWD=$(shell pwd)
+RM?=rm -f
+RD?=rmdir
+MD?=mkdir -p
+NUL= /dev/null
+SLASH=/
+MAKE_VERSIONS=bash $(tbb_root)/build/version_info_sunos.sh $(CPLUS) $(CPLUS_FLAGS) $(INCLUDES) >version_string.tmp
+MAKE_TBBVARS=bash $(tbb_root)/build/generate_tbbvars.sh
+
+ifdef LD_LIBRARY_PATH
+        export LD_LIBRARY_PATH := .:$(LD_LIBRARY_PATH)
+else
+        export LD_LIBRARY_PATH := .
+endif
+
+####### Build settings ########################################################
+
+OBJ = o
+DLL = so
+LIBEXT=so
+
+TBB.DEF = 
+TBB.DLL = libtbb$(DEBUG_SUFFIX).$(DLL)
+TBB.LIB = $(TBB.DLL)
+LINK_TBB.LIB = $(TBB.LIB)
+
+MALLOC.DLL = libtbbmalloc$(DEBUG_SUFFIX).$(DLL)
+MALLOC.LIB = $(MALLOC.DLL)
+
+MALLOCPROXY.DLL = libtbbmalloc_proxy$(DEBUG_SUFFIX).$(DLL)
+
+TBB_NOSTRICT=1
+
+TEST_LAUNCHER=sh $(tbb_root)/build/test_launcher.sh
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/SunOS.suncc.inc b/deal.II/contrib/tbb/tbb30_104oss/build/SunOS.suncc.inc
new file mode 100644 (file)
index 0000000..dab47f1
--- /dev/null
@@ -0,0 +1,98 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+COMPILE_ONLY = -c -xMMD -errtags
+PREPROC_ONLY = -E -xMMD
+INCLUDE_KEY = -I
+DEFINE_KEY = -D
+OUTPUT_KEY = -o #
+OUTPUTOBJ_KEY = -o #
+PIC_KEY = -KPIC
+DYLIB_KEY = -G
+LIBDL = -ldl
+# WARNING_AS_ERROR_KEY = -errwarn=%all
+WARNING_AS_ERROR_KEY = Warning as error
+WARNING_SUPPRESS = -erroff=unassigned,attrskipunsup,badargtype2w,badbinaryopw,wbadasg,wvarhidemem
+tbb_strict=0
+
+TBB_NOSTRICT = 1
+
+CPLUS = CC
+CONLY = cc
+LIB_LINK_FLAGS = -G -R . -M$(tbb_root)/build/suncc.map.pause
+LINK_FLAGS += -M$(tbb_root)/build/suncc.map.pause
+LIBS = -lpthread -lrt -R .
+C_FLAGS = $(CPLUS_FLAGS)
+
+ifeq ($(cfg), release)
+        CPLUS_FLAGS = -mt -xO2 -library=stlport4 -DUSE_PTHREAD $(WARNING_SUPPRESS)
+endif
+ifeq ($(cfg), debug)
+        CPLUS_FLAGS = -mt -DTBB_USE_DEBUG -g -library=stlport4 -DUSE_PTHREAD $(WARNING_SUPPRESS)
+endif
+
+ASM=
+ASM_FLAGS=
+
+TBB_ASM.OBJ=
+
+ifeq (intel64,$(arch))
+    CPLUS_FLAGS += -m64
+    ASM_FLAGS += -m64
+    LIB_LINK_FLAGS += -m64
+endif 
+
+ifeq (ia32,$(arch))
+    CPLUS_FLAGS += -m32
+    LIB_LINK_FLAGS += -m32
+endif 
+
+# TODO: verify whether -m64 implies V9 on relevant Sun Studio versions
+#       (those that handle gcc assembler syntax)
+ifeq (sparc,$(arch))
+    CPLUS_FLAGS    += -m64
+    LIB_LINK_FLAGS += -m64
+endif 
+
+export TBB_CUSTOM_VARS_SH=export CXXFLAGS="-I$${TBB30_INSTALL_DIR}/include -library=stlport4 $(CXXFLAGS) -M$${TBB30_INSTALL_DIR}/build/suncc.map.pause"
+export TBB_CUSTOM_VARS_CSH=setenv CXXFLAGS "-I$${TBB30_INSTALL_DIR}/include -library=stlport4 $(CXXFLAGS) -M$${TBB30_INSTALL_DIR}/build/suncc.map.pause"
+
+#------------------------------------------------------------------------------
+# Setting assembler data.
+#------------------------------------------------------------------------------
+ASSEMBLY_SOURCE=$(arch)-fbe
+#------------------------------------------------------------------------------
+# End of setting assembler data.
+#------------------------------------------------------------------------------
+
+#------------------------------------------------------------------------------
+# Setting tbbmalloc data.
+#------------------------------------------------------------------------------
+M_INCLUDES = $(INCLUDES) -I$(MALLOC_ROOT) -I$(MALLOC_SOURCE_ROOT)
+M_CPLUS_FLAGS = $(CPLUS_FLAGS)
+#------------------------------------------------------------------------------
+# End of setting tbbmalloc data.
+#------------------------------------------------------------------------------
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/codecov.txt b/deal.II/contrib/tbb/tbb30_104oss/build/codecov.txt
new file mode 100644 (file)
index 0000000..e22f805
--- /dev/null
@@ -0,0 +1,7 @@
+src/tbb
+src/tbbmalloc
+include/tbb
+src/rml/server
+src/rml/client
+src/rml/include
+source/malloc
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/common.inc b/deal.II/contrib/tbb/tbb30_104oss/build/common.inc
new file mode 100644 (file)
index 0000000..c9e6b23
--- /dev/null
@@ -0,0 +1,108 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+ifndef tbb_os
+
+  # Windows sets environment variable OS; for other systems, ask uname
+  ifeq ($(OS),)
+    OS:=$(shell uname)
+    ifeq ($(OS),)
+      $(error "Cannot detect operating system")
+    endif
+    export tbb_os=$(OS)
+  endif
+
+  ifeq ($(OS), Windows_NT)
+    export tbb_os=windows
+  endif
+  ifeq ($(OS), Linux)
+    export tbb_os=linux
+  endif
+  ifeq ($(OS), Darwin)
+    export tbb_os=macos
+  endif
+
+endif # !tbb_os
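+
+# Illustrative override, assuming GNU make is invoked as 'gmake': running
+#   gmake tbb_os=linux ...
+# bypasses the detection above and forces the corresponding OS include below.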
+
+ifeq ($(wildcard $(tbb_root)/build/$(tbb_os).inc),)
+  $(error "$(tbb_os)" is not supported. Add build/$(tbb_os).inc file with os-specific settings )
+endif
+
+# detect arch and runtime versions, provide common os-specific definitions
+include $(tbb_root)/build/$(tbb_os).inc
+
+ifeq ($(arch),)
+ $(error Architecture not detected)
+endif
+ifeq ($(runtime),)
+ $(error Runtime version not detected)
+endif
+ifeq ($(wildcard $(tbb_root)/build/$(tbb_os).$(compiler).inc),)
+  $(error Compiler "$(compiler)" is not supported on $(tbb_os). Add build/$(tbb_os).$(compiler).inc file with compiler-specific settings )
+endif
+
+ifdef target
+ ifeq ($(wildcard $(tbb_root)/build/$(target).inc),)
+  $(error "$(target)" is not supported. Add build/$(target).inc file)
+ endif
+ include $(tbb_root)/build/$(target).inc
+endif
+
+# Support for running debug tests to release library and vice versa
+flip_cfg=$(subst _flipcfg,_release,$(subst _release,_debug,$(subst _debug,_flipcfg,$(1))))
+cross_cfg = $(if $(crosstest),$(call flip_cfg,$(1)),$(1))
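+# For illustration (test_foo is a placeholder target name):
+# $(call flip_cfg,test_foo_debug) yields test_foo_release and vice versa;
+# cross_cfg applies this flip only when 'crosstest' is set.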
+
+ifdef BUILDING_PHASE
+ # Setting default configuration to release
+ cfg?=release
+ # No lambdas or other C++0x extensions by default for compilers that implement them as experimental features
+ lambdas ?= 0
+ cpp0x ?= 0
+ # include compiler-specific build configurations
+ -include $(tbb_root)/build/$(tbb_os).$(compiler).inc
+ ifdef extra_inc
+  -include $(tbb_root)/build/$(extra_inc)
+ endif
+endif
+ifneq ($(BUILDING_PHASE),1)
+ # definitions for top-level Makefiles
+ origin_build_dir:=$(origin tbb_build_dir)
+ tbb_build_dir?=$(tbb_root)$(SLASH)build
+ tbb_build_prefix?=$(tbb_os)_$(arch)_$(compiler)_$(runtime)
+ work_dir=$(tbb_build_dir)$(SLASH)$(tbb_build_prefix)
+ ifneq ($(BUILDING_PHASE),0)
+  work_dir:=$(work_dir)
+  # assign new value for tbb_root if path is not absolute (the filter keeps only /* paths)
+  ifeq ($(filter /% $(SLASH)%, $(subst :, ,$(tbb_root)) ),)
+   ifeq ($(origin_build_dir),undefined)
+    override tbb_root:=../..
+   else
+    override tbb_root:=$(CWD)/$(tbb_root)
+   endif
+  endif
+  export tbb_root
+ endif # BUILDING_PHASE != 0
+endif  # BUILDING_PHASE != 1
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/common_rules.inc b/deal.II/contrib/tbb/tbb30_104oss/build/common_rules.inc
new file mode 100644 (file)
index 0000000..95dbad2
--- /dev/null
@@ -0,0 +1,126 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+ifeq ($(tbb_strict),1)
+  ifeq ($(WARNING_AS_ERROR_KEY),)
+    $(error WARNING_AS_ERROR_KEY is empty)
+  endif
+  # Do not remove line below!
+  WARNING_KEY += $(WARNING_AS_ERROR_KEY)
+endif
+
+ifndef TEST_EXT
+    TEST_EXT = exe
+endif
+
+.PRECIOUS: %.$(OBJ) %.$(TEST_EXT) %.res
+
+INCLUDES += $(INCLUDE_KEY)$(tbb_root)/src $(INCLUDE_KEY)$(tbb_root)/src/rml/include $(INCLUDE_KEY)$(tbb_root)/include
+
+CPLUS_FLAGS += $(WARNING_KEY) $(CXXFLAGS)
+LINK_FLAGS += $(LDFLAGS)
+LIB_LINK_FLAGS += $(LDFLAGS)
+CPLUS_FLAGS_NOSTRICT = $(subst -strict-ansi,-ansi,$(CPLUS_FLAGS))
+
+LIB_LINK_CMD ?= $(CPLUS) $(PIC_KEY)
+ifeq ($(origin LIB_OUTPUT_KEY), undefined)
+    LIB_OUTPUT_KEY = $(OUTPUT_KEY)
+endif
+ifeq ($(origin LIB_LINK_LIBS), undefined)
+    LIB_LINK_LIBS = $(LIBDL) $(LIBS)
+endif
+
+CONLY ?= $(CPLUS)
+
+# The most generic rules
+%.$(OBJ): %.cpp
+       $(CPLUS) $(COMPILE_ONLY) $(CPLUS_FLAGS) $(CXX_ONLY_FLAGS) $(CXX_WARN_SUPPRESS) $(INCLUDES) $<
+
+%.$(OBJ): %.c
+       $(CONLY) $(COMPILE_ONLY) $(C_FLAGS) $(INCLUDES) $<
+
+%.$(OBJ): %.asm
+       $(ASM) $(ASM_FLAGS) $<
+
+%.$(OBJ): %.s
+       cpp <$< | grep -v '^#' >$*.tmp
+       $(ASM) $(ASM_FLAGS) -o $@ $*.tmp
+       rm $*.tmp
+
+# Rule for generating .E file if needed for visual inspection
+%.E: %.cpp
+       $(CPLUS) $(CPLUS_FLAGS) $(CXX_ONLY_FLAGS) $(INCLUDES) $(PREPROC_ONLY) $< >$@
+
+# TODO Rule for generating .asm file if needed for visual inspection
+%.asm: %.cpp
+       $(CPLUS) /c /Fa $(CPLUS_FLAGS) $(CXX_ONLY_FLAGS) $(INCLUDES) $<
+
+# TODO Rule for generating .s file if needed for visual inspection
+%.s: %.cpp
+       $(CPLUS) -S $(CPLUS_FLAGS) $(CXX_ONLY_FLAGS) $(INCLUDES) $<
+
+# Customizations
+
+ifeq (1,$(TBB_NOSTRICT))
+# GNU 3.2.3 headers contain ISO syntax that is rejected by the Intel compiler in -strict-ansi mode.
+# The Mac uses gcc, so the list is empty for that platform.
+# The files below need the -strict-ansi flag downgraded to -ansi to compile
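+# Concretely, CPLUS_FLAGS_NOSTRICT (defined above) is just CPLUS_FLAGS with
+# -strict-ansi replaced by -ansi, and the rule below applies it to the files
+# listed in KNOWN_NOSTRICT.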
+
+$(KNOWN_NOSTRICT): %.$(OBJ): %.cpp
+       $(CPLUS) $(COMPILE_ONLY) $(CPLUS_FLAGS_NOSTRICT) $(CXX_ONLY_FLAGS) $(CXX_WARN_SUPPRESS) $(INCLUDES) $<
+endif
+
+$(KNOWN_WARNINGS): %.$(OBJ): %.cpp
+       $(CPLUS) $(COMPILE_ONLY) $(subst $(WARNING_KEY),,$(CPLUS_FLAGS)) $(CXX_ONLY_FLAGS) $(INCLUDES) $<
+
+tbb_misc.$(OBJ): tbb_misc.cpp version_string.tmp
+       $(CPLUS) $(COMPILE_ONLY) $(CPLUS_FLAGS) $(CXX_ONLY_FLAGS) $(INCLUDE_KEY). $(INCLUDES) $<
+
+tbb_misc.E: tbb_misc.cpp version_string.tmp
+       $(CPLUS) $(CPLUS_FLAGS) $(CXX_ONLY_FLAGS) $(INCLUDE_KEY). $(INCLUDES) $(PREPROC_ONLY) $< >$@
+
+%.res: %.rc version_string.tmp $(TBB.MANIFEST)
+       rc /Fo$@ $(INCLUDES) $(filter /D%,$(CPLUS_FLAGS)) $<
+
+ifneq (,$(TBB.MANIFEST))
+$(TBB.MANIFEST):
+       cmd /C "echo #include ^<stdio.h^> >tbbmanifest.c"
+       cmd /C "echo int main(){return 0;} >>tbbmanifest.c"
+       cl /nologo $(C_FLAGS) tbbmanifest.c
+
+version_string.tmp: $(TBB.MANIFEST)
+       $(MAKE_VERSIONS)
+       cmd /C "echo #define TBB_MANIFEST 1 >> version_string.tmp"
+
+else
+version_string.tmp:
+       $(MAKE_VERSIONS)
+endif
+
+
+# Rules for generating a test DLL
+%_dll.$(OBJ): %.cpp
+       $(CPLUS) $(COMPILE_ONLY) $(OUTPUTOBJ_KEY)$@ $(CPLUS_FLAGS) $(PIC_KEY) $(DEFINE_KEY)_USRDLL $(INCLUDES) $<
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/detect.js b/deal.II/contrib/tbb/tbb30_104oss/build/detect.js
new file mode 100644 (file)
index 0000000..8e90dc0
--- /dev/null
@@ -0,0 +1,129 @@
+// Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+//
+// This file is part of Threading Building Blocks.
+//
+// Threading Building Blocks is free software; you can redistribute it
+// and/or modify it under the terms of the GNU General Public License
+// version 2 as published by the Free Software Foundation.
+//
+// Threading Building Blocks is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Threading Building Blocks; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+//
+// As a special exception, you may use this file as part of a free software
+// library without restriction.  Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License.  This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
+function doWork() {
+               var WshShell = WScript.CreateObject("WScript.Shell");
+
+               var fso = new ActiveXObject("Scripting.FileSystemObject");
+
+               var tmpExec;
+
+               if ( WScript.Arguments.Count() > 1 && WScript.Arguments(1) == "gcc" ) {
+                       if ( WScript.Arguments(0) == "/arch" ) {
+                               WScript.Echo( "ia32" );
+                       }
+                       else if ( WScript.Arguments(0) == "/runtime" ) {
+                               WScript.Echo( "mingw" );
+                       }
+                       return;
+               }
+
+               //Compile binary
+               tmpExec = WshShell.Exec("cmd /c echo int main(){return 0;} >detect.c");
+               while ( tmpExec.Status == 0 ) {
+                       WScript.Sleep(100);
+               }
+               
+               tmpExec = WshShell.Exec("cl /MD detect.c /link /MAP");
+               while ( tmpExec.Status == 0 ) {
+                       WScript.Sleep(100);
+               }
+
+               if ( WScript.Arguments(0) == "/arch" ) {
+                       //read compiler banner
+                       var clVersion = tmpExec.StdErr.ReadAll();
+                       
+                       //detect target architecture
+                       var intel64=/AMD64|EM64T|x64/mgi;
+                       var ia64=/IA-64|Itanium/mgi;
+                       var ia32=/80x86/mgi;
+                       if ( clVersion.match(intel64) ) {
+                               WScript.Echo( "intel64" );
+                       } else if ( clVersion.match(ia64) ) {
+                               WScript.Echo( "ia64" );
+                       } else if ( clVersion.match(ia32) ) {
+                               WScript.Echo( "ia32" );
+                       } else {
+                               WScript.Echo( "unknown" );
+                       }
+               }
+
+               if ( WScript.Arguments(0) == "/runtime" ) {
+                       //read map-file
+                       var map = fso.OpenTextFile("detect.map", 1, 0);
+                       var mapContext = map.readAll();
+                       map.Close();
+                       
+                       //detect runtime
+                       var vc71=/MSVCR71\.DLL/mgi;
+                       var vc80=/MSVCR80\.DLL/mgi;
+                       var vc90=/MSVCR90\.DLL/mgi;
+                       var vc100=/MSVCR100\.DLL/mgi;
+                       var psdk=/MSVCRT\.DLL/mgi;
+                       if ( mapContext.match(vc71) ) {
+                               WScript.Echo( "vc7.1" );
+                       } else if ( mapContext.match(vc80) ) {
+                               WScript.Echo( "vc8" );
+                       } else if ( mapContext.match(vc90) ) {
+                               WScript.Echo( "vc9" );
+                       } else if ( mapContext.match(vc100) ) {
+                               WScript.Echo( "vc10" );
+                       } else if ( mapContext.match(psdk) ) {
+                               // Our current naming convention assumes vc7.1 for 64-bit Windows PSDK
+                               WScript.Echo( "vc7.1" ); 
+                       } else {
+                               WScript.Echo( "unknown" );
+                       }
+               }
+
+               // delete intermediate files
+               if ( fso.FileExists("detect.c") )
+                       fso.DeleteFile ("detect.c", false);
+               if ( fso.FileExists("detect.obj") )
+                       fso.DeleteFile ("detect.obj", false);
+               if ( fso.FileExists("detect.map") )
+                       fso.DeleteFile ("detect.map", false);
+               if ( fso.FileExists("detect.exe") )
+                       fso.DeleteFile ("detect.exe", false);
+               if ( fso.FileExists("detect.exe.manifest") )
+                       fso.DeleteFile ("detect.exe.manifest", false);
+}
+
+if ( WScript.Arguments.Count() > 0 ) {
+       
+       try {
+               doWork();
+       } catch( error )
+       {
+               WScript.Echo( "unknown" );
+               WScript.Quit( 0 );
+       }
+
+} else {
+
+       WScript.Echo( "/arch or /runtime should be set" );
+}
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/generate_tbbvars.bat b/deal.II/contrib/tbb/tbb30_104oss/build/generate_tbbvars.bat
new file mode 100644 (file)
index 0000000..9fd3a51
--- /dev/null
@@ -0,0 +1,74 @@
+@echo off
+REM
+REM Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+REM
+REM This file is part of Threading Building Blocks.
+REM
+REM Threading Building Blocks is free software; you can redistribute it
+REM and/or modify it under the terms of the GNU General Public License
+REM version 2 as published by the Free Software Foundation.
+REM
+REM Threading Building Blocks is distributed in the hope that it will be
+REM useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+REM of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+REM GNU General Public License for more details.
+REM
+REM You should have received a copy of the GNU General Public License
+REM along with Threading Building Blocks; if not, write to the Free Software
+REM Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+REM
+REM As a special exception, you may use this file as part of a free software
+REM library without restriction.  Specifically, if other files instantiate
+REM templates or use macros or inline functions from this file, or you compile
+REM this file and link it with other files to produce an executable, this
+REM file does not by itself cause the resulting executable to be covered by
+REM the GNU General Public License.  This exception does not however
+REM invalidate any other reasons why the executable file might be covered by
+REM the GNU General Public License.
+REM
+setlocal
+for %%D in ("%tbb_root%") do set actual_root=%%~fD
+set fslash_root=%actual_root:\=/%
+set bin_dir=%CD%
+set fslash_bin_dir=%bin_dir:\=/%
+set _INCLUDE=INCLUDE& set _LIB=LIB
+if not x%UNIXMODE%==x set _INCLUDE=CPATH& set _LIB=LIBRARY_PATH
+
+if exist tbbvars.bat goto skipbat
+echo Generating local tbbvars.bat
+echo @echo off>tbbvars.bat
+echo SET TBB30_INSTALL_DIR=%actual_root%>>tbbvars.bat
+echo SET TBB_ARCH_PLATFORM=%arch%\%runtime%>>tbbvars.bat
+echo SET TBB_TARGET_ARCH=%arch%>>tbbvars.bat
+echo SET %_INCLUDE%=%%TBB30_INSTALL_DIR%%\include;%%%_INCLUDE%%%>>tbbvars.bat
+echo SET %_LIB%=%bin_dir%;%%%_LIB%%%>>tbbvars.bat
+echo SET PATH=%bin_dir%;%%PATH%%>>tbbvars.bat
+if not x%UNIXMODE%==x echo SET LD_LIBRARY_PATH=%bin_dir%;%%LD_LIBRARY_PATH%%>>tbbvars.bat
+:skipbat
+
+if exist tbbvars.sh goto skipsh
+echo Generating local tbbvars.sh
+echo #!/bin/sh>tbbvars.sh
+echo export TBB30_INSTALL_DIR="%fslash_root%">>tbbvars.sh
+echo export TBB_ARCH_PLATFORM="%arch%\%runtime%">>tbbvars.sh
+echo export TBB_TARGET_ARCH="%arch%">>tbbvars.sh
+echo export %_INCLUDE%="${TBB30_INSTALL_DIR}/include;$%_INCLUDE%">>tbbvars.sh
+echo export %_LIB%="%fslash_bin_dir%;$%_LIB%">>tbbvars.sh
+echo export PATH="%fslash_bin_dir%;$PATH">>tbbvars.sh
+if not x%UNIXMODE%==x echo export LD_LIBRARY_PATH="%fslash_bin_dir%;$LD_LIBRARY_PATH">>tbbvars.sh
+:skipsh
+
+if exist tbbvars.csh goto skipcsh
+echo Generating local tbbvars.csh
+echo #!/bin/csh>tbbvars.csh
+echo setenv TBB30_INSTALL_DIR "%actual_root%">>tbbvars.csh
+echo setenv TBB_ARCH_PLATFORM "%arch%\%runtime%">>tbbvars.csh
+echo setenv TBB_TARGET_ARCH "%arch%">>tbbvars.csh
+echo setenv %_INCLUDE% "${TBB30_INSTALL_DIR}\include;$%_INCLUDE%">>tbbvars.csh
+echo setenv %_LIB% "%bin_dir%;$%_LIB%">>tbbvars.csh
+echo setenv PATH "%bin_dir%;$PATH">>tbbvars.csh
+if not x%UNIXMODE%==x echo setenv LD_LIBRARY_PATH "%bin_dir%;$LD_LIBRARY_PATH">>tbbvars.csh
+:skipcsh
+
+endlocal
+exit
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/generate_tbbvars.sh b/deal.II/contrib/tbb/tbb30_104oss/build/generate_tbbvars.sh
new file mode 100644 (file)
index 0000000..484719f
--- /dev/null
@@ -0,0 +1,76 @@
+#!/bin/bash
+#
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+# Script used to generate tbbvars.[c]sh scripts
+bin_dir="$PWD"  # 
+cd "$tbb_root"  # keep this comments here
+tbb_root="$PWD" # to make it unsensible
+cd "$bin_dir"   # to EOL encoding
+[ "`uname`" = "Darwin" ] && dll_path="DYLD_LIBRARY_PATH" || dll_path="LD_LIBRARY_PATH" #
+[ -f ./tbbvars.sh ] || cat >./tbbvars.sh <<EOF
+#!/bin/bash
+export TBB30_INSTALL_DIR="${tbb_root}" #
+tbb_bin="${bin_dir}" #
+if [ -z "\$CPATH" ]; then #
+    export CPATH="\${TBB30_INSTALL_DIR}/include" #
+else #
+    export CPATH="\${TBB30_INSTALL_DIR}/include:\$CPATH" #
+fi #
+if [ -z "\$LIBRARY_PATH" ]; then #
+    export LIBRARY_PATH="\${tbb_bin}" #
+else #
+    export LIBRARY_PATH="\${tbb_bin}:\$LIBRARY_PATH" #
+fi #
+if [ -z "\$${dll_path}" ]; then #
+    export ${dll_path}="\${tbb_bin}" #
+else #
+    export ${dll_path}="\${tbb_bin}:\$${dll_path}" #
+fi #
+${TBB_CUSTOM_VARS_SH} #
+EOF
+[ -f ./tbbvars.csh ] || cat >./tbbvars.csh <<EOF
+#!/bin/csh
+setenv TBB30_INSTALL_DIR "${tbb_root}" #
+setenv tbb_bin "${bin_dir}" #
+if (! \$?CPATH) then #
+    setenv CPATH "\${TBB30_INSTALL_DIR}/include" #
+else #
+    setenv CPATH "\${TBB30_INSTALL_DIR}/include:\$CPATH" #
+endif #
+if (! \$?LIBRARY_PATH) then #
+    setenv LIBRARY_PATH "\${tbb_bin}" #
+else #
+    setenv LIBRARY_PATH "\${tbb_bin}:\$LIBRARY_PATH" #
+endif #
+if (! \$?${dll_path}) then #
+    setenv ${dll_path} "\${tbb_bin}" #
+else #
+    setenv ${dll_path} "\${tbb_bin}:\$${dll_path}" #
+endif #
+${TBB_CUSTOM_VARS_CSH} #
+EOF
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/index.html b/deal.II/contrib/tbb/tbb30_104oss/build/index.html
new file mode 100644 (file)
index 0000000..ef17214
--- /dev/null
@@ -0,0 +1,230 @@
+<HTML>
+<BODY>
+
+<H2>Overview</H2>
+This directory contains the internal Makefile infrastructure for Threading Building Blocks.
+
+<P>
+See below for how to <A HREF=#build>build</A> TBB and how to <A HREF=#port>port</A> TBB
+to a new platform, operating system or architecture.
+</P>
+
+<H2>Files</H2>
+The files here are not intended to be used directly.  See below for usage.
+<DL>
+<DT><A HREF="Makefile.tbb">Makefile.tbb</A>
+<DD>Main Makefile to build the TBB library.
+    Invoked via 'make tbb' from <A HREF=../Makefile>top-level Makefile</A>.
+<DT><A HREF="Makefile.tbbmalloc">Makefile.tbbmalloc</A>
+<DD>Main Makefile to build the TBB scalable memory allocator library as well as its tests.
+    Invoked via 'make tbbmalloc' from <A HREF=../Makefile>top-level Makefile</A>.
+<DT><A HREF="Makefile.test">Makefile.test</A>
+<DD>Main Makefile to build and run the tests for the TBB library.
+    Invoked via 'make test' from <A HREF=../Makefile>top-level Makefile</A>.
+<DT><A HREF="common.inc">common.inc</A>
+<DD>Main common included Makefile that includes OS-specific and compiler-specific Makefiles.
+<DT>&lt;os&gt;.inc
+<DD>OS-specific Makefile for a particular &lt;os&gt;.
+<DT>&lt;os&gt;.&lt;compiler&gt;.inc
+<DD>Compiler-specific Makefile for a particular &lt;os&gt; / &lt;compiler&gt; combination.
+<DT>*.sh
+<DD>Infrastructure utilities for Linux*, Mac OS* X, and UNIX*-related systems.
+<DT>*.js, *.bat
+<DD>Infrastructure utilities for Windows* systems.
+</DL>
+
+<A NAME=build><H2>To Build</H2></A>
+<P>
+To port TBB to a new platform, operating system or architecture, see the <A HREF=#port>porting directions</A> below.
+</P>
+
+<H3>Software prerequisites:</H3>
+<OL>
+<LI>C++ compiler for the platform, operating system and architecture of interest.
+    Either the native compiler for your system, or, optionally, the appropriate Intel&reg; C++ compiler, may be used.
+<LI>GNU make utility. On Windows*, if a UNIX* emulator is used to run GNU make,
+    it should be able to run Windows* utilities and commands. On Linux*, Mac OS* X, etc.,
+    shell commands issued by GNU make should execute in a Bourne- or bash-compatible shell.
+</OL>
+
+<P>
+TBB libraries can be built by performing the following steps.
+On systems that support only one ABI (e.g., 32-bit), these steps build the libraries for that ABI.
+On systems that support both 64-bit and 32-bit libraries, these steps build the 64-bit libraries
+(Linux*, Mac OS* X, and related systems) or whichever ABI is selected in the development environment (Windows* systems).
+</P>
+<OL>
+<LI>Change to the <A HREF=../index.html>top-level directory</A> of the installed software.
+<LI>If using the Intel&reg; C++ compiler, make sure the appropriate compiler is available in your PATH
+    (e.g., by sourcing the appropriate iccvars script for the compiler to be used).
+<LI>Invoke GNU make using no arguments, for example, 'gmake'.
+</OL>
+
+<P>
+To build TBB libraries for other than the default ABI (e.g., to build 32-bit libraries on Linux*, Mac OS* X,
+or related systems that support both 64-bit and 32-bit libraries), perform the following steps.
+</P>
+<OL>
+<LI>Change to the <A HREF=../index.html>top-level directory</A> of the installed software.
+<LI>If using the Intel&reg; C++ compiler, make sure the appropriate compiler is available in your PATH
+    (e.g., by sourcing the appropriate iccvars script for the compiler to be used).
+<LI>Invoke GNU make as follows, 'gmake arch=ia32'.
+</OL>
+
+<P>The default make target will build the release and debug versions of the TBB library.</P>
+<P>Other targets are available in the top-level Makefile. You might find the following targets useful:
+<UL>
+<LI>'make test' will build and run TBB <A HREF=../src/test>unit-tests</A>;
+<LI>'make examples' will build and run TBB <A HREF=../examples/index.html>examples</A>;
+<LI>'make all' will do all of the above.
+</UL>
+See also the list of other targets below.
+</P>
+
+<P>
+By default, the libraries will be built in sub-directories within the build/ directory.
+The sub-directories are named according to the operating system, architecture, compiler and software environment used
+(the sub-directory names also distinguish release vs. debug libraries).  On Linux*, the software environment comprises
+the GCC, libc and kernel version used.  On Mac OS* X, the software environment comprises the GCC and OS version used.
+On Windows, the software environment comprises the Microsoft* Visual Studio* version used.
+See below for how to change the default build directory.
+</P>
+
+<P>
+To perform different build and/or test operations, use the following steps.
+</P>
+<OL>
+<LI>Change to the <A HREF=../index.html>top-level directory</A> of the installed software.
+<LI>If using the Intel&reg; C++ compiler, make sure the appropriate compiler is available in your PATH
+    (e.g., by sourcing the appropriate iccvars script for the compiler to be used).
+<LI>Invoke GNU make by using one or more of the following commands.
+    <DL>
+    <DT><TT>make</TT>
+    <DD>Default build.  Equivalent to 'make tbb tbbmalloc'.
+    <DT><TT>make all</TT>
+    <DD>Equivalent to 'make tbb tbbmalloc test examples'.
+    <DT><TT>cd src;make release</TT>
+    <DD>Build and test release libraries only.
+    <DT><TT>cd src;make debug</TT>
+    <DD>Build and test debug libraries only. 
+    <DT><TT>make tbb</TT>
+    <DD>Make TBB release and debug libraries.
+    <DT><TT>make tbbmalloc</TT>
+    <DD>Make TBB scalable memory allocator libraries.
+    <DT><TT>make test</TT>
+    <DD>Compile and run the unit-tests.
+    <DT><TT>make examples</TT>
+    <DD>Build libraries and run all examples, like doing 'make debug clean release' from
+       <A HREF=../examples/Makefile>the general example Makefile</A>.
+    <DT><TT>make compiler=<B>{</B>icl, icc<B>}</B> <B>[</B>(above options or targets)<B>]</B></TT>
+    <DD>Build and run as above, but use Intel&reg; compilers instead of default, native compilers
+       (e.g., icl instead of cl.exe on Windows* systems, or icc instead of g++ on Linux* or Mac OS* X systems).
+    <DT><TT>make arch=<B>{</B>ia32, intel64, ia64<B>}</B> <B>[</B>(above options or targets)<B>]</B></TT>
+    <DD>Build and run as above, but build libraries for the selected ABI.
+        Might be useful for cross-compilation; ensure proper environment is set before running this command.
+    <DT><TT>make tbb_root=<B>{</B>(TBB directory)<B>}</B> <B>[</B>(above options or targets)<B>]</B></TT>
+    <DD>Build and run as above; for use when invoking 'make' from a directory other than
+       the <A HREF=../index.html>top-level directory</A>.
+    <DT><TT>make tbb_build_dir=<B>{</B>(build directory)<B>}</B> <B>[</B>(above options or targets)<B>]</B></TT>
+    <DD>Build and run as above, but place the built libraries in the specified directory, rather than in the default
+       sub-directory within the build/ directory. This command may cause build failures if the sources
+       are installed in a directory whose path contains spaces.
+    <DT><TT>make tbb_build_prefix=<B>{</B>(build sub-directory)<B>}</B> <B>[</B>(above options or targets)<B>]</B></TT>
+    <DD>Build and run as above, but place the built libraries in the specified sub-directory within the build/ directory,
+       rather than using the default sub-directory name.
+    <DT><TT>make <B>[</B>(above options)<B>]</B> clean</TT>
+    <DD>Remove any executables or intermediate files produced by the above commands.
+        Includes build directories, object files, libraries and test executables.
+    </DL>
+</OL>
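+
+<P>
+For illustration, several of the options above can be combined in a single invocation, for example
+<TT>gmake compiler=icc arch=intel64 tbb_build_prefix=my_icc_build tbb tbbmalloc</TT>,
+where 'my_icc_build' is an arbitrary sub-directory name and the Intel&reg; C++ compiler is assumed
+to be already set up in the environment.
+</P>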
+
+<A NAME=port><H2>To Port</H2></A>
+<P>
+This section provides information on how to port TBB to a new platform, operating system or architecture.
+A subset or a superset of these steps may be required for porting to a given platform.
+</P>
+
+<H4>To port the TBB source code:</H4>
+<OL>
+<LI>If porting to a new architecture, create a file that describes the architecture-specific details for that architecture.
+    <UL>
+    <LI>Create a &lt;os&gt;_&lt;architecture&gt;.h file in the <A HREF=../include/tbb/machine>include/tbb/machine</A> directory
+       that describes these details.
+       <UL>
+       <LI>The &lt;os&gt;_&lt;architecture&gt;.h is named after the operating system and architecture as recognized by
+           <A HREF=../include/tbb/tbb_machine.h>include/tbb/tbb_machine.h</A> and the Makefile infrastructure.
+       <LI>This file defines the implementations of synchronization operations, and also the
+           scheduler yield function, for the operating system and architecture.
+       <LI>Several examples of &lt;os&gt;_&lt;architecture&gt;.h files can be found in the
+           <A HREF=../include/tbb/machine>include/tbb/machine</A> directory.
+           <UL>
+           <LI>A minimal implementation defines the 4-byte and 8-byte compare-and-swap operations,
+               and the scheduler yield function.  See <A HREF=../include/tbb/machine/mac_ppc.h>include/tbb/machine/mac_ppc.h</A>
+               for an example of a minimal implementation.
+           <LI>More complex implementation examples can also be found in the
+               <A HREF=../include/tbb/machine>include/tbb/machine</A> directory
+               that implement all the individual variants of synchronization operations that TBB uses.
+               Such implementations are more verbose but may achieve better performance on a given architecture.
+           <LI>In a given implementation, any synchronization operation that is not defined is implemented, by default,
+               in terms of 4-byte or 8-byte compare-and-swap.  More operations can thus be added incrementally to increase
+               the performance of an implementation.
+           <LI>In most cases, synchronization operations are implemented as inline assembly code; examples also exist,
+               (e.g., for Intel&reg; Itanium&reg; processors) that use out-of-line assembly code in *.s or *.asm files
+               (see the assembly code sub-directories in the <A HREF=../src/tbb>src/tbb</A> directory).
+           </UL>
+       </UL>
+    <LI>Modify <A HREF=../include/tbb/tbb_machine.h>include/tbb/tbb_machine.h</A>, if needed, to invoke the appropriate
+       &lt;os&gt;_&lt;architecture&gt;.h file in the <A HREF=../include/tbb/machine>include/tbb/machine</A> directory.
+    </UL>
+<LI>Add an implementation of DetectNumberOfWorkers() in <A HREF=../src/tbb/tbb_misc.h>src/tbb/tbb_misc.h</A>,
+    if needed, that returns the number of cores found on the system.  This is used to determine the default
+    number of threads for the TBB task scheduler.
+<LI>Either properly define FillDynamicLinks for use in
+    <A HREF=../src/tbb/cache_aligned_allocator.cpp>src/tbb/cache_aligned_allocator.cpp</A>,
+    or hardcode the allocator to be used.
+<LI>Additional types might be required in the union defined in
+    <A HREF=../include/tbb/aligned_space.h>include/tbb/aligned_space.h</A>
+    to ensure proper alignment on your platform.
+<LI>Changes may be required in <A HREF=../include/tbb/tick_count.h>include/tbb/tick_count.h</A>
+    for systems that do not provide gettimeofday.
+</OL>
+
+<H4>To port the Makefile infrastructure:</H4>
+Modify the appropriate files in the Makefile infrastructure to add a new platform, operating system or architecture as needed.
+See the Makefile infrastructure files for examples.
+<OL>
+<LI>The <A HREF=../Makefile>top-level Makefile</A> includes <A HREF=common.inc>common.inc</A> to determine the operating system.
+    <UL>
+    <LI>To add a new operating system, add the appropriate test to <A HREF=common.inc>common.inc</A>,
+       and create the needed &lt;os&gt;.inc and &lt;os&gt;.&lt;compiler&gt;.inc files (see below).
+    </UL>
+<LI>The &lt;os&gt;.inc file makes OS-specific settings for a particular &lt;os&gt;.
+    <UL>
+    <LI>For example, <A HREF=linux.inc>linux.inc</A> makes settings specific to Linux* systems.
+    <LI>This file performs OS-dependent tests to determine the specific platform and/or architecture,
+       and sets other platform-dependent values.
+    <LI>Add a new &lt;os&gt;.inc file for each new operating system added.
+    </UL>
+<LI>The &lt;os&gt;.&lt;compiler&gt;.inc file makes compiler-specific settings for a particular
+    &lt;os&gt; / &lt;compiler&gt; combination.
+    <UL>
+    <LI>For example, <A HREF=linux.gcc.inc>linux.gcc.inc</A> makes specific settings for using GCC on Linux* systems,
+       and <A HREF=linux.icc.inc>linux.icc.inc</A> makes specific settings for using the Intel&reg; C++ compiler on Linux* systems.
+    <LI>This file sets particular compiler, assembler and linker options required when using a particular
+       &lt;os&gt; / &lt;compiler&gt; combination.
+    <LI>Add a new &lt;os&gt;.&lt;compiler&gt;.inc file for each new &lt;os&gt; / &lt;compiler&gt; combination added.
+    </UL>
+</OL>
+
+<HR>
+<A HREF="../index.html">Up to parent directory</A>
+<P></P>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<P></P>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are 
+registered trademarks or trademarks of Intel Corporation or its 
+subsidiaries in the United States and other countries. 
+<P></P>
+* Other names and brands may be claimed as the property of others.
+</BODY>
+</HTML>
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/linux.gcc.inc b/deal.II/contrib/tbb/tbb30_104oss/build/linux.gcc.inc
new file mode 100644 (file)
index 0000000..e3accbb
--- /dev/null
@@ -0,0 +1,109 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+COMPILE_ONLY = -c -MMD
+PREPROC_ONLY = -E -x c
+INCLUDE_KEY = -I
+DEFINE_KEY = -D
+OUTPUT_KEY = -o #
+OUTPUTOBJ_KEY = -o #
+PIC_KEY = -fPIC
+WARNING_AS_ERROR_KEY = -Werror
+WARNING_KEY = -Wall
+TEST_WARNING_KEY = -Wshadow -Wcast-qual -Woverloaded-virtual -Wnon-virtual-dtor $(if $(findstring cc4., $(runtime)),-Wextra)
+
+WARNING_SUPPRESS = -Wno-parentheses -Wno-non-virtual-dtor
+DYLIB_KEY = -shared
+EXPORT_KEY = -Wl,--version-script,
+LIBDL = -ldl
+
+TBB_NOSTRICT = 1
+
+CPLUS = g++ 
+CONLY = gcc
+LIB_LINK_FLAGS = -shared -Wl,-soname=$(BUILDING_LIBRARY)
+LIBS = -lpthread -lrt 
+C_FLAGS = $(CPLUS_FLAGS)
+
+ifeq ($(cfg), release)
+        CPLUS_FLAGS = -DDO_ITT_NOTIFY -O2 -DUSE_PTHREAD
+endif
+ifeq ($(cfg), debug)
+        CPLUS_FLAGS = -DTBB_USE_DEBUG -DDO_ITT_NOTIFY -g -O0 -DUSE_PTHREAD
+endif
+
+ifneq (0,$(cpp0x))
+    CXX_ONLY_FLAGS = -std=c++0x
+endif
+
+ASM=
+ASM_FLAGS=
+
+TBB_ASM.OBJ=
+
+ifeq (ia64,$(arch))
+# Position-independent code (PIC) is a must on IA-64, even for regular (not shared) executables
+    CPLUS_FLAGS += $(PIC_KEY)
+endif 
+
+ifeq (intel64,$(arch))
+    CPLUS_FLAGS += -m64
+    LIB_LINK_FLAGS += -m64
+endif 
+
+ifeq (ia32,$(arch))
+    CPLUS_FLAGS += -m32 -march=pentium4
+    LIB_LINK_FLAGS += -m32
+endif 
+
+# for some gcc versions on Solaris, -m64 may imply V9, but perhaps not everywhere (TODO: verify)
+ifeq (sparc,$(arch))
+    CPLUS_FLAGS    += -mcpu=v9 -m64
+    LIB_LINK_FLAGS += -mcpu=v9 -m64
+endif 
+
+#------------------------------------------------------------------------------
+# Setting assembler data.
+#------------------------------------------------------------------------------
+ASSEMBLY_SOURCE=$(arch)-gas
+ifeq (ia64,$(arch))
+    ASM=as
+    ASM_FLAGS += -xexplicit
+    TBB_ASM.OBJ = atomic_support.o lock_byte.o log2.o pause.o ia64_misc.o
+endif 
+#------------------------------------------------------------------------------
+# End of setting assembler data.
+#------------------------------------------------------------------------------
+
+#------------------------------------------------------------------------------
+# Setting tbbmalloc data.
+#------------------------------------------------------------------------------
+
+M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions -fno-schedule-insns2
+
+#------------------------------------------------------------------------------
+# End of setting tbbmalloc data.
+#------------------------------------------------------------------------------
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/linux.icc.inc b/deal.II/contrib/tbb/tbb30_104oss/build/linux.icc.inc
new file mode 100644 (file)
index 0000000..a6bd81a
--- /dev/null
@@ -0,0 +1,103 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+COMPILE_ONLY = -c -MMD
+PREPROC_ONLY = -E -x c
+INCLUDE_KEY = -I
+DEFINE_KEY = -D
+OUTPUT_KEY = -o #
+OUTPUTOBJ_KEY = -o #
+PIC_KEY = -fPIC
+WARNING_AS_ERROR_KEY = -Werror
+WARNING_KEY = -w1
+DYLIB_KEY = -shared
+EXPORT_KEY = -Wl,--version-script,
+LIBDL = -ldl
+export COMPILER_VERSION := ICC: $(shell icc -V </dev/null 2>&1 | grep 'Version')
+#TODO: autodetection of arch from COMPILER_VERSION!!
+
+TBB_NOSTRICT = 1
+
+CPLUS = icpc 
+CONLY = icc
+
+ifeq (release,$(cfg))
+CPLUS_FLAGS = -O2 -strict-ansi -DUSE_PTHREAD
+else
+CPLUS_FLAGS = -O0 -g -strict-ansi -DUSE_PTHREAD -DTBB_USE_DEBUG
+endif
+
+ifneq (,$(codecov))
+    CPLUS_FLAGS += -prof-genx
+else
+    CPLUS_FLAGS += -DDO_ITT_NOTIFY
+endif
+
+OPENMP_FLAG = -openmp
+LIB_LINK_FLAGS = -shared -i-static -Wl,-soname=$(BUILDING_LIBRARY)
+LIBS = -lpthread -lrt 
+C_FLAGS = $(CPLUS_FLAGS)
+
+ASM=
+ASM_FLAGS=
+
+TBB_ASM.OBJ=
+
+ifeq (ia32,$(arch))
+    CPLUS_FLAGS += -falign-stack=maintain-16-byte
+endif
+
+ifeq (ia64,$(arch))
+# Position-independent code (PIC) is a must on IA-64, even for regular (not shared) executables
+    CPLUS_FLAGS += $(PIC_KEY)
+endif 
+
+ifneq (00,$(lambdas)$(cpp0x))
+       CPLUS_FLAGS += -std=c++0x -D_TBB_CPP0X
+endif
+
+#------------------------------------------------------------------------------
+# Setting assembler data.
+#------------------------------------------------------------------------------
+ASSEMBLY_SOURCE=$(arch)-gas
+ifeq (ia64,$(arch))
+    ASM=ias
+    TBB_ASM.OBJ = atomic_support.o lock_byte.o log2.o pause.o ia64_misc.o
+endif 
+#------------------------------------------------------------------------------
+# End of setting assembler data.
+#------------------------------------------------------------------------------
+
+#------------------------------------------------------------------------------
+# Setting tbbmalloc data.
+#------------------------------------------------------------------------------
+
+M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions
+
+#------------------------------------------------------------------------------
+# End of setting tbbmalloc data.
+#------------------------------------------------------------------------------
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/linux.inc b/deal.II/contrib/tbb/tbb30_104oss/build/linux.inc
new file mode 100644 (file)
index 0000000..99fbff7
--- /dev/null
@@ -0,0 +1,122 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+ifndef arch
+        uname_m:=$(shell uname -m)
+        ifeq ($(uname_m),i686)
+                export arch:=ia32
+        endif
+        ifeq ($(uname_m),ia64)
+                export arch:=ia64
+        endif
+        ifeq ($(uname_m),x86_64)
+                export arch:=intel64
+        endif
+        ifeq ($(uname_m),sparc64)
+                export arch:=sparc
+        endif
+        ifndef arch
+                export arch:=$(uname_m)
+        endif
+endif
+
+ifndef runtime
+        #gcc_version:=$(shell gcc -v 2>&1 | grep 'gcc --version' | sed -e 's/^gcc version //' | sed -e 's/ .*$$//')
+        gcc_version_full=$(shell gcc --version | grep 'gcc'| egrep -o ' [0-9]+\.[0-9]+\.[0-9]+.*' | sed -e 's/^\ //')
+        gcc_version=$(shell echo "$(gcc_version_full)" | egrep -o '^[0-9]+\.[0-9]+\.[0-9]+\s*' | head -n 1 | sed -e 's/ *//g')
+        os_version:=$(shell uname -r)
+        os_kernel_version:=$(shell uname -r | sed -e 's/-.*$$//')
+        export os_glibc_version_full:=$(shell getconf GNU_LIBC_VERSION | grep glibc | sed -e 's/^glibc //')
+        os_glibc_version:=$(shell echo "$(os_glibc_version_full)" | sed -e '2,$$d' -e 's/-.*$$//')
+        export runtime:=cc$(gcc_version)_libc$(os_glibc_version)_kernel$(os_kernel_version)
+endif
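+# For example (values are illustrative only), on a system with gcc 4.4.3,
+# glibc 2.11 and kernel 2.6.32 the string becomes cc4.4.3_libc2.11_kernel2.6.32;
+# it is later used to name the build sub-directory.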
+
+native_compiler := gcc
+export compiler ?= gcc
+debugger ?= gdb
+
+CMD=sh -c
+CWD=$(shell pwd)
+RM?=rm -f
+RD?=rmdir
+MD?=mkdir -p
+NUL= /dev/null
+SLASH=/
+MAKE_VERSIONS=sh $(tbb_root)/build/version_info_linux.sh $(CPLUS) $(CPLUS_FLAGS) $(INCLUDES) >version_string.tmp
+MAKE_TBBVARS=sh $(tbb_root)/build/generate_tbbvars.sh
+
+ifdef LD_LIBRARY_PATH
+        export LD_LIBRARY_PATH := .:$(LD_LIBRARY_PATH)
+else
+        export LD_LIBRARY_PATH := .
+endif
+
+####### Build settings ########################################################
+
+OBJ = o
+DLL = so
+LIBEXT = so
+SONAME_SUFFIX =$(shell grep TBB_COMPATIBLE_INTERFACE_VERSION $(tbb_root)/include/tbb/tbb_stddef.h | egrep -o [0-9.]+)
+
+ifeq ($(arch),ia64)
+        def_prefix = lin64ipf
+endif
+ifeq ($(arch),sparc)
+        def_prefix = lin64
+endif
+ifeq (,$(def_prefix))
+    ifeq (64,$(findstring 64,$(arch)))
+            def_prefix = lin64
+    else
+            def_prefix = lin32
+    endif
+endif
+TBB.DEF = $(tbb_root)/src/tbb/$(def_prefix)-tbb-export.def
+
+TBB.DLL = $(TBB_NO_VERSION.DLL).$(SONAME_SUFFIX)
+TBB.LIB = $(TBB.DLL)
+TBB_NO_VERSION.DLL=libtbb$(DEBUG_SUFFIX).$(DLL)
+LINK_TBB.LIB = $(TBB_NO_VERSION.DLL)
+
+MALLOC_NO_VERSION.DLL = libtbbmalloc$(DEBUG_SUFFIX).$(DLL)
+MALLOC.DEF = $(MALLOC_ROOT)/lin-tbbmalloc-export.def
+MALLOC.DLL = $(MALLOC_NO_VERSION.DLL).$(SONAME_SUFFIX)
+MALLOC.LIB = $(MALLOC_NO_VERSION.DLL)
+LINK_MALLOC.LIB = $(MALLOC_NO_VERSION.DLL)
+
+MALLOCPROXY_NO_VERSION.DLL = libtbbmalloc_proxy$(DEBUG_SUFFIX).$(DLL)
+MALLOCPROXY.DEF = $(MALLOC_ROOT)/$(def_prefix)-proxy-export.def
+MALLOCPROXY.DLL = $(MALLOCPROXY_NO_VERSION.DLL).$(SONAME_SUFFIX)
+MALLOCPROXY.LIB = $(MALLOCPROXY_NO_VERSION.DLL)
+
+RML_NO_VERSION.DLL = libirml$(DEBUG_SUFFIX).$(DLL)
+RML.DEF = $(RML_SERVER_ROOT)/lin-rml-export.def
+RML.DLL = $(RML_NO_VERSION.DLL).1
+RML.LIB = $(RML_NO_VERSION.DLL)
+
+TBB_NOSTRICT=1
+
+TEST_LAUNCHER=sh $(tbb_root)/build/test_launcher.sh
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/macos.gcc.inc b/deal.II/contrib/tbb/tbb30_104oss/build/macos.gcc.inc
new file mode 100644 (file)
index 0000000..4ac4731
--- /dev/null
@@ -0,0 +1,90 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+CPLUS = g++
+CONLY = gcc
+COMPILE_ONLY = -c -MMD
+PREPROC_ONLY = -E -x c
+INCLUDE_KEY = -I
+DEFINE_KEY = -D
+OUTPUT_KEY = -o #
+OUTPUTOBJ_KEY = -o #
+PIC_KEY = -fPIC
+WARNING_AS_ERROR_KEY = -Werror
+WARNING_KEY = -Wall
+TEST_WARNING_KEY = -Wextra -Wshadow -Wcast-qual -Woverloaded-virtual -Wnon-virtual-dtor
+WARNING_SUPPRESS = -Wno-non-virtual-dtor
+DYLIB_KEY = -dynamiclib
+EXPORT_KEY = -Wl,-exported_symbols_list,
+LIBDL = -ldl
+
+LIBS = -lpthread
+LINK_FLAGS = 
+LIB_LINK_FLAGS = -dynamiclib
+C_FLAGS = $(CPLUS_FLAGS)
+
+ifeq ($(cfg), release)
+    CPLUS_FLAGS = -O2
+else
+    CPLUS_FLAGS = -g -O0 -DTBB_USE_DEBUG
+endif
+
+CPLUS_FLAGS += -DUSE_PTHREAD
+
+ifeq (intel64,$(arch))
+    CPLUS_FLAGS += -m64
+    LINK_FLAGS += -m64
+    LIB_LINK_FLAGS += -m64
+endif
+
+ifeq (ia32,$(arch))
+    CPLUS_FLAGS += -m32
+    LINK_FLAGS += -m32
+    LIB_LINK_FLAGS += -m32
+endif
+
+ifeq (ppc64,$(arch))
+    CPLUS_FLAGS += -arch ppc64
+    LINK_FLAGS += -arch ppc64
+    LIB_LINK_FLAGS += -arch ppc64
+endif
+
+ifeq (ppc32,$(arch))
+    CPLUS_FLAGS += -arch ppc
+    LINK_FLAGS += -arch ppc
+    LIB_LINK_FLAGS += -arch ppc
+endif
+
+#------------------------------------------------------------------------------
+# Setting tbbmalloc data.
+#------------------------------------------------------------------------------
+
+M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions -fno-schedule-insns2
+
+#------------------------------------------------------------------------------
+# End of setting tbbmalloc data.
+#------------------------------------------------------------------------------
+
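
Which of the -m64/-m32/-arch blocks above takes effect is governed by the arch value that macos.inc (later in this commit) detects or that the caller supplies. A hedged usage sketch from the tbb30_104oss directory; requesting an architecture other than the auto-detected one is an assumption and needs a toolchain that can target it:

    cd deal.II/contrib/tbb/tbb30_104oss
    make compiler=gcc arch=intel64   # compile and link lines gain -m64
    make compiler=gcc arch=ppc32     # compile and link lines gain -arch ppc
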
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/macos.icc.inc b/deal.II/contrib/tbb/tbb30_104oss/build/macos.icc.inc
new file mode 100644 (file)
index 0000000..fbca6d9
--- /dev/null
@@ -0,0 +1,75 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+CPLUS = icpc
+CONLY = icc
+COMPILE_ONLY = -c -MMD
+PREPROC_ONLY = -E -x c
+INCLUDE_KEY = -I
+DEFINE_KEY = -D
+OUTPUT_KEY = -o #
+OUTPUTOBJ_KEY = -o #
+PIC_KEY = -fPIC
+WARNING_AS_ERROR_KEY = -Werror
+WARNING_KEY = -w1
+DYLIB_KEY = -dynamiclib
+EXPORT_KEY = -Wl,-exported_symbols_list,
+LIBDL = -ldl
+export COMPILER_VERSION := $(shell icc -V </dev/null 2>&1 | grep 'Version')
+#TODO: autodetection of arch from COMPILER_VERSION!!
+
+OPENMP_FLAG = -openmp
+LIBS = -lpthread
+LINK_FLAGS = 
+LIB_LINK_FLAGS = -dynamiclib -i-static
+C_FLAGS = $(CPLUS_FLAGS)
+
+ifeq ($(cfg), release)
+    CPLUS_FLAGS = -O2 -fno-omit-frame-pointer
+else
+    CPLUS_FLAGS = -g -O0 -DTBB_USE_DEBUG
+endif
+
+CPLUS_FLAGS += -DUSE_PTHREAD
+
+ifneq (,$(codecov))
+    CPLUS_FLAGS += -prof-genx
+endif
+
+ifneq (00,$(lambdas)$(cpp0x))
+       CPLUS_FLAGS += -std=c++0x -D_TBB_CPP0X
+endif
+
+
+#------------------------------------------------------------------------------
+# Setting tbbmalloc data.
+#------------------------------------------------------------------------------
+
+M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions
+
+#------------------------------------------------------------------------------
+# End of setting tbbmalloc data.
+#------------------------------------------------------------------------------
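
The ifneq (00,...) guard above enables C++0x support only when the lambdas or cpp0x switch carries a non-zero value; with both left at 0 (their apparent defaults, given the 00 comparison) the extra flags are not added. An assumed invocation, for illustration only:

    make compiler=icc cpp0x=1        # icpc is passed -std=c++0x -D_TBB_CPP0X
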
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/macos.inc b/deal.II/contrib/tbb/tbb30_104oss/build/macos.inc
new file mode 100644 (file)
index 0000000..15c9e1d
--- /dev/null
@@ -0,0 +1,85 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+####### Detections and Commands ###############################################
+ifndef arch
+ ifeq ($(shell /usr/sbin/sysctl -n hw.machine),Power Macintosh)
+   ifeq ($(shell /usr/sbin/sysctl -n hw.optional.64bitops),1)
+     export arch:=ppc64
+   else
+     export arch:=ppc32
+   endif
+ else
+   ifeq ($(shell /usr/sbin/sysctl -n hw.optional.x86_64 2>/dev/null),1)
+     export arch:=intel64
+   else
+     export arch:=ia32
+   endif
+ endif
+endif
+
+ifndef runtime
+ #gcc_version:=$(shell gcc -v 2>&1 | grep 'gcc version' | sed -e 's/^gcc version //' | sed -e 's/ .*$$//' )
+ gcc_version_full=$(shell gcc --version | grep 'gcc'| egrep -o ' [0-9]+\.[0-9]+\.[0-9]+.*' | sed -e 's/^\ //')
+ gcc_version=$(shell echo "$(gcc_version_full)" | egrep -o '^[0-9]+\.[0-9]+\.[0-9]+\s*' | head -n 1 | sed -e 's/ *//g')
+ os_version:=$(shell /usr/bin/sw_vers -productVersion)
+ export runtime:=cc$(gcc_version)_os$(os_version)
+endif
+
+native_compiler := gcc
+export compiler ?= gcc
+debugger ?= gdb
+
+CMD=$(SHELL) -c
+CWD=$(shell pwd)
+RM?=rm -f
+RD?=rmdir
+MD?=mkdir -p
+NUL= /dev/null
+SLASH=/
+MAKE_VERSIONS=sh $(tbb_root)/build/version_info_macos.sh $(CPLUS) $(CPLUS_FLAGS) $(INCLUDES) >version_string.tmp
+MAKE_TBBVARS=sh $(tbb_root)/build/generate_tbbvars.sh
+
+####### Build settings ########################################################
+
+OBJ=o
+DLL=dylib
+LIBEXT=dylib
+
+def_prefix = $(if $(findstring 64,$(arch)),mac64,mac32)
+
+TBB.DEF = $(tbb_root)/src/tbb/$(def_prefix)-tbb-export.def
+TBB.DLL = libtbb$(DEBUG_SUFFIX).$(DLL)
+TBB.LIB = $(TBB.DLL)
+LINK_TBB.LIB = $(TBB.LIB)
+
+MALLOC.DEF = $(MALLOC_ROOT)/$(def_prefix)-tbbmalloc-export.def
+MALLOC.DLL = libtbbmalloc$(DEBUG_SUFFIX).$(DLL)
+MALLOC.LIB = $(MALLOC.DLL)
+
+TBB_NOSTRICT=1
+
+TEST_LAUNCHER=sh $(tbb_root)/build/test_launcher.sh
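
When arch is not preset, macos.inc derives it from sysctl as above. A stand-alone sketch of the same probe (assumes the hw.* sysctl keys used by the include exist on the host; not part of the imported file):

    if [ "`/usr/sbin/sysctl -n hw.machine`" = "Power Macintosh" ]; then
        [ "`/usr/sbin/sysctl -n hw.optional.64bitops`" = "1" ] && echo ppc64 || echo ppc32
    else
        [ "`/usr/sbin/sysctl -n hw.optional.x86_64 2>/dev/null`" = "1" ] && echo intel64 || echo ia32
    fi
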
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/suncc.map.pause b/deal.II/contrib/tbb/tbb30_104oss/build/suncc.map.pause
new file mode 100644 (file)
index 0000000..a92d08e
--- /dev/null
@@ -0,0 +1 @@
+hwcap_1 = OVERRIDE;
\ No newline at end of file
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/test_launcher.bat b/deal.II/contrib/tbb/tbb30_104oss/build/test_launcher.bat
new file mode 100644 (file)
index 0000000..a261494
--- /dev/null
@@ -0,0 +1,36 @@
+@echo off
+REM
+REM Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+REM
+REM This file is part of Threading Building Blocks.
+REM
+REM Threading Building Blocks is free software; you can redistribute it
+REM and/or modify it under the terms of the GNU General Public License
+REM version 2 as published by the Free Software Foundation.
+REM
+REM Threading Building Blocks is distributed in the hope that it will be
+REM useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+REM of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+REM GNU General Public License for more details.
+REM
+REM You should have received a copy of the GNU General Public License
+REM along with Threading Building Blocks; if not, write to the Free Software
+REM Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+REM
+REM As a special exception, you may use this file as part of a free software
+REM library without restriction.  Specifically, if other files instantiate
+REM templates or use macros or inline functions from this file, or you compile
+REM this file and link it with other files to produce an executable, this
+REM file does not by itself cause the resulting executable to be covered by
+REM the GNU General Public License.  This exception does not however
+REM invalidate any other reasons why the executable file might be covered by
+REM the GNU General Public License.
+REM
+
+REM no LD_PRELOAD under Windows
+if "%1"=="-l" (
+    echo skip
+    exit
+)
+
+%*
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/test_launcher.sh b/deal.II/contrib/tbb/tbb30_104oss/build/test_launcher.sh
new file mode 100644 (file)
index 0000000..48a382b
--- /dev/null
@@ -0,0 +1,42 @@
+#!/bin/sh
+#
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+while getopts  "l:" flag #
+do #
+    if [ `uname` != 'Linux' ] ; then #
+        echo 'skip' #
+        exit #
+    fi #
+    LD_PRELOAD=$OPTARG #
+    shift `expr $OPTIND - 1` #
+done #
+# Set stack limit
+ulimit -s 10240 # 
+# Run the command line passed via parameters
+export LD_PRELOAD #
+./$* # 
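
The launcher raises the stack limit to 10 MB and, when -l is given, preloads the named library via LD_PRELOAD before running the test passed in the remaining arguments (the ./ prefix is added by the script itself). A hedged usage sketch; the test binary names are hypothetical:

    sh build/test_launcher.sh test_mutex.exe
    sh build/test_launcher.sh -l libtbbmalloc_proxy.so test_malloc_overload.exe   # the -l form runs only on Linux
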
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/version_info_aix.sh b/deal.II/contrib/tbb/tbb30_104oss/build/version_info_aix.sh
new file mode 100644 (file)
index 0000000..11e5abf
--- /dev/null
@@ -0,0 +1,42 @@
+#!/bin/sh
+#
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+# Script used to generate version info string
+echo "#define __TBB_VERSION_STRINGS \\"
+echo '"TBB:' "BUILD_HOST\t\t"`hostname -s`" ("`uname -m`")"'" ENDL \'
+# find OS name in *-release and issue* files by filtering blank lines and lsb-release content out
+echo '"TBB:' "BUILD_OS\t\t"`lsb_release -sd 2>/dev/null | grep -ih '[a-z] ' - /etc/*release /etc/issue 2>/dev/null | head -1 | sed -e 's/["\\\\]//g'`'" ENDL \'
+echo '"TBB:' "BUILD_KERNEL\t"`uname -srv`'" ENDL \'
+echo '"TBB:' "BUILD_GCC\t\t"`g++ -v </dev/null 2>&1 | grep 'gcc.*version'`'" ENDL \'
+[ -z "$COMPILER_VERSION" ] || echo '"TBB:' "BUILD_COMPILER\t"$COMPILER_VERSION'" ENDL \'
+echo '"TBB:' "BUILD_GLIBC\t"`getconf GNU_LIBC_VERSION | grep glibc | sed -e 's/^glibc //'`'" ENDL \'
+echo '"TBB:' "BUILD_LD\t\t"`ld -v 2>&1 | grep 'version'`'" ENDL \'
+echo '"TBB:' "BUILD_TARGET\t$arch on $runtime"'" ENDL \'
+echo '"TBB:' "BUILD_COMMAND\t"$*'" ENDL \'
+echo ""
+echo "#define __TBB_DATETIME \""`date -u`"\""
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/version_info_linux.sh b/deal.II/contrib/tbb/tbb30_104oss/build/version_info_linux.sh
new file mode 100644 (file)
index 0000000..11e5abf
--- /dev/null
@@ -0,0 +1,42 @@
+#!/bin/sh
+#
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+# Script used to generate version info string
+echo "#define __TBB_VERSION_STRINGS \\"
+echo '"TBB:' "BUILD_HOST\t\t"`hostname -s`" ("`uname -m`")"'" ENDL \'
+# find OS name in *-release and issue* files by filtering blank lines and lsb-release content out
+echo '"TBB:' "BUILD_OS\t\t"`lsb_release -sd 2>/dev/null | grep -ih '[a-z] ' - /etc/*release /etc/issue 2>/dev/null | head -1 | sed -e 's/["\\\\]//g'`'" ENDL \'
+echo '"TBB:' "BUILD_KERNEL\t"`uname -srv`'" ENDL \'
+echo '"TBB:' "BUILD_GCC\t\t"`g++ -v </dev/null 2>&1 | grep 'gcc.*version'`'" ENDL \'
+[ -z "$COMPILER_VERSION" ] || echo '"TBB:' "BUILD_COMPILER\t"$COMPILER_VERSION'" ENDL \'
+echo '"TBB:' "BUILD_GLIBC\t"`getconf GNU_LIBC_VERSION | grep glibc | sed -e 's/^glibc //'`'" ENDL \'
+echo '"TBB:' "BUILD_LD\t\t"`ld -v 2>&1 | grep 'version'`'" ENDL \'
+echo '"TBB:' "BUILD_TARGET\t$arch on $runtime"'" ENDL \'
+echo '"TBB:' "BUILD_COMMAND\t"$*'" ENDL \'
+echo ""
+echo "#define __TBB_DATETIME \""`date -u`"\""
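
linux.inc drives this script through its MAKE_VERSIONS command (see the earlier hunk) and redirects the output to version_string.tmp. A sketch of that invocation from the tbb30_104oss root; the compiler flags here are illustrative placeholders for whatever CPLUS_FLAGS and INCLUDES expand to:

    sh build/version_info_linux.sh g++ -O2 -DUSE_PTHREAD -I include >version_string.tmp
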
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/version_info_macos.sh b/deal.II/contrib/tbb/tbb30_104oss/build/version_info_macos.sh
new file mode 100644 (file)
index 0000000..8ba45d6
--- /dev/null
@@ -0,0 +1,39 @@
+#!/bin/sh
+#
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+# Script used to generate version info string
+echo "#define __TBB_VERSION_STRINGS \\"
+echo '"TBB:' "BUILD_HOST\t\t"`hostname -s`" ("`arch`")"'" ENDL \'
+echo '"TBB:' "BUILD_OS\t\t"`sw_vers -productName`" version "`sw_vers -productVersion`'" ENDL \'
+echo '"TBB:' "BUILD_KERNEL\t"`uname -v`'" ENDL \'
+echo '"TBB:' "BUILD_GCC\t\t"`gcc -v </dev/null 2>&1 | grep 'version'`'" ENDL \'
+[ -z "$COMPILER_VERSION" ] || echo '"TBB:' "BUILD_COMPILER\t"$COMPILER_VERSION'" ENDL \'
+echo '"TBB:' "BUILD_TARGET\t$arch on $runtime"'" ENDL \'
+echo '"TBB:' "BUILD_COMMAND\t"$*'" ENDL \'
+echo ""
+echo "#define __TBB_DATETIME \""`date -u`"\""
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/version_info_sunos.sh b/deal.II/contrib/tbb/tbb30_104oss/build/version_info_sunos.sh
new file mode 100644 (file)
index 0000000..02ad491
--- /dev/null
@@ -0,0 +1,39 @@
+#!/bin/sh
+#
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+# Script used to generate version info string
+echo "#define __TBB_VERSION_STRINGS \\"
+echo '"TBB:' "BUILD_HOST\t"`hostname`" ("`arch`")"'" ENDL \'
+echo '"TBB:' "BUILD_OS\t\t"`uname`'" ENDL \'
+echo '"TBB:' "BUILD_KERNEL\t"`uname -srv`'" ENDL \'
+echo '"TBB:' "BUILD_SUNCC\t"`CC -V </dev/null 2>&1 | grep 'C++'`'" ENDL \'
+[ -z "$COMPILER_VERSION" ] || echo '"TBB:' "BUILD_COMPILER\t"$COMPILER_VERSION'" ENDL \'
+echo '"TBB:' "BUILD_TARGET\t$arch on $runtime"'" ENDL \'
+echo '"TBB:' "BUILD_COMMAND\t"$*'" ENDL \'
+echo ""
+echo "#define __TBB_DATETIME \""`date -u`"\""
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/version_info_windows.js b/deal.II/contrib/tbb/tbb30_104oss/build/version_info_windows.js
new file mode 100644 (file)
index 0000000..c8e7634
--- /dev/null
@@ -0,0 +1,136 @@
+// Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+//
+// This file is part of Threading Building Blocks.
+//
+// Threading Building Blocks is free software; you can redistribute it
+// and/or modify it under the terms of the GNU General Public License
+// version 2 as published by the Free Software Foundation.
+//
+// Threading Building Blocks is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Threading Building Blocks; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+//
+// As a special exception, you may use this file as part of a free software
+// library without restriction.  Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License.  This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
+var WshShell = WScript.CreateObject("WScript.Shell");
+
+var tmpExec;
+
+WScript.Echo("#define __TBB_VERSION_STRINGS \\");
+
+//Getting BUILD_HOST
+WScript.echo( "\"TBB: BUILD_HOST\\t\\t" + 
+              WshShell.ExpandEnvironmentStrings("%COMPUTERNAME%") +
+              "\" ENDL \\" );
+
+//Getting BUILD_OS
+tmpExec = WshShell.Exec("cmd /c ver");
+while ( tmpExec.Status == 0 ) {
+    WScript.Sleep(100);
+}
+tmpExec.StdOut.ReadLine();
+
+WScript.echo( "\"TBB: BUILD_OS\\t\\t" + 
+              tmpExec.StdOut.ReadLine() +
+              "\" ENDL \\" );
+
+if ( WScript.Arguments(0).toLowerCase().match("gcc") ) {
+    tmpExec = WshShell.Exec("gcc --version");
+    WScript.echo( "\"TBB: BUILD_COMPILER\\t" + 
+                  tmpExec.StdOut.ReadLine() + 
+                  "\" ENDL \\" );
+
+} else { // MS / Intel compilers
+    //Getting BUILD_CL
+    tmpExec = WshShell.Exec("cmd /c echo #define 0 0>empty.cpp");
+    tmpExec = WshShell.Exec("cl -c empty.cpp ");
+    while ( tmpExec.Status == 0 ) {
+        WScript.Sleep(100);
+    }
+    var clVersion = tmpExec.StdErr.ReadLine();
+    WScript.echo( "\"TBB: BUILD_CL\\t\\t" + 
+                  clVersion +
+                  "\" ENDL \\" );
+
+    //Getting BUILD_COMPILER
+    if ( WScript.Arguments(0).toLowerCase().match("icl") ) {
+        tmpExec = WshShell.Exec("icl -c empty.cpp ");
+        while ( tmpExec.Status == 0 ) {
+            WScript.Sleep(100);
+        }
+        WScript.echo( "\"TBB: BUILD_COMPILER\\t" + 
+                      tmpExec.StdErr.ReadLine() + 
+                      "\" ENDL \\" );
+    } else {
+        WScript.echo( "\"TBB: BUILD_COMPILER\\t\\t" + 
+                      clVersion +
+                      "\" ENDL \\" );
+    }
+    tmpExec = WshShell.Exec("cmd /c del /F /Q empty.obj empty.cpp");
+}
+
+//Getting BUILD_TARGET
+WScript.echo( "\"TBB: BUILD_TARGET\\t" + 
+              WScript.Arguments(1) + 
+              "\" ENDL \\" );
+
+//Getting BUILD_COMMAND
+WScript.echo( "\"TBB: BUILD_COMMAND\\t" + WScript.Arguments(2) + "\" ENDL" );
+
+//Getting __TBB_DATETIME and __TBB_VERSION_YMD
+var date = new Date();
+WScript.echo( "#define __TBB_DATETIME \"" + date.toUTCString() + "\"" );
+WScript.echo( "#define __TBB_VERSION_YMD " + date.getUTCFullYear() + ", " + 
+              (date.getUTCMonth() > 8 ? (date.getUTCMonth()+1):("0"+(date.getUTCMonth()+1))) + 
+              (date.getUTCDate() > 9 ? date.getUTCDate():("0"+date.getUTCDate())) );
+
+
+/*
+
+Original strings
+
+#define __TBB_VERSION_STRINGS \
+"TBB: BUILD_HOST\t\tvpolin-mobl1 (ia32)" ENDL \
+"TBB: BUILD_OS\t\tMicrosoft Windows XP [Version 5.1.2600]" ENDL \
+"TBB: BUILD_CL\t\tMicrosoft (R) 32-bit C/C++ Optimizing Compiler Version 13.10.3077 for 80x86" ENDL \
+"TBB: BUILD_COMPILER\tIntel(R) C++ Compiler for 32-bit applications, Version 9.1 Build 20070109Z Package ID: W_CC_C_9.1.034 " ENDL \
+"TBB: BUILD_TARGET\t" ENDL \
+"TBB: BUILD_COMMAND\t" ENDL \
+
+#define __TBB_DATETIME "Mon Jun 4 10:16:07 UTC 2007"
+#define __TBB_VERSION_YMD 2007, 0604
+
+
+
+# The script must be run from two directory levels below this level.
+x='"TBB: '
+y='" ENDL \'
+echo "#define __TBB_VERSION_STRINGS \\"
+echo $x "BUILD_HOST\t\t"`hostname`" ("`../../arch.exe`")"$y
+echo $x "BUILD_OS\t\t"`../../win_version.bat|grep -i 'Version'`$y
+echo >empty.cpp
+echo $x "BUILD_CL\t\t"`cl -c empty.cpp 2>&1 | grep -i Version`$y
+echo $x "BUILD_COMPILER\t"`icl -c empty.cpp 2>&1 | grep -i Version`$y
+echo $x "BUILD_TARGET\t"$TBB_ARCH$y
+echo $x "BUILD_COMMAND\t"$*$y
+echo ""
+# A workaround for MKS 8.6 where `date -u` crashes.
+date -u > date.tmp
+echo "#define __TBB_DATETIME \""`cat date.tmp`"\""
+echo "#define __TBB_VERSION_YMD "`date '+%Y, %m%d'`
+rm empty.cpp
+rm empty.obj
+rm date.tmp
+*/
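
The JScript above reads three positional arguments: the compiler name (argument 0, matched against gcc/icl), the build target string (argument 1) and the build command (argument 2). The actual invocation lives in build/windows.inc, which is not part of this hunk; a hypothetical call for illustration only:

    cscript /nologo version_info_windows.js cl "intel64 on vc8" "cl /MD /O2 ..." >version_string.tmp
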
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/vsproject/index.html b/deal.II/contrib/tbb/tbb30_104oss/build/vsproject/index.html
new file mode 100644 (file)
index 0000000..a0753e4
--- /dev/null
@@ -0,0 +1,31 @@
+<HTML>
+<BODY>
+
+<H2>Overview</H2>
+This directory contains the Visual Studio* 2005 solution for building Threading Building Blocks.
+
+
+<H2>Files</H2>
+<DL>
+<DT><A HREF="makefile.sln">makefile.sln</A>
+<DD>Solution file.
+<DT><A HREF="tbb.vcproj">tbb.vcproj</A>
+<DD>Library project file.
+<DT><A HREF="tbbmalloc.vcproj">tbbmalloc.vcproj</A>
+<DD>Scalable allocator library project file. Allocator sources are expected to be located in the <A HREF="../../src/tbbmalloc">../../src/tbbmalloc</A> folder.
+<DT><A HREF="tbbmalloc_proxy.vcproj">tbbmalloc_proxy.vcproj</A>
+<DD>Standard allocator replacement project file. 
+</DL>
+
+<HR>
+<A HREF="../index.html">Up to parent directory</A>
+<P></P>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<P></P>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are 
+registered trademarks or trademarks of Intel Corporation or its 
+subsidiaries in the United States and other countries. 
+<P></P>
+* Other names and brands may be claimed as the property of others.
+</BODY>
+</HTML>
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/vsproject/makefile.sln b/deal.II/contrib/tbb/tbb30_104oss/build/vsproject/makefile.sln
new file mode 100644 (file)
index 0000000..1fd7a8d
--- /dev/null
@@ -0,0 +1,100 @@
+Microsoft Visual Studio Solution File, Format Version 9.00
+# Visual Studio 2005
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "tbb", "tbb.vcproj", "{F62787DD-1327-448B-9818-030062BCFAA5}"
+       ProjectSection(WebsiteProperties) = preProject
+               Debug.AspNetCompiler.Debug = "True"
+               Release.AspNetCompiler.Debug = "False"
+       EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "tbbmalloc", "tbbmalloc.vcproj", "{B15F131E-328A-4D42-ADC2-9FF4CA6306D8}"
+       ProjectSection(WebsiteProperties) = preProject
+               Debug.AspNetCompiler.Debug = "True"
+               Release.AspNetCompiler.Debug = "False"
+       EndProjectSection
+       ProjectSection(ProjectDependencies) = postProject
+               {F62787DD-1327-448B-9818-030062BCFAA5} = {F62787DD-1327-448B-9818-030062BCFAA5}
+       EndProjectSection
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{8898CE0B-0BFB-45AE-AA71-83735ED2510D}"
+       ProjectSection(WebsiteProperties) = preProject
+               Debug.AspNetCompiler.Debug = "True"
+               Release.AspNetCompiler.Debug = "False"
+       EndProjectSection
+       ProjectSection(SolutionItems) = preProject
+               index.html = index.html
+       EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "tbbmalloc_proxy", "tbbmalloc_proxy.vcproj", "{02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}"
+       ProjectSection(WebsiteProperties) = preProject
+               Debug.AspNetCompiler.Debug = "True"
+               Release.AspNetCompiler.Debug = "False"
+       EndProjectSection
+       ProjectSection(ProjectDependencies) = postProject
+               {B15F131E-328A-4D42-ADC2-9FF4CA6306D8} = {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}
+       EndProjectSection
+EndProject
+Global
+       GlobalSection(SolutionConfigurationPlatforms) = preSolution
+               Debug|Win32 = Debug|Win32
+               Debug|x64 = Debug|x64
+               Debug-MT|Win32 = Debug-MT|Win32
+               Debug-MT|x64 = Debug-MT|x64
+               Release|Win32 = Release|Win32
+               Release|x64 = Release|x64
+               Release-MT|Win32 = Release-MT|Win32
+               Release-MT|x64 = Release-MT|x64
+       EndGlobalSection
+       GlobalSection(ProjectConfigurationPlatforms) = postSolution
+               {F62787DD-1327-448B-9818-030062BCFAA5}.Debug|Win32.ActiveCfg = Debug|Win32
+               {F62787DD-1327-448B-9818-030062BCFAA5}.Debug|Win32.Build.0 = Debug|Win32
+               {F62787DD-1327-448B-9818-030062BCFAA5}.Debug|x64.ActiveCfg = Debug|x64
+               {F62787DD-1327-448B-9818-030062BCFAA5}.Debug|x64.Build.0 = Debug|x64
+               {F62787DD-1327-448B-9818-030062BCFAA5}.Debug-MT|Win32.ActiveCfg = Debug-MT|Win32
+               {F62787DD-1327-448B-9818-030062BCFAA5}.Debug-MT|Win32.Build.0 = Debug-MT|Win32
+               {F62787DD-1327-448B-9818-030062BCFAA5}.Debug-MT|x64.ActiveCfg = Debug-MT|x64
+               {F62787DD-1327-448B-9818-030062BCFAA5}.Debug-MT|x64.Build.0 = Debug-MT|x64
+               {F62787DD-1327-448B-9818-030062BCFAA5}.Release|Win32.ActiveCfg = Release|Win32
+               {F62787DD-1327-448B-9818-030062BCFAA5}.Release|Win32.Build.0 = Release|Win32
+               {F62787DD-1327-448B-9818-030062BCFAA5}.Release|x64.ActiveCfg = Release|x64
+               {F62787DD-1327-448B-9818-030062BCFAA5}.Release|x64.Build.0 = Release|x64
+               {F62787DD-1327-448B-9818-030062BCFAA5}.Release-MT|Win32.ActiveCfg = Release-MT|Win32
+               {F62787DD-1327-448B-9818-030062BCFAA5}.Release-MT|Win32.Build.0 = Release-MT|Win32
+               {F62787DD-1327-448B-9818-030062BCFAA5}.Release-MT|x64.ActiveCfg = Release-MT|x64
+               {F62787DD-1327-448B-9818-030062BCFAA5}.Release-MT|x64.Build.0 = Release-MT|x64
+               {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug|Win32.ActiveCfg = Debug|Win32
+               {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug|Win32.Build.0 = Debug|Win32
+               {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug|x64.ActiveCfg = Debug|x64
+               {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug|x64.Build.0 = Debug|x64
+               {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug-MT|Win32.ActiveCfg = Debug-MT|Win32
+               {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug-MT|Win32.Build.0 = Debug-MT|Win32
+               {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug-MT|x64.ActiveCfg = Debug-MT|x64
+               {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug-MT|x64.Build.0 = Debug-MT|x64
+               {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release|Win32.ActiveCfg = Release|Win32
+               {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release|Win32.Build.0 = Release|Win32
+               {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release|x64.ActiveCfg = Release|x64
+               {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release|x64.Build.0 = Release|x64
+               {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release-MT|Win32.ActiveCfg = Release-MT|Win32
+               {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release-MT|Win32.Build.0 = Release-MT|Win32
+               {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release-MT|x64.ActiveCfg = Release-MT|x64
+               {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release-MT|x64.Build.0 = Release-MT|x64
+               {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug|Win32.ActiveCfg = Debug|Win32
+               {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug|Win32.Build.0 = Debug|Win32
+               {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug|x64.ActiveCfg = Debug|x64
+               {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug|x64.Build.0 = Debug|x64
+               {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug-MT|Win32.ActiveCfg = Debug-MT|Win32
+               {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug-MT|Win32.Build.0 = Debug-MT|Win32
+               {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug-MT|x64.ActiveCfg = Debug-MT|x64
+               {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug-MT|x64.Build.0 = Debug-MT|x64
+               {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release|Win32.ActiveCfg = Release|Win32
+               {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release|Win32.Build.0 = Release|Win32
+               {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release|x64.ActiveCfg = Release|x64
+               {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release|x64.Build.0 = Release|x64
+               {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release-MT|Win32.ActiveCfg = Release-MT|Win32
+               {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release-MT|Win32.Build.0 = Release-MT|Win32
+               {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release-MT|x64.ActiveCfg = Release-MT|x64
+               {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release-MT|x64.Build.0 = Release-MT|x64
+       EndGlobalSection
+       GlobalSection(SolutionProperties) = preSolution
+               HideSolutionNode = FALSE
+       EndGlobalSection
+EndGlobal
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/vsproject/tbb.vcproj b/deal.II/contrib/tbb/tbb30_104oss/build/vsproject/tbb.vcproj
new file mode 100644 (file)
index 0000000..bafa80d
--- /dev/null
@@ -0,0 +1,506 @@
+<?xml version="1.0" encoding="windows-1251"?>\r
+<VisualStudioProject ProjectType="Visual C++" Version="8,00" Name="tbb" ProjectGUID="{F62787DD-1327-448B-9818-030062BCFAA5}" RootNamespace="tbb" Keyword="Win32Proj">\r
+       <Platforms>\r
+               <Platform Name="Win32"/>\r
+               <Platform Name="x64"/>\r
+       </Platforms>\r
+       <ToolFiles>\r
+               <DefaultToolFile FileName="masm.rules"/>\r
+       </ToolFiles>\r
+       <Configurations>\r
+               <Configuration Name="Debug|Win32" OutputDirectory="$(SolutionDir)ia32\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)ia32\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MDd /Od /Ob0 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG /D_USE_RTM_VERSION /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /W4 /Wp64 /I../../src /I../../src/rml/include /I../../include" Optimization="0" AdditionalIncludeDirectories="." PreprocessorDefinitions="" MinimalRebuild="true" BasicRuntimeChecks="3" RuntimeLibrary="3" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO  /DEF:&quot;$(IntDir)\tbb.def&quot;" OutputFile="$(OutDir)\tbb_debug.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" TargetMachine="1"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+               <Configuration Name="Debug|x64" OutputDirectory="$(SolutionDir)intel64\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)intel64\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool" TargetEnvironment="3"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MDd /Od /Ob0 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG /D_USE_RTM_VERSION /GS- /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /W4 /Wp64 /I../../src /I../../src/rml/include /I../../include" Optimization="0" AdditionalIncludeDirectories="." PreprocessorDefinitions="" MinimalRebuild="true" BasicRuntimeChecks="3" BufferSecurityCheck="false" RuntimeLibrary="3" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3" ShowIncludes="false"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO  /DEF:&quot;$(IntDir)\tbb.def&quot;" OutputFile="$(OutDir)\tbb_debug.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" TargetMachine="17"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+               <Configuration Name="Release|Win32" OutputDirectory="$(SolutionDir)ia32\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)ia32\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0" WholeProgramOptimization="1">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MD /O2 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /D_USE_RTM_VERSION /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /W4 /Wp64 /I../../src /I../../src/rml/include /I../../include" AdditionalIncludeDirectories="." PreprocessorDefinitions="" RuntimeLibrary="2" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO  /DEF:&quot;$(IntDir)\tbb.def&quot;" OutputFile="$(OutDir)\tbb.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" OptimizeReferences="2" EnableCOMDATFolding="2" TargetMachine="1"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+               <Configuration Name="Release|x64" OutputDirectory="$(SolutionDir)intel64\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)intel64\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0" WholeProgramOptimization="1">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool" TargetEnvironment="3"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MD /O2 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /D_USE_RTM_VERSION /GS- /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /W4 /Wp64 /I../../src /I../../src/rml/include /I../../include" AdditionalIncludeDirectories="." PreprocessorDefinitions="" BufferSecurityCheck="false" RuntimeLibrary="2" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO /DEF:&quot;$(IntDir)\tbb.def&quot;" OutputFile="$(OutDir)\tbb.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" OptimizeReferences="2" EnableCOMDATFolding="2" TargetMachine="17"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+               <Configuration Name="Debug-MT|Win32" OutputDirectory="$(SolutionDir)ia32\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)ia32\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MTd /Od /Ob0 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /W4 /I../../src /I../../src/rml/include /I../../include" Optimization="0" AdditionalIncludeDirectories="." PreprocessorDefinitions="" MinimalRebuild="true" BasicRuntimeChecks="3" RuntimeLibrary="1" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO  /DEF:&quot;$(IntDir)\tbb.def&quot;" OutputFile="$(OutDir)\tbb_debug.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" TargetMachine="1"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+               <Configuration Name="Debug-MT|x64" OutputDirectory="$(SolutionDir)intel64\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)intel64\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool" TargetEnvironment="3"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MTd /Od /Ob0 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG /GS- /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /W4 /I../../src /I../../src/rml/include /I../../include" Optimization="0" AdditionalIncludeDirectories="." PreprocessorDefinitions="" MinimalRebuild="true" BasicRuntimeChecks="3" BufferSecurityCheck="false" RuntimeLibrary="1" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3" ShowIncludes="false"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO  /DEF:&quot;$(IntDir)\tbb.def&quot;" OutputFile="$(OutDir)\tbb_debug.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" TargetMachine="17"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+               <Configuration Name="Release-MT|Win32" OutputDirectory="$(SolutionDir)ia32\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)ia32\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0" WholeProgramOptimization="1">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MT /O2 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /W4 /I../../src /I../../src/rml/include /I../../include" AdditionalIncludeDirectories="." PreprocessorDefinitions="" RuntimeLibrary="0" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO  /DEF:&quot;$(IntDir)\tbb.def&quot;" OutputFile="$(OutDir)\tbb.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" OptimizeReferences="2" EnableCOMDATFolding="2" TargetMachine="1"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+               <Configuration Name="Release-MT|x64" OutputDirectory="$(SolutionDir)intel64\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)intel64\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0" WholeProgramOptimization="1">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool" TargetEnvironment="3"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MT /O2 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /GS- /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /W4 /I../../src /I../../src/rml/include /I../../include" AdditionalIncludeDirectories="." PreprocessorDefinitions="" BufferSecurityCheck="false" RuntimeLibrary="0" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO /DEF:&quot;$(IntDir)\tbb.def&quot;" OutputFile="$(OutDir)\tbb.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" OptimizeReferences="2" EnableCOMDATFolding="2" TargetMachine="17"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+       </Configurations>\r
+       <References>\r
+       </References>\r
+       <Files>\r
+               <Filter Name="Source Files" Filter="cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx" UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}">\r
+                       <File RelativePath="..\..\src\tbb\intel64-masm\atomic_support.asm">\r
+                               <FileConfiguration Name="Debug|Win32" ExcludedFromBuild="true">\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug|x64">\r
+                                       <Tool Name="VCCustomBuildTool" Description="building atomic_support.obj" CommandLine="ml64 /Fo&quot;intel64\Debug\atomic_support.obj&quot; /DUSE_FRAME_POINTER /DEM64T=1 /c /Zi ../../src/tbb/intel64-masm/atomic_support.asm\r
+" Outputs="intel64\Debug\atomic_support.obj"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release|Win32" ExcludedFromBuild="true">\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release|x64">\r
+                                       <Tool Name="VCCustomBuildTool" Description="building atomic_support.obj" CommandLine="ml64 /Fo&quot;intel64\Release\atomic_support.obj&quot;  /DEM64T=1 /c /Zi ../../src/tbb/intel64-masm/atomic_support.asm\r
+" Outputs="intel64\Release\atomic_support.obj"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|Win32" ExcludedFromBuild="true">\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|x64">\r
+                                       <Tool Name="VCCustomBuildTool" Description="building atomic_support.obj" CommandLine="ml64 /Fo&quot;intel64\Debug-MT\atomic_support.obj&quot; /DUSE_FRAME_POINTER /DEM64T=1 /c /Zi ../../src/tbb/intel64-masm/atomic_support.asm\r
+" Outputs="intel64\Debug-MT\atomic_support.obj"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release-MT|Win32" ExcludedFromBuild="true">\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release-MT|x64">\r
+                                       <Tool Name="VCCustomBuildTool" Description="building atomic_support.obj" CommandLine="ml64 /Fo&quot;intel64\Release-MT\atomic_support.obj&quot;  /DEM64T=1 /c /Zi ../../src/tbb/intel64-masm/atomic_support.asm\r
+" Outputs="intel64\Release-MT\atomic_support.obj"/>\r
+                               </FileConfiguration>\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbb\ia32-masm\atomic_support.asm">\r
+                               <FileConfiguration Name="Debug|Win32">\r
+                                       <Tool Name="MASM" AdditionalOptions="/coff /Zi"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="MASM" AdditionalOptions="/coff /Zi"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="MASM"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|Win32">\r
+                                       <Tool Name="MASM" AdditionalOptions="/coff /Zi"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="MASM" AdditionalOptions="/coff /Zi"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release-MT|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="MASM"/>\r
+                               </FileConfiguration>\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbb\ia32-masm\lock_byte.asm">\r
+                               <FileConfiguration Name="Debug|Win32">\r
+                                       <Tool Name="MASM" AdditionalOptions="/coff /Zi"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="MASM" AdditionalOptions="/coff /Zi"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="MASM"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|Win32">\r
+                                       <Tool Name="MASM" AdditionalOptions="/coff /Zi"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="MASM" AdditionalOptions="/coff /Zi"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release-MT|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="MASM"/>\r
+                               </FileConfiguration>\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbb\win32-tbb-export.def">\r
+                               <FileConfiguration Name="Debug|Win32">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbb/win32-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /I../../src /I../../include &gt;&quot;$(IntDir)\tbb.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbb.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbb/win32-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbb.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbb.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release|Win32">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbb/win32-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /I../../src /I../../include &gt;&quot;$(IntDir)\tbb.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbb.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbb/win32-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbb.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbb.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|Win32">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbb/win32-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /I../../src /I../../include &gt;&quot;$(IntDir)\tbb.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbb.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbb/win32-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbb.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbb.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release-MT|Win32">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbb/win32-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /I../../src /I../../include &gt;&quot;$(IntDir)\tbb.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbb.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release-MT|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbb/win32-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbb.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbb.def&quot;"/>\r
+                               </FileConfiguration>\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbb\win64-tbb-export.def">\r
+                               <FileConfiguration Name="Debug|Win32" ExcludedFromBuild="true">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbb/win64-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbb.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbb.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug|x64">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbb/win64-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /I../../src /I../../include &gt;&quot;$(IntDir)\tbb.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbb.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release|Win32" ExcludedFromBuild="true">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbb/win64-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbb.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbb.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release|x64">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbb/win64-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /I../../src /I../../include &gt;&quot;$(IntDir)\tbb.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbb.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|Win32" ExcludedFromBuild="true">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbb/win64-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbb.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbb.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|x64">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbb/win64-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /I../../src /I../../include &gt;&quot;$(IntDir)\tbb.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbb.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release-MT|Win32" ExcludedFromBuild="true">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbb/win64-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbb.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbb.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release-MT|x64">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbb/win64-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /I../../src /I../../include &gt;&quot;$(IntDir)\tbb.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbb.def&quot;"/>\r
+                               </FileConfiguration>\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbb\concurrent_hash_map.cpp"/><File RelativePath="..\..\src\tbb\concurrent_queue.cpp"/><File RelativePath="..\..\src\tbb\concurrent_vector.cpp"/><File RelativePath="..\..\src\tbb\dynamic_link.cpp"/><File RelativePath="..\..\src\tbb\itt_notify.cpp"/><File RelativePath="..\..\src\tbb\cache_aligned_allocator.cpp"/><File RelativePath="..\..\src\tbb\pipeline.cpp"/><File RelativePath="..\..\src\tbb\queuing_mutex.cpp"/><File RelativePath="..\..\src\tbb\queuing_rw_mutex.cpp"/><File RelativePath="..\..\src\tbb\reader_writer_lock.cpp"/><File RelativePath="..\..\src\tbb\spin_rw_mutex.cpp"/><File RelativePath="..\..\src\tbb\spin_mutex.cpp"/><File RelativePath="..\..\src\tbb\critical_section.cpp"/><File RelativePath="..\..\src\tbb\task.cpp"/><File RelativePath="..\..\src\tbb\tbb_misc.cpp"/><File RelativePath="..\..\src\tbb\mutex.cpp"/><File RelativePath="..\..\src\tbb\recursive_mutex.cpp"/><File RelativePath="..\..\src\tbb\condition_variable.cpp"/><File RelativePath="..\..\src\tbb\tbb_thread.cpp"/><File RelativePath="..\..\src\tbb\concurrent_monitor.cpp"/><File RelativePath="..\..\src\tbb\private_server.cpp"/><File RelativePath="..\..\src\rml\client\rml_tbb.cpp"/><File RelativePath="..\..\src\tbb\task_group_context.cpp"/><File RelativePath="..\..\src\tbb\governor.cpp"/><File RelativePath="..\..\src\tbb\market.cpp"/><File RelativePath="..\..\src\tbb\arena.cpp"/><File RelativePath="..\..\src\tbb\scheduler.cpp"/><File RelativePath="..\..\src\tbb\observer_proxy.cpp"/><File RelativePath="..\..\src\tbb\tbb_statistics.cpp"/><File RelativePath="..\..\src\tbb\tbb_main.cpp"/><File RelativePath="..\..\src\old\concurrent_vector_v2.cpp"/><File RelativePath="..\..\src\old\concurrent_queue_v2.cpp"/><File RelativePath="..\..\src\old\spin_rw_mutex_v2.cpp"/><File RelativePath="..\..\src\old\task_v2.cpp"/></Filter>\r
+               <Filter Name="Header Files" Filter="h;hpp;hxx;hm;inl;inc;xsd" UniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}">\r
+                       <File RelativePath="..\..\include\tbb\_concurrent_queue_internal.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\_tbb_windef.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\aligned_space.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\atomic.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\blocked_range.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\blocked_range2d.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\blocked_range3d.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\cache_aligned_allocator.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\combinable.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\concurrent_hash_map.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbb\semaphore.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbb\concurrent_monitor.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\concurrent_queue.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\old\concurrent_queue_v2.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\concurrent_vector.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\old\concurrent_vector_v2.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\critical_section.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbb\dynamic_link.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\enumerable_thread_specific.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbb\gate.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\test\harness.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\test\harness_allocator.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\test\harness_assert.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\test\harness_bad_expr.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\test\harness_barrier.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\test\harness_concurrency_tracker.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\test\harness_cpu.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\test\harness_eh.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\test\harness_iterator.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\test\harness_lrb.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\test\harness_m128.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\test\harness_memory.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\test\harness_report.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\machine\ibm_aix51.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbb\itt_notify.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\machine\linux_common.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\machine\linux_ia32.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\machine\linux_ia64.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\machine\linux_intel64.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\machine\mac_ppc.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\mutex.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\null_mutex.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\null_rw_mutex.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\parallel_do.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\parallel_for.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\parallel_for_each.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\parallel_invoke.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\parallel_reduce.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\parallel_scan.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\parallel_sort.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\parallel_while.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\partitioner.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\pipeline.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\compat\ppl.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\queuing_mutex.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\queuing_rw_mutex.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\reader_writer_lock.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\recursive_mutex.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\scalable_allocator.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\spin_mutex.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\spin_rw_mutex.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\old\spin_rw_mutex_v2.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\task.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\task_group.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\task_scheduler_init.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\task_scheduler_observer.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tbb.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tbb_allocator.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbb\tbb_assert_impl.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tbb_config.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tbb_config_lrb.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tbb_exception.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tbb_machine.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbb\tbb_misc.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tbb_profiling.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tbb_stddef.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tbb_thread.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbb\tbb_version.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tbbmalloc_proxy.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\test\test_allocator.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\test\test_allocator_STL.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tick_count.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbb\tls.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\machine\windows_ia32.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\machine\windows_intel64.h">\r
+                       </File>\r
+               </Filter>\r
+               <Filter Name="Resource Files" Filter="rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav" UniqueIdentifier="{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}">\r
+                       <File RelativePath="..\..\src\tbb\tbb_resource.rc">\r
+                               <FileConfiguration Name="Debug|Win32">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug|x64">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release|Win32">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release|x64">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|Win32">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|x64">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release-MT|Win32">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release-MT|x64">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                       </File>\r
+               </Filter>\r
+       </Files>\r
+       <Globals>\r
+       </Globals>\r
+</VisualStudioProject>\r
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/vsproject/tbbmalloc.vcproj b/deal.II/contrib/tbb/tbb30_104oss/build/vsproject/tbbmalloc.vcproj
new file mode 100644
index 0000000..f459f5f
--- /dev/null
+++ b/deal.II/contrib/tbb/tbb30_104oss/build/vsproject/tbbmalloc.vcproj
@@ -0,0 +1,452 @@
+<?xml version="1.0" encoding="windows-1251"?>\r
+<VisualStudioProject ProjectType="Visual C++" Version="8,00" Name="tbbmalloc" ProjectGUID="{B15F131E-328A-4D42-ADC2-9FF4CA6306D8}" RootNamespace="tbbmalloc" Keyword="Win32Proj">\r
+       <Platforms>\r
+               <Platform Name="Win32"/>\r
+               <Platform Name="x64"/>\r
+       </Platforms>\r
+       <ToolFiles>\r
+               <DefaultToolFile FileName="masm.rules"/>\r
+       </ToolFiles>\r
+       <Configurations>\r
+               <Configuration Name="Debug|Win32" OutputDirectory="$(SolutionDir)ia32\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)ia32\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MDd /Od /Ob0 /Zi /EHs- /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG /D_USE_RTM_VERSION /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /I../../src /I../../src/rml/include /I../../include /I../../src/tbbmalloc /I../../src/tbbmalloc" Optimization="0" AdditionalIncludeDirectories="." PreprocessorDefinitions="" MinimalRebuild="true" ExceptionHandling="0" BasicRuntimeChecks="0" RuntimeLibrary="3" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO  /DEF:&quot;$(IntDir)\tbbmalloc.def&quot;" OutputFile="$(OutDir)\tbbmalloc_debug.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" TargetMachine="1"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+               <Configuration Name="Debug|x64" OutputDirectory="$(SolutionDir)intel64\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)intel64\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool" TargetEnvironment="3"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MDd /Od /Ob0 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG /D_USE_RTM_VERSION /GS- /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /W4 /Wp64 /I../../src /I../../src/rml/include /I../../include" Optimization="0" AdditionalIncludeDirectories="." MinimalRebuild="false" ExceptionHandling="0" BasicRuntimeChecks="0" BufferSecurityCheck="false" RuntimeLibrary="3" TreatWChar_tAsBuiltInType="true" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3" ShowIncludes="false"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO  /DEF:&quot;$(IntDir)\tbbmalloc.def&quot;" OutputFile="$(OutDir)\tbbmalloc_debug.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" TargetMachine="17"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+               <Configuration Name="Release|Win32" OutputDirectory="$(SolutionDir)ia32\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)ia32\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0" WholeProgramOptimization="1">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MD /O2 /Zi /EHs- /Zc:forScope /Zc:wchar_t /D_USE_RTM_VERSION /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /I../../src /I../../src/rml/include /I../../include /I../../src/tbbmalloc /I../../src/tbbmalloc" AdditionalIncludeDirectories="." PreprocessorDefinitions="" ExceptionHandling="0" RuntimeLibrary="2" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO  /DEF:&quot;$(IntDir)\tbbmalloc.def&quot;" OutputFile="$(OutDir)\tbbmalloc.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" OptimizeReferences="2" EnableCOMDATFolding="2" TargetMachine="1"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+               <Configuration Name="Release|x64" OutputDirectory="$(SolutionDir)intel64\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)intel64\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0" WholeProgramOptimization="1">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool" TargetEnvironment="3"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MD /O2 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /D_USE_RTM_VERSION /GS- /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /W4 /Wp64 /I../../src /I../../src/rml/include /I../../include" AdditionalIncludeDirectories="." PreprocessorDefinitions="" ExceptionHandling="0" BufferSecurityCheck="false" RuntimeLibrary="2" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO /DEF:&quot;$(IntDir)\tbbmalloc.def&quot;" OutputFile="$(OutDir)\tbbmalloc.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" OptimizeReferences="2" EnableCOMDATFolding="2" TargetMachine="17"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+               <Configuration Name="Debug-MT|Win32" OutputDirectory="$(SolutionDir)ia32\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)ia32\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MTd /Od /Ob0 /Zi /EHs- /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /I../../src /I../../src/rml/include /I../../include /I../../src/tbbmalloc /I../../src/tbbmalloc" Optimization="0" AdditionalIncludeDirectories="." PreprocessorDefinitions="" MinimalRebuild="true" ExceptionHandling="0" BasicRuntimeChecks="0" RuntimeLibrary="1" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO  /DEF:&quot;$(IntDir)\tbbmalloc.def&quot;" OutputFile="$(OutDir)\tbbmalloc_debug.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" TargetMachine="1"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+               <Configuration Name="Debug-MT|x64" OutputDirectory="$(SolutionDir)intel64\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)intel64\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool" TargetEnvironment="3"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MTd /Od /Ob0 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG /GS- /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /W4 /I../../src /I../../src/rml/include /I../../include" Optimization="0" AdditionalIncludeDirectories="." MinimalRebuild="false" ExceptionHandling="0" BufferSecurityCheck="false" BasicRuntimeChecks="0" RuntimeLibrary="1" TreatWChar_tAsBuiltInType="true" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3" ShowIncludes="false"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO  /DEF:&quot;$(IntDir)\tbbmalloc.def&quot;" OutputFile="$(OutDir)\tbbmalloc_debug.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" TargetMachine="17"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+               <Configuration Name="Release-MT|Win32" OutputDirectory="$(SolutionDir)ia32\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)ia32\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0" WholeProgramOptimization="1">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MT /O2 /Zi /EHs- /Zc:forScope /Zc:wchar_t /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 /I../../src /I../../src/rml/include /I../../include /I../../src/tbbmalloc /I../../src/tbbmalloc" AdditionalIncludeDirectories="." PreprocessorDefinitions="" ExceptionHandling="0" RuntimeLibrary="0" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO  /DEF:&quot;$(IntDir)\tbbmalloc.def&quot;" OutputFile="$(OutDir)\tbbmalloc.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" OptimizeReferences="2" EnableCOMDATFolding="2" TargetMachine="1"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+               <Configuration Name="Release-MT|x64" OutputDirectory="$(SolutionDir)intel64\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)intel64\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0" WholeProgramOptimization="1">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool" TargetEnvironment="3"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MT /O2 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /GS- /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /W4 /I../../src /I../../src/rml/include /I../../include" AdditionalIncludeDirectories="." PreprocessorDefinitions="" ExceptionHandling="0" BufferSecurityCheck="false" RuntimeLibrary="0" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO /DEF:&quot;$(IntDir)\tbbmalloc.def&quot;" OutputFile="$(OutDir)\tbbmalloc.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" OptimizeReferences="2" EnableCOMDATFolding="2" TargetMachine="17"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+       </Configurations>\r
+       <References>\r
+       </References>\r
+       <Files>\r
+               <Filter Name="Source Files" Filter="cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx" UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}">\r
+                       <File RelativePath="..\..\src\tbb\intel64-masm\atomic_support.asm">\r
+                               <FileConfiguration Name="Debug|Win32" ExcludedFromBuild="true">\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug|x64">\r
+                                       <Tool Name="VCCustomBuildTool" Description="building atomic_support.obj" CommandLine="ml64 /Fo&quot;intel64\Debug\atomic_support.obj&quot; /DUSE_FRAME_POINTER /DEM64T=1 /c /Zi ../../src/tbb/intel64-masm/atomic_support.asm\r
+" Outputs="intel64\Debug\atomic_support.obj"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release|Win32" ExcludedFromBuild="true">\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release|x64">\r
+                                       <Tool Name="VCCustomBuildTool" Description="building atomic_support.obj" CommandLine="ml64 /Fo&quot;intel64\Release\atomic_support.obj&quot;  /DEM64T=1 /c /Zi ../../src/tbb/intel64-masm/atomic_support.asm\r
+" Outputs="intel64\Release\atomic_support.obj"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|Win32" ExcludedFromBuild="true">\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|x64">\r
+                                       <Tool Name="VCCustomBuildTool" Description="building atomic_support.obj" CommandLine="ml64 /Fo&quot;intel64\Debug-MT\atomic_support.obj&quot; /DUSE_FRAME_POINTER /DEM64T=1 /c /Zi ../../src/tbb/intel64-masm/atomic_support.asm\r
+" Outputs="intel64\Debug-MT\atomic_support.obj"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release-MT|Win32" ExcludedFromBuild="true">\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release-MT|x64">\r
+                                       <Tool Name="VCCustomBuildTool" Description="building atomic_support.obj" CommandLine="ml64 /Fo&quot;intel64\Release-MT\atomic_support.obj&quot;  /DEM64T=1 /c /Zi ../../src/tbb/intel64-masm/atomic_support.asm\r
+" Outputs="intel64\Release-MT\atomic_support.obj"/>\r
+                               </FileConfiguration>\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbb\ia32-masm\atomic_support.asm">\r
+                               <FileConfiguration Name="Debug|Win32">\r
+                                       <Tool Name="MASM" AdditionalOptions="/coff /Zi"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="MASM" AdditionalOptions="/coff /Zi"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="MASM"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|Win32">\r
+                                       <Tool Name="MASM" AdditionalOptions="/coff /Zi"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="MASM" AdditionalOptions="/coff /Zi"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release-MT|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="MASM"/>\r
+                               </FileConfiguration>\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbb\ia32-masm\lock_byte.asm">\r
+                               <FileConfiguration Name="Debug|Win32">\r
+                                       <Tool Name="MASM" AdditionalOptions="/coff /Zi"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="MASM" AdditionalOptions="/coff /Zi"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="MASM"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|Win32">\r
+                                       <Tool Name="MASM" AdditionalOptions="/coff /Zi"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="MASM" AdditionalOptions="/coff /Zi"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release-MT|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="MASM"/>\r
+                               </FileConfiguration>\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbbmalloc\win32-tbbmalloc-export.def">\r
+                               <FileConfiguration Name="Debug|Win32">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbbmalloc.def file" CommandLine="cl /nologo /TC /EP ../../src/tbbmalloc/win32-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbbmalloc.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbbmalloc.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbbmalloc/win32-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbbmalloc.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbbmalloc.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release|Win32">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbbmalloc.def file" CommandLine="cl /nologo /TC /EP ../../src/tbbmalloc/win32-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbbmalloc.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbbmalloc.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbb/win32-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbb.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbb.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|Win32">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbbmalloc.def file" CommandLine="cl /nologo /TC /EP ../../src/tbbmalloc/win32-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbbmalloc.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbbmalloc.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbbmalloc/win32-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbbmalloc.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbbmalloc.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release-MT|Win32">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbbmalloc.def file" CommandLine="cl /nologo /TC /EP ../../src/tbbmalloc/win32-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbbmalloc.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbbmalloc.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release-MT|x64" ExcludedFromBuild="true">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbb/win32-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbb.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbb.def&quot;"/>\r
+                               </FileConfiguration>\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbbmalloc\win64-tbbmalloc-export.def">\r
+                               <FileConfiguration Name="Debug|Win32" ExcludedFromBuild="true">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbb/win64-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbb.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbb.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug|x64">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbbmalloc.def file" CommandLine="cl /nologo /TC /EP ../../src/tbbmalloc/win64-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbbmalloc.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbbmalloc.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release|Win32" ExcludedFromBuild="true">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbb/win64-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbb.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbb.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release|x64">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbbmalloc.def file" CommandLine="cl /nologo /TC /EP ../../src/tbbmalloc/win64-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbbmalloc.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbbmalloc.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|Win32" ExcludedFromBuild="true">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbb/win64-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbb.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbb.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|x64">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbbmalloc.def file" CommandLine="cl /nologo /TC /EP ../../src/tbbmalloc/win64-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbbmalloc.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbbmalloc.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release-MT|Win32" ExcludedFromBuild="true">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbb.def file" CommandLine="cl /nologo /TC /EP ../../src/tbb/win64-tbb-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbb.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbb.def&quot;"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release-MT|x64">\r
+                                       <Tool Name="VCCustomBuildTool" Description="generating tbbmalloc.def file" CommandLine="cl /nologo /TC /EP ../../src/tbbmalloc/win64-tbbmalloc-export.def /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 &gt;&quot;$(IntDir)\tbbmalloc.def&quot;\r
+" Outputs="&quot;$(IntDir)\tbbmalloc.def&quot;"/>\r
+                               </FileConfiguration>\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbbmalloc\tbbmalloc.cpp"/><File RelativePath="..\..\src\tbb\dynamic_link.cpp"/><File RelativePath="..\..\src\tbbmalloc\frontend.cpp"/><File RelativePath="..\..\src\tbbmalloc\backend.cpp"/><File RelativePath="..\..\src\tbbmalloc\large_objects.cpp"/><File RelativePath="..\..\src\tbbmalloc\backref.cpp"/><File RelativePath="..\..\src\tbb\tbb_misc.cpp"/><File RelativePath="..\..\src\tbb\itt_notify.cpp"/></Filter>\r
+               <Filter Name="Header Files" Filter="h;hpp;hxx;hm;inl;inc;xsd" UniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}">\r
+                       <File RelativePath="..\..\include\tbb\_concurrent_queue_internal.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\_tbb_windef.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\aligned_space.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\atomic.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\blocked_range.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\blocked_range2d.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\blocked_range3d.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\cache_aligned_allocator.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\combinable.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\concurrent_hash_map.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\concurrent_queue.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\concurrent_vector.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\critical_section.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbbmalloc\Customize.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\enumerable_thread_specific.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbbmalloc\LifoList.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbbmalloc\MapMemory.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\mutex.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\null_mutex.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\null_rw_mutex.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\parallel_do.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\parallel_for.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\parallel_for_each.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\parallel_invoke.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\parallel_reduce.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\parallel_scan.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\parallel_sort.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\parallel_while.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\partitioner.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\pipeline.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbbmalloc\proxy.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\queuing_mutex.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\queuing_rw_mutex.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\recursive_mutex.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\scalable_allocator.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\spin_mutex.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\spin_rw_mutex.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbbmalloc\Statistics.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\task.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\task_group.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\task_scheduler_init.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\task_scheduler_observer.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tbb.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tbb_allocator.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tbb_config.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tbb_config_lrb.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tbb_exception.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbbmalloc\tbb_function_replacement.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tbb_machine.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tbb_profiling.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tbb_stddef.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tbb_thread.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tbb_version.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tbbmalloc_proxy.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tick_count.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\src\tbbmalloc\TypeDefinitions.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\machine\windows_ia32.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\machine\windows_intel64.h">\r
+                       </File>\r
+               </Filter>\r
+               <Filter Name="Resource Files" Filter="rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav" UniqueIdentifier="{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}">\r
+                       <File RelativePath="..\..\src\tbbmalloc\tbbmalloc.rc">\r
+                               <FileConfiguration Name="Debug|Win32">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug|x64">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release|Win32">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release|x64">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|Win32">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|x64">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release-MT|Win32">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release-MT|x64">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                       </File>\r
+               </Filter>\r
+       </Files>\r
+       <Globals>\r
+       </Globals>\r
+</VisualStudioProject>\r
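The VCCustomBuildTool steps in the project above generate the linker module-definition files by running the C preprocessor over an export template. Outside Visual Studio the same step can be reproduced from a Visual Studio command prompt roughly as follows (the working directory and output path are illustrative; the project itself writes into $(IntDir)):

    rem Sketch of the .def generation shown in the custom build rules above.
    rem Run from build\vsproject so the relative paths match the project file.
    cl /nologo /TC /EP ..\..\src\tbbmalloc\win64-tbbmalloc-export.def ^
       /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE ^
       /D_WIN32_WINNT=0x0400 /D__TBB_BUILD=1 > tbbmalloc.def

The preprocessor output is then fed to the linker through the /DEF: option in the corresponding configuration.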
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/vsproject/tbbmalloc_proxy.vcproj b/deal.II/contrib/tbb/tbb30_104oss/build/vsproject/tbbmalloc_proxy.vcproj
new file mode 100644 (file)
index 0000000..233f247
--- /dev/null
@@ -0,0 +1,206 @@
+<?xml version="1.0" encoding="windows-1251"?>\r
+<VisualStudioProject ProjectType="Visual C++" Version="8,00" Name="tbbmalloc_proxy" ProjectGUID="{02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}" RootNamespace="tbbmalloc_proxy" Keyword="Win32Proj">\r
+       <Platforms>\r
+               <Platform Name="Win32"/>\r
+               <Platform Name="x64"/>\r
+       </Platforms>\r
+       <ToolFiles>\r
+               <DefaultToolFile FileName="masm.rules"/>\r
+       </ToolFiles>\r
+       <Configurations>\r
+               <Configuration Name="Debug|Win32" OutputDirectory="$(SolutionDir)ia32\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)ia32\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MDd /Od /Ob0 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG /D_USE_RTM_VERSION /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /W4 /Wp64 /I../../src /I../../src/rml/include /I../../include /I../../src/tbbmalloc /I../../src/tbbmalloc" Optimization="0" AdditionalIncludeDirectories="." PreprocessorDefinitions="" MinimalRebuild="true" ExceptionHandling="1" BasicRuntimeChecks="0" RuntimeLibrary="3" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO " OutputFile="$(OutDir)\tbbmalloc_proxy_debug.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" TargetMachine="1"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+               <Configuration Name="Debug|x64" OutputDirectory="$(SolutionDir)intel64\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)intel64\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool" TargetEnvironment="3"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MDd /Od /Ob0 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG /D_USE_RTM_VERSION /GS- /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /W4 /Wp64 /I../../src /I../../src/rml/include /I../../include /I../../src/tbbmalloc /I../../src/tbbmalloc" Optimization="0" AdditionalIncludeDirectories="." MinimalRebuild="false" ExceptionHandling="0" BasicRuntimeChecks="0" BufferSecurityCheck="false" RuntimeLibrary="3" TreatWChar_tAsBuiltInType="true" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3" ShowIncludes="false"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO" OutputFile="$(OutDir)\tbbmalloc_proxy_debug.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" TargetMachine="17"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+               <Configuration Name="Release|Win32" OutputDirectory="$(SolutionDir)ia32\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)ia32\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0" WholeProgramOptimization="1">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MD /O2 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /D_USE_RTM_VERSION /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /W4 /Wp64 /I../../src /I../../src/rml/include /I../../include /I../../src/tbbmalloc /I../../src/tbbmalloc" AdditionalIncludeDirectories="." PreprocessorDefinitions="" ExceptionHandling="0" RuntimeLibrary="2" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO  " OutputFile="$(OutDir)\tbbmalloc_proxy.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" OptimizeReferences="2" EnableCOMDATFolding="2" TargetMachine="1"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+               <Configuration Name="Release|x64" OutputDirectory="$(SolutionDir)intel64\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)intel64\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0" WholeProgramOptimization="1">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool" TargetEnvironment="3"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MD /O2 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /D_USE_RTM_VERSION /GS- /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /W4 /Wp64 /I../../src /I../../src/rml/include /I../../include /I../../src/tbbmalloc /I../../src/tbbmalloc" AdditionalIncludeDirectories="." PreprocessorDefinitions="" ExceptionHandling="0" BufferSecurityCheck="false" RuntimeLibrary="2" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO" OutputFile="$(OutDir)\tbbmalloc_proxy.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" OptimizeReferences="2" EnableCOMDATFolding="2" TargetMachine="17"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+               <Configuration Name="Debug-MT|Win32" OutputDirectory="$(SolutionDir)ia32\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)ia32\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MTd /Od /Ob0 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /W4 /I../../src /I../../src/rml/include /I../../include /I../../src/tbbmalloc /I../../src/tbbmalloc" Optimization="0" AdditionalIncludeDirectories="." PreprocessorDefinitions="" MinimalRebuild="true" ExceptionHandling="1" BasicRuntimeChecks="0" RuntimeLibrary="1" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO " OutputFile="$(OutDir)\tbbmalloc_proxy_debug.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" TargetMachine="1"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+               <Configuration Name="Debug-MT|x64" OutputDirectory="$(SolutionDir)intel64\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)intel64\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool" TargetEnvironment="3"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MTd /Od /Ob0 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG /GS- /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /W4 /I../../src /I../../src/rml/include /I../../include /I../../src/tbbmalloc /I../../src/tbbmalloc" Optimization="0" AdditionalIncludeDirectories="." MinimalRebuild="false" ExceptionHandling="0" BasicRuntimeChecks="0" BufferSecurityCheck="false" RuntimeLibrary="1" TreatWChar_tAsBuiltInType="true" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3" ShowIncludes="false"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO" OutputFile="$(OutDir)\tbbmalloc_proxy_debug.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" TargetMachine="17"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+               <Configuration Name="Release-MT|Win32" OutputDirectory="$(SolutionDir)ia32\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)ia32\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0" WholeProgramOptimization="1">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MT /O2 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /W4 /I../../src /I../../src/rml/include /I../../include /I../../src/tbbmalloc /I../../src/tbbmalloc" AdditionalIncludeDirectories="." PreprocessorDefinitions="" ExceptionHandling="0" RuntimeLibrary="0" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO  " OutputFile="$(OutDir)\tbbmalloc_proxy.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" OptimizeReferences="2" EnableCOMDATFolding="2" TargetMachine="1"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+               <Configuration Name="Release-MT|x64" OutputDirectory="$(SolutionDir)intel64\$(ConfigurationName)" IntermediateDirectory="$(SolutionDir)intel64\$(ConfigurationName)" ConfigurationType="2" CharacterSet="0" WholeProgramOptimization="1">\r
+                       <Tool Name="VCPreBuildEventTool"/>\r
+                       <Tool Name="VCCustomBuildTool"/>\r
+                       <Tool Name="MASM"/>\r
+                       <Tool Name="VCXMLDataGeneratorTool"/>\r
+                       <Tool Name="VCMIDLTool" TargetEnvironment="3"/>\r
+                       <Tool Name="VCCLCompilerTool" AdditionalOptions=" /c /MT /O2 /Zi /EHsc /GR /Zc:forScope /Zc:wchar_t /GS- /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400 /W4 /I../../src /I../../src/rml/include /I../../include /I../../src/tbbmalloc /I../../src/tbbmalloc" AdditionalIncludeDirectories="." PreprocessorDefinitions="" ExceptionHandling="0" BufferSecurityCheck="false" RuntimeLibrary="0" UsePrecompiledHeader="0" WarningLevel="4" DebugInformationFormat="3"/>\r
+                       <Tool Name="VCManagedResourceCompilerTool"/>\r
+                       <Tool Name="VCResourceCompilerTool"/>\r
+                       <Tool Name="VCPreLinkEventTool"/>\r
+                       <Tool Name="VCLinkerTool" AdditionalOptions="/nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO" OutputFile="$(OutDir)\tbbmalloc_proxy.dll" LinkIncremental="1" GenerateDebugInformation="true" SubSystem="2" OptimizeReferences="2" EnableCOMDATFolding="2" TargetMachine="17"/>\r
+                       <Tool Name="VCALinkTool"/>\r
+                       <Tool Name="VCManifestTool"/>\r
+                       <Tool Name="VCXDCMakeTool"/>\r
+                       <Tool Name="VCBscMakeTool"/>\r
+                       <Tool Name="VCFxCopTool"/>\r
+                       <Tool Name="VCAppVerifierTool"/>\r
+                       <Tool Name="VCPostBuildEventTool"/>\r
+               </Configuration>\r
+       </Configurations>\r
+       <References>\r
+       </References>\r
+       <Files>\r
+               <Filter Name="Source Files" Filter="cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx" UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}">\r
+                       <File RelativePath="..\..\src\tbbmalloc\proxy.cpp"/><File RelativePath="..\..\src\tbbmalloc\tbb_function_replacement.cpp"/></Filter>\r
+               <Filter Name="Header Files" Filter="h;hpp;hxx;hm;inl;inc;xsd" UniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}">\r
+                       <File RelativePath="..\..\src\tbbmalloc\tbb_function_replacement.h">\r
+                       </File>\r
+                       <File RelativePath="..\..\include\tbb\tbbmalloc_proxy.h">\r
+                       </File>\r
+               </Filter>\r
+               <Filter Name="Resource Files" Filter="rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav" UniqueIdentifier="{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}">\r
+                       <File RelativePath="..\..\src\tbbmalloc\tbbmalloc.rc">\r
+                               <FileConfiguration Name="Debug|Win32">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug|x64">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release|Win32">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release|x64">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|Win32">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Debug-MT|x64">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release-MT|Win32">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                               <FileConfiguration Name="Release-MT|x64">\r
+                                       <Tool Name="VCResourceCompilerTool" AdditionalOptions="/I../../src /I../../include /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE /D_WIN32_WINNT=0x0400"/>\r
+                               </FileConfiguration>\r
+                       </File>\r
+               </Filter>\r
+       </Files>\r
+       <Globals>\r
+       </Globals>\r
+</VisualStudioProject>\r
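For context, the tbbmalloc_proxy library built by this project is the drop-in replacement for the standard allocator. Per the standard TBB malloc-proxy documentation (not something defined in this project file), an application opts in simply by including the proxy header; the sketch below assumes the resulting tbbmalloc_proxy(_debug).dll is on the PATH at run time:

    // main.cpp -- illustration only: route malloc/free through the TBB scalable allocator
    #include "tbb/tbbmalloc_proxy.h"   // pulls in the proxy DLL via MSVC auto-linking
    #include <cstdlib>

    int main() {
        void* p = std::malloc(64);     // now serviced by the replacement allocator
        std::free(p);
        return 0;
    }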
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/vsproject/version_string.tmp b/deal.II/contrib/tbb/tbb30_104oss/build/vsproject/version_string.tmp
new file mode 100644 (file)
index 0000000..2098d67
--- /dev/null
@@ -0,0 +1 @@
+#define __TBB_VERSION_STRINGS "Empty"
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/windows.cl.inc b/deal.II/contrib/tbb/tbb30_104oss/build/windows.cl.inc
new file mode 100644 (file)
index 0000000..39481aa
--- /dev/null
@@ -0,0 +1,123 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+#------------------------------------------------------------------------------
+# Define compiler-specific variables.
+#------------------------------------------------------------------------------
+
+
+#------------------------------------------------------------------------------
+# Setting compiler flags.
+#------------------------------------------------------------------------------
+CPLUS = cl /nologo
+LINK_FLAGS = /link /nologo
+LIB_LINK_FLAGS=/link /nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO
+ifeq ($(runtime), vc_mt)
+       MS_CRT_KEY = /MT$(if $(findstring debug,$(cfg)),d)
+else
+       MS_CRT_KEY = /MD$(if $(findstring debug,$(cfg)),d)
+endif
+EH_FLAGS = /EHsc /GR
+              
+ifeq ($(cfg), release)
+        CPLUS_FLAGS = $(MS_CRT_KEY) /O2 /Zi $(EH_FLAGS) /Zc:forScope /Zc:wchar_t
+        ASM_FLAGS =
+endif
+ifeq ($(cfg), debug)
+        CPLUS_FLAGS = $(MS_CRT_KEY) /Od /Ob0 /Zi $(EH_FLAGS) /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG
+        ASM_FLAGS = /DUSE_FRAME_POINTER
+endif
+
+
+COMPILE_ONLY = /c
+PREPROC_ONLY = /TC /EP
+INCLUDE_KEY = /I
+DEFINE_KEY = /D
+OUTPUT_KEY = /Fe
+OUTPUTOBJ_KEY = /Fo
+WARNING_AS_ERROR_KEY = /WX
+
+ifeq ($(runtime),vc7.1)
+        WARNING_KEY = /W3
+else
+        WARNING_KEY = /W4
+endif
+
+DYLIB_KEY = /DLL
+EXPORT_KEY = /DEF:
+
+ifeq ($(runtime),vc8)
+        OPENMP_FLAG = /openmp
+        WARNING_KEY += /Wp64
+        CPLUS_FLAGS += /D_USE_RTM_VERSION
+endif
+ifeq ($(runtime),vc9)
+        OPENMP_FLAG = /openmp
+endif
+ifeq ($(runtime),vc_mt)
+        OPENMP_FLAG = /openmp
+endif
+ifeq (intel64,$(arch))
+        CPLUS_FLAGS += /GS-
+endif
+
+CPLUS_FLAGS += /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE \
+        /D_WIN32_WINNT=$(_WIN32_WINNT)
+C_FLAGS = $(CPLUS_FLAGS)
+#------------------------------------------------------------------------------
+# End of setting compiler flags.
+#------------------------------------------------------------------------------
+
+
+#------------------------------------------------------------------------------
+# Setting assembler data.
+#------------------------------------------------------------------------------
+ASSEMBLY_SOURCE=$(arch)-masm
+ifeq (intel64,$(arch))
+    ASM=ml64 /nologo
+    ASM_FLAGS += /DEM64T=1 /c /Zi
+    TBB_ASM.OBJ = atomic_support.obj
+else
+    ASM=ml /nologo
+    ASM_FLAGS += /c /coff /Zi
+    TBB_ASM.OBJ = atomic_support.obj lock_byte.obj
+endif
+#------------------------------------------------------------------------------
+# End of setting assembler data.
+#------------------------------------------------------------------------------
+
+
+#------------------------------------------------------------------------------
+# Setting tbbmalloc data.
+#------------------------------------------------------------------------------
+M_CPLUS_FLAGS = $(subst $(EH_FLAGS),/EHs-,$(CPLUS_FLAGS))
+#------------------------------------------------------------------------------
+# End of setting tbbmalloc data.
+#------------------------------------------------------------------------------
+
+#------------------------------------------------------------------------------
+# End of define compiler-specific variables.
+#------------------------------------------------------------------------------
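These variables are not used standalone: the generic TBB makefiles are expected to include windows.cl.inc when building on Windows with compiler=cl. An illustrative invocation from a Visual Studio command prompt (target names assumed from the standard TBB top-level Makefile; arch and runtime are normally auto-detected via detect.js, see windows.inc later in this patch):

    rem Illustrative only; requires GNU make on the PATH.
    cd tbb30_104oss
    gmake compiler=cl arch=ia32 runtime=vc9 tbb tbbmalloc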
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/windows.gcc.inc b/deal.II/contrib/tbb/tbb30_104oss/build/windows.gcc.inc
new file mode 100644 (file)
index 0000000..b0caa89
--- /dev/null
@@ -0,0 +1,132 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+#------------------------------------------------------------------------------
+# Overriding settings from windows.inc
+#------------------------------------------------------------------------------
+
+SLASH= $(strip \)
+OBJ = o
+LIBEXT = dll # MinGW allows linking with DLLs directly
+
+TBB.RES =
+MALLOC.RES =
+RML.RES =
+TBB.MANIFEST =
+MALLOC.MANIFEST =
+RML.MANIFEST =
+
+# TODO: do better when/if mingw64 support is added
+ifeq (ia32,$(arch))
+    TBB.DEF = $(tbb_root)/src/tbb/lin32-tbb-export.def
+else 
+    TBB.DEF = $(tbb_root)/src/tbb/win64-gcc-tbb-export.def
+endif 
+MALLOC.DEF = $(MALLOC_ROOT)/win-gcc-tbbmalloc-export.def
+RML.DEF = $(RML_SERVER_ROOT)/lin-rml-export.def
+
+LINK_TBB.LIB = $(TBB.LIB)
+
+#------------------------------------------------------------------------------
+# End of overridden settings
+#------------------------------------------------------------------------------
+# Compiler-specific variables
+#------------------------------------------------------------------------------
+
+CPLUS = g++ 
+COMPILE_ONLY = -c -MMD
+PREPROC_ONLY = -E -x c
+INCLUDE_KEY = -I
+DEFINE_KEY = -D
+OUTPUT_KEY = -o #
+OUTPUTOBJ_KEY = -o #
+PIC_KEY =
+WARNING_AS_ERROR_KEY = -Werror
+WARNING_KEY = -Wall  -Wno-uninitialized
+WARNING_SUPPRESS = -Wno-parentheses
+DYLIB_KEY = -shared
+LIBDL = 
+EXPORT_KEY = -Wl,--version-script,
+LIBS = -lpsapi
+
+#------------------------------------------------------------------------------
+# End of compiler-specific variables
+#------------------------------------------------------------------------------
+# Command lines
+#------------------------------------------------------------------------------
+
+LINK_FLAGS = -Wl,--enable-auto-import
+LIB_LINK_FLAGS = $(DYLIB_KEY)
+
+ifeq ($(cfg), release)
+        CPLUS_FLAGS = -O2
+endif
+ifeq ($(cfg), debug)
+        CPLUS_FLAGS = -g -O0 -DTBB_USE_DEBUG
+endif
+CPLUS_FLAGS += -DUSE_WINTHREAD
+
+# MinGW specific
+CPLUS_FLAGS += -D__MSVCRT_VERSION__=0x0700 -msse -mthreads
+
+CONLY = gcc
+C_FLAGS = $(CPLUS_FLAGS)
+
+ifeq (intel64,$(arch))
+    CPLUS_FLAGS += -m64
+    LIB_LINK_FLAGS += -m64
+endif 
+
+ifeq (ia32,$(arch))
+    CPLUS_FLAGS += -m32
+    LIB_LINK_FLAGS += -m32
+endif 
+
+# For examples
+export UNIXMODE = 1
+
+#------------------------------------------------------------------------------
+# End of command lines
+#------------------------------------------------------------------------------
+# Setting assembler data
+#------------------------------------------------------------------------------
+
+ASM=
+ASM_FLAGS=
+TBB_ASM.OBJ=
+ASSEMBLY_SOURCE=$(arch)-gas
+
+#------------------------------------------------------------------------------
+# End of setting assembler data
+#------------------------------------------------------------------------------
+# Setting tbbmalloc data
+#------------------------------------------------------------------------------
+
+M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions
+
+#------------------------------------------------------------------------------
+# End of setting tbbmalloc data
+#------------------------------------------------------------------------------
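This MinGW variant replaces the MSVC .def handling with GNU ld version scripts (EXPORT_KEY) and links against psapi. Roughly, the flags above combine at link time as in the following sketch (a simplified illustration, not the exact command the generic makefiles compose; object files are placeholders):

    # shell sketch: linking the 32-bit debug tbb DLL with the settings above
    g++ -shared -m32 -Wl,--enable-auto-import \
        -Wl,--version-script,src/tbb/lin32-tbb-export.def \
        -o tbb_debug.dll <object files...> -lpsapi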
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/windows.icl.inc b/deal.II/contrib/tbb/tbb30_104oss/build/windows.icl.inc
new file mode 100644 (file)
index 0000000..d2a12e8
--- /dev/null
@@ -0,0 +1,151 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+#------------------------------------------------------------------------------
+# Define compiler-specific variables.
+#------------------------------------------------------------------------------
+
+
+#------------------------------------------------------------------------------
+# Setting default configuration to release.
+#------------------------------------------------------------------------------
+cfg ?= release
+#------------------------------------------------------------------------------
+# End of setting default configuration to release.
+#------------------------------------------------------------------------------
+
+
+#------------------------------------------------------------------------------
+# Setting compiler flags.
+#------------------------------------------------------------------------------
+CPLUS = icl /nologo $(VCCOMPAT_FLAG)
+LINK_FLAGS = /link /nologo
+LIB_LINK_FLAGS= /link /nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO
+ifeq ($(runtime), vc_mt)
+       MS_CRT_KEY = /MT$(if $(findstring debug,$(cfg)),d)
+else
+       MS_CRT_KEY = /MD$(if $(findstring debug,$(cfg)),d)
+endif
+EH_FLAGS = /EHsc /GR
+
+ifeq ($(cfg), release)
+    CPLUS_FLAGS = $(MS_CRT_KEY) /O2 /Zi $(EH_FLAGS) /Zc:forScope /Zc:wchar_t
+    ASM_FLAGS =
+endif
+ifeq ($(cfg), debug)
+    CPLUS_FLAGS = $(MS_CRT_KEY) /Od /Ob0 /Zi $(EH_FLAGS) /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG
+    ASM_FLAGS = /DUSE_FRAME_POINTER
+endif
+
+
+COMPILE_ONLY = /c /QMMD
+PREPROC_ONLY = /EP /Tc
+INCLUDE_KEY = /I
+DEFINE_KEY = /D
+OUTPUT_KEY = /Fe
+OUTPUTOBJ_KEY = /Fo
+WARNING_AS_ERROR_KEY = /WX
+WARNING_KEY = /W3
+DYLIB_KEY = /DLL
+EXPORT_KEY = /DEF:
+
+ifeq (intel64,$(arch))
+    CPLUS_FLAGS += /GS-
+endif
+
+ifneq (,$(codecov))
+    CPLUS_FLAGS += /Qprof-genx
+else
+    CPLUS_FLAGS += /DDO_ITT_NOTIFY
+endif
+
+OPENMP_FLAG = /Qopenmp
+CPLUS_FLAGS += /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE \
+               /D_WIN32_WINNT=$(_WIN32_WINNT)
+
+ifeq ($(runtime),vc8)
+        CPLUS_FLAGS += /D_USE_RTM_VERSION
+endif
+
+
+C_FLAGS = $(CPLUS_FLAGS)
+
+ifneq (00,$(lambdas)$(cpp0x))
+       CPLUS_FLAGS += /Qstd=c++0x /D_TBB_CPP0X
+endif
+
+VCVERSION:=$(runtime)
+VCCOMPAT_FLAG ?= $(if $(findstring vc7.1, $(VCVERSION)),/Qvc7.1)
+ifeq ($(VCCOMPAT_FLAG),)
+        VCCOMPAT_FLAG := $(if $(findstring vc8, $(VCVERSION)),/Qvc8)
+endif
+ifeq ($(VCCOMPAT_FLAG),)
+        VCCOMPAT_FLAG := $(if $(findstring vc_mt, $(VCVERSION)),/Qvc8)
+endif
+ifeq ($(VCCOMPAT_FLAG),)
+        VCCOMPAT_FLAG := $(if $(findstring vc9, $(VCVERSION)),/Qvc9)
+endif
+ifeq ($(VCCOMPAT_FLAG),)
+        VCCOMPAT_FLAG := $(if $(findstring vc10, $(VCVERSION)),/Qvc10)
+endif
+ifeq ($(VCCOMPAT_FLAG),)
+        $(error VC version not detected correctly: $(VCVERSION) )
+endif
+export VCCOMPAT_FLAG
+#------------------------------------------------------------------------------
+# End of setting compiler flags.
+#------------------------------------------------------------------------------
+
+
+#------------------------------------------------------------------------------
+# Setting assembler data.
+#------------------------------------------------------------------------------
+ASSEMBLY_SOURCE=$(arch)-masm
+ifeq (intel64,$(arch))
+    ASM=ml64 /nologo
+    ASM_FLAGS += /DEM64T=1 /c /Zi
+    TBB_ASM.OBJ = atomic_support.obj
+else
+    ASM=ml /nologo
+    ASM_FLAGS += /c /coff /Zi
+    TBB_ASM.OBJ = atomic_support.obj lock_byte.obj
+endif
+#------------------------------------------------------------------------------
+# End of setting assembler data.
+#------------------------------------------------------------------------------
+
+
+#------------------------------------------------------------------------------
+# Setting tbbmalloc data.
+#------------------------------------------------------------------------------
+M_CPLUS_FLAGS = $(subst $(EH_FLAGS),/EHs-,$(CPLUS_FLAGS))
+#------------------------------------------------------------------------------
+# End of setting tbbmalloc data.
+#------------------------------------------------------------------------------
+
+#------------------------------------------------------------------------------
+# End of define compiler-specific variables.
+#------------------------------------------------------------------------------
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/windows.inc b/deal.II/contrib/tbb/tbb30_104oss/build/windows.inc
new file mode 100644 (file)
index 0000000..3900ed3
--- /dev/null
@@ -0,0 +1,102 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+export SHELL = cmd
+
+ifdef tbb_build_dir
+  test_dir:=$(tbb_build_dir)
+else
+  test_dir:=.
+endif
+
+# TODO: give an error if archs don't match
+ifndef arch
+  export arch:=$(shell cmd /C "cscript /nologo /E:jscript $(tbb_root)/build/detect.js /arch $(compiler)")
+endif
+
+ifndef runtime
+  export runtime:=$(shell cmd /C "cscript /nologo /E:jscript $(tbb_root)/build/detect.js /runtime $(compiler)")
+endif
+
+native_compiler := cl
+export compiler ?= cl
+debugger ?= devenv /debugexe
+
+CMD=cmd /C
+CWD=$(shell cmd /C echo %CD%)
+RM=cmd /C del /Q /F
+RD=cmd /C rmdir
+MD=cmd /c mkdir
+SLASH=\\
+NUL = nul
+
+OBJ = obj
+DLL = dll
+LIBEXT = lib
+
+def_prefix = $(if $(findstring ia32,$(arch)),win32,win64)
+
+# Target Windows version. Do not increase beyond 0x0500 without prior discussion!
+# Used as the value for the macro definition option in windows.cl.inc etc.
+_WIN32_WINNT=0x0400
+
+TBB.DEF = $(tbb_root)/src/tbb/$(def_prefix)-tbb-export.def
+TBB.DLL = tbb$(DEBUG_SUFFIX).$(DLL)
+TBB.LIB = tbb$(DEBUG_SUFFIX).$(LIBEXT)
+TBB.RES = tbb_resource.res
+# On Windows, we use #pragma comment to set the proper TBB lib to link with
+# But for cross-configuration testing we need to link explicitly
+LINK_TBB.LIB = $(if $(crosstest),$(TBB.LIB))
+TBB.MANIFEST = 
+ifneq ($(filter vc8 vc9,$(runtime)),)
+    TBB.MANIFEST = tbbmanifest.exe.manifest
+endif
+
+MALLOC.DEF = $(MALLOC_ROOT)/$(def_prefix)-tbbmalloc-export.def
+MALLOC.DLL = tbbmalloc$(DEBUG_SUFFIX).$(DLL)
+MALLOC.LIB = tbbmalloc$(DEBUG_SUFFIX).$(LIBEXT)
+MALLOC.RES = tbbmalloc.res
+MALLOC.MANIFEST =
+ifneq ($(filter vc8 vc9,$(runtime)),)
+MALLOC.MANIFEST = tbbmanifest.exe.manifest
+endif
+LINK_MALLOC.LIB = $(MALLOC.LIB)
+
+MALLOCPROXY.DLL = tbbmalloc_proxy$(DEBUG_SUFFIX).$(DLL)
+MALLOCPROXY.LIB = tbbmalloc_proxy$(DEBUG_SUFFIX).$(LIBEXT)
+
+RML.DEF = $(RML_SERVER_ROOT)/$(def_prefix)-rml-export.def
+RML.DLL = irml$(DEBUG_SUFFIX).$(DLL)
+RML.LIB = irml$(DEBUG_SUFFIX).$(LIBEXT)
+RML.RES = irml.res
+ifneq ($(filter vc8 vc9,$(runtime)),)
+RML.MANIFEST = tbbmanifest.exe.manifest
+endif
+
+MAKE_VERSIONS = cmd /C cscript /nologo /E:jscript $(subst \,/,$(tbb_root))/build/version_info_windows.js $(compiler) $(arch) $(subst \,/,"$(CPLUS) $(CPLUS_FLAGS)") > version_string.tmp
+MAKE_TBBVARS  = cmd /C "$(subst /,\,$(tbb_root))\build\generate_tbbvars.bat"
+
+TEST_LAUNCHER =  $(subst /,\,$(tbb_root))\build\test_launcher.bat
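The LINK_TBB.LIB comment above refers to MSVC auto-linking: a header can embed a default-library record with #pragma comment(lib, ...), so ordinary builds need no explicit tbb.lib on the link line, while cross-configuration tests link it explicitly. A minimal illustration of the mechanism (the guard macro is hypothetical, not TBB's actual header logic; the library names follow the TBB.LIB pattern defined above):

    // auto_link_sketch.h -- illustration only
    #if defined(_MSC_VER) && !defined(SKETCH_NO_IMPLICIT_LINKAGE)
    #  ifdef _DEBUG
    #    pragma comment(lib, "tbb_debug.lib")  // embeds a default-lib record in the .obj
    #  else
    #    pragma comment(lib, "tbb.lib")
    #  endif
    #endif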
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/xbox360.cl.inc b/deal.II/contrib/tbb/tbb30_104oss/build/xbox360.cl.inc
new file mode 100644 (file)
index 0000000..cd04d00
--- /dev/null
@@ -0,0 +1,119 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+#------------------------------------------------------------------------------
+# Define compiler-specific variables.
+#------------------------------------------------------------------------------
+
+
+#------------------------------------------------------------------------------
+# Setting compiler flags.
+#------------------------------------------------------------------------------
+CPLUS = cl /nologo
+LINK_FLAGS = /link /nologo
+LIB_LINK_FLAGS=/link /nologo /DLL /MAP /DEBUG
+MS_CRT_KEY = /MT$(if $(findstring debug,$(cfg)),d)
+EH_FLAGS = /EHsc /GR
+               
+ifeq ($(cfg), release)
+        CPLUS_FLAGS = $(MS_CRT_KEY) /O2 /Zi $(EH_FLAGS) /Zc:forScope /D_XBOX /DTBB_NO_LEGACY=1
+        ASM_FLAGS =
+endif
+ifeq ($(cfg), debug)
+        CPLUS_FLAGS = $(MS_CRT_KEY) /Od /Ob0 /Zi $(EH_FLAGS) /Zc:forScope \
+                      /DTBB_DO_ASSERT /D_XBOX /DTBB_NO_LEGACY=1
+        ASM_FLAGS = /DUSE_FRAME_POINTER
+endif
+
+
+COMPILE_ONLY = /c
+PREPROC_ONLY = /TC /EP
+INCLUDE_KEY = /I
+DEFINE_KEY = /D
+OUTPUT_KEY = /Fe
+OUTPUTOBJ_KEY = /Fo
+WARNING_AS_ERROR_KEY = /WX
+WARNING_KEY = /W3
+DYLIB_KEY = /DLL
+EXPORT_KEY = /DEF:
+
+        OPENMP_FLAG = /openmp
+ifeq ($(runtime),vc8)
+        OPENMP_FLAG = /openmp
+endif
+ifeq ($(runtime),vc9)
+        OPENMP_FLAG = /openmp
+endif
+
+ifeq (em64t,$(arch))
+        CPLUS_FLAGS += /GS-
+endif
+
+
+
+CPLUS_FLAGS += /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE \
+        /D_WIN32_WINNT=$(_WIN32_WINNT)
+C_FLAGS = $(CPLUS_FLAGS) /TC
+#------------------------------------------------------------------------------
+# End of setting compiler flags.
+#------------------------------------------------------------------------------
+
+
+#------------------------------------------------------------------------------
+# Setting assembler data.
+#------------------------------------------------------------------------------
+ASSEMBLY_SOURCE=$(arch)-masm
+ifeq (XBOX360,$(arch))
+#do nothing for XBOX360
+else
+
+ifeq (em64t,$(arch))
+    ASM=ml64
+    ASM_FLAGS += /DEM64T=1 /c /Zi
+    TBB_ASM.OBJ =
+else
+    ASM=ml
+    ASM_FLAGS += /c /coff /Zi
+    TBB_ASM.OBJ =
+endif
+
+endif
+#------------------------------------------------------------------------------
+# End of setting assembler data.
+#------------------------------------------------------------------------------
+
+
+#------------------------------------------------------------------------------
+# Setting tbbmalloc data.
+#------------------------------------------------------------------------------
+M_CPLUS_FLAGS = $(subst $(EH_FLAGS),/EHs-,$(CPLUS_FLAGS))
+#------------------------------------------------------------------------------
+# End of setting tbbmalloc data.
+#------------------------------------------------------------------------------
+
+#------------------------------------------------------------------------------
+# End of define compiler-specific variables.
+#------------------------------------------------------------------------------
diff --git a/deal.II/contrib/tbb/tbb30_104oss/build/xbox360.inc b/deal.II/contrib/tbb/tbb30_104oss/build/xbox360.inc
new file mode 100644 (file)
index 0000000..8852934
--- /dev/null
@@ -0,0 +1,77 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+ifdef tbb_build_dir
+  test_dir:=$(tbb_build_dir)
+else
+  test_dir:=.
+endif
+
+# TODO: give an error if archs don't match
+ifndef arch
+  export arch:=xbox360
+endif
+
+ifndef runtime
+  export runtime:=xdk
+endif
+
+native_compiler := cl
+export compiler ?= cl
+debugger ?= devenv /debugexe
+
+CMD=cmd /C
+CWD=$(shell cmd /C echo %CD%)
+RM=cmd /C del /Q /F
+RD=cmd /C rmdir
+MD=cmd /c mkdir
+SLASH=\\
+NUL = nul
+
+OBJ = obj
+DLL = dll
+LIBEXT = lib
+
+def_prefix = $(arch)
+
+# Target Windows version. Do not increase beyond 0x0500 without prior discussion!
+# Used as the value for the macro definition option in compiler-specific inc files.
+_WIN32_WINNT=0x0400
+
+TBB.DEF = $(tbb_root)/src/tbb/$(def_prefix)-tbb-export.def
+TBB.DLL = tbb$(DEBUG_SUFFIX).$(DLL)
+TBB.LIB = tbb$(DEBUG_SUFFIX).$(LIBEXT)
+TBB.RES =
+# On Windows we specify the appropriate tbb library using #pragma comment
+LINK_TBB.LIB =
+
+MALLOC.DEF = $(MALLOC_ROOT)/$(def_prefix)-tbbmalloc-export.def
+MALLOC.DLL = tbbmalloc$(DEBUG_SUFFIX).$(DLL)
+MALLOC.LIB = tbbmalloc$(DEBUG_SUFFIX).$(LIBEXT)
+MALLOC.RES =
+
+MAKE_VERSIONS = cmd /C cscript /nologo /E:jscript $(subst \,/,$(tbb_root))/build/version_info_windows.js $(compiler) $(arch) $(subst \,/,"$(CPLUS) $(CPLUS_FLAGS) $(INCLUDES)") > version_string.tmp
+MAKE_TBBVARS  = cmd /C "$(subst /,\,$(tbb_root))\build\generate_tbbvars.bat"
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/Release_Notes.txt b/deal.II/contrib/tbb/tbb30_104oss/doc/Release_Notes.txt
new file mode 100644 (file)
index 0000000..9e4d32b
--- /dev/null
@@ -0,0 +1,146 @@
+------------------------------------------------------------------------
+Intel(R) Threading Building Blocks - Release Notes
+                 Version 3.0
+------------------------------------------------------------------------
+
+
+System Requirements
+-------------------
+
+Intel(R) Threading Building Blocks (Intel(R) TBB) is available
+commercially (see http://www.intel.com/software/products/threading) as a
+binary distribution, and in open source, in both source and binary
+forms (see http://threadingbuildingblocks.org).
+
+When built from source, Intel(R) TBB is intended to be highly portable
+and so supports a wide variety of operating systems and platforms (see
+http://threadingbuildingblocks.org for more details).
+
+Binary distributions, including commercial distributions, are validated
+and officially supported for the hardware, software, operating systems
+and compilers listed here.
+
+Hardware - Recommended
+
+    Microsoft* Windows* Systems
+       Intel(R) Core(TM) 2 Duo processor or Intel(R) Xeon(R) processor
+           or higher
+    Linux* Systems
+       Intel(R) Core(TM) 2 Duo processor or Intel(R) Xeon(R) processor
+           or Intel(R) Itanium(R) processor or higher
+    Mac OS* X Systems
+       Intel(R) Core(TM) 2 Duo processor or higher
+
+Hardware - Supported
+
+    Intel(R) Pentium(R) 4 processor family and higher
+    Intel(R) Itanium(R) processor family (Linux* systems only)
+    Non-Intel(R) processors compatible with the above processors
+
+Software - Minimum Requirements
+
+    Supported operating system (see below)
+    Supported compiler (see below)
+    Xcode* tool suite 3.1.2 or higher (Mac OS* X systems only)
+    Microsoft* Internet Explorer* 6.0, or other compatible web browser
+    Adobe(R) Reader(R)* 6.0 or higher
+
+Software - Recommended
+
+    Intel(R) Parallel Studio
+    Intel(R) C++ Compiler 10.1 or higher
+    Intel(R) Thread Checker 3.1 or higher
+    Intel(R) Thread Profiler 3.1 or higher
+
+Software - Supported Operating Systems
+
+    Microsoft* Windows* Systems
+       Microsoft* Windows* XP Professional
+       Microsoft* Windows* Server 2003
+       Microsoft* Windows* Vista
+       Microsoft* Windows* Server 2008
+       Microsoft* Windows* 7
+    Linux* Systems
+       Red Hat* Enterprise Linux* 4, 5
+           (when using Red Hat* Enterprise Linux* 4 with Intel(R)
+           Itanium(R) processors, operating system Update 2 or higher
+           is recommended)
+       Fedora* 10, 11, 12
+           (not with Intel(R) Itanium(R) processors)
+       Asianux* 3.0
+       Debian* GNU/Linux* 5.0
+       Ubuntu* 9.04, 9.10
+       SuSE* Linux* Enterprise Server (SLES) 10, 11
+    Mac OS* X Systems
+       Mac OS* X 10.5.6 or higher
+
+Software - Supported Compilers
+
+    Microsoft* Visual C++ 8.0 (Microsoft* Visual Studio* 2005,
+       Windows* systems only)
+    Microsoft* Visual C++ 9.0 (Microsoft* Visual Studio* 2008,
+       Windows* systems only)
+    Microsoft* Visual C++ 10.0 (Microsoft* Visual Studio* 2010,
+       Windows* systems only)
+    Intel(R) C++ Compiler 10.1 or higher
+    For each supported Linux* operating system, the standard gcc
+       version provided with that operating system is supported,
+       including gcc 3.4 through 4.4
+    For each supported Mac OS* X operating system, the standard gcc
+       version provided with that operating system is supported,
+       including: 4.0.1 or higher (Xcode* tool suite 3.1.2 or higher)
+
+
+Known Issues
+------------
+
+Please note the following with respect to this particular release of
+Intel(R) Threading Building Blocks.
+
+Library Issues
+
+    - Unhandled exceptions in the user code executed in the context of
+       TBB algorithms or containers may cause segmentation faults when
+       Intel(R) C++ Compiler 10.x is used with glibc 2.3.2 to 2.3.4.
+
+    - To allow more accurate results to be obtained with Intel(R) Thread
+       Checker or Intel(R) Thread Profiler, download the latest update
+       releases of these products before using them with Intel(R)
+       Threading Building Blocks.
+
+    - If you are using Intel(R) Threading Building Blocks and OpenMP*
+       constructs mixed together in rapid succession in the same
+       program, and you are using Intel(R) compilers for your OpenMP*
+       code, set KMP_BLOCKTIME to a small value (e.g., 20 milliseconds)
+       to improve performance.  This setting can also be made within
+       your OpenMP* code via the kmp_set_blocktime() library call.  See
+       the Intel(R) compiler OpenMP* documentation for more details on
+       KMP_BLOCKTIME and kmp_set_blocktime().  A short sketch follows
+       these release notes.
+
+    - In general, non-debug ("release") builds of applications or
+       examples should link against the non-debug versions of the
+       Intel(R) Threading Building Blocks libraries, and debug builds
+       should link against the debug versions of these libraries.  On
+       Windows* systems, compile with /MD and use Intel(R) Threading
+       Building Blocks release libraries, or compile with /MDd and use
+       debug libraries; not doing so may cause run-time failures.  See
+       the Tutorial in the product "doc" sub-directory for more details
+       on debug vs. release libraries.
+    
+    - The installer does not accept non-commercial serial numbers on
+       Fedora* 11. To install the product on such a system, use the
+       license file provided after registering the serial number.
+
+    - Intel(R) TBB 2.2 Update 1 fixed known bugs with exception handling
+       in concurrent_vector. For applications using both exceptions 
+       and concurrent_vector, recompilation with TBB 2.2 Update 1
+       or a newer version is recommended.
+
+------------------------------------------------------------------------
+Copyright (C) 2005-2010 Intel Corporation.  All Rights Reserved.
+
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+
+* Other names and brands may be claimed as the property of others.
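For the KMP_BLOCKTIME note in the Known Issues above, a minimal sketch of handing
work from an OpenMP* region to TBB.  It assumes the Intel(R) OpenMP* runtime
extension kmp_set_blocktime() is declared in omp.h (an Intel-specific extension,
not part of the OpenMP* standard); the function and type names are illustrative only.

    // Sketch: lower the OpenMP* worker block time before handing work to TBB,
    // so idle OpenMP* threads sleep quickly instead of spinning.
    #include <omp.h>                      // Intel's omp.h declares kmp_set_blocktime()
    #include "tbb/blocked_range.h"
    #include "tbb/parallel_for.h"

    struct AddOne {
        float* a;
        explicit AddOne(float* a_) : a(a_) {}
        void operator()(const tbb::blocked_range<int>& r) const {
            for (int i = r.begin(); i != r.end(); ++i)
                a[i] += 1.0f;
        }
    };

    void mixed_openmp_then_tbb(float* a, int n) {
        #pragma omp parallel for
        for (int i = 0; i < n; ++i)
            a[i] *= 2.0f;

        kmp_set_blocktime(20);            // ~20 milliseconds, as suggested above

        tbb::parallel_for(tbb::blocked_range<int>(0, n), AddOne(a));
    }
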
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00001.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00001.html
new file mode 100644 (file)
index 0000000..3964798
--- /dev/null
@@ -0,0 +1,34 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::aligned_space&lt; T, N &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00146.html">tbb::aligned_space&lt; T, N &gt;</a>, including all inherited members.<p><table>
+  <tr class="memlist"><td><a class="el" href="a00146.html#0d702fc6b9e9d061ace3501b3c861cdf">begin</a>()</td><td><a class="el" href="a00146.html">tbb::aligned_space&lt; T, N &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00146.html#024be075c23c0394c9a2518d993bcd9e">end</a>()</td><td><a class="el" href="a00146.html">tbb::aligned_space&lt; T, N &gt;</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
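As a usage hint for the member list above: tbb::aligned_space<T,N> is raw,
properly aligned storage for N objects of T; begin() and end() return T* bounds,
and nothing is constructed automatically.  A minimal sketch (the Widget type is
illustrative only):

    #include <new>                        // placement new
    #include "tbb/aligned_space.h"

    struct Widget {
        int value;
        explicit Widget(int v) : value(v) {}
    };

    void use_aligned_space() {
        tbb::aligned_space<Widget, 4> storage;      // uninitialized, aligned storage
        for (int i = 0; i < 4; ++i)
            new (storage.begin() + i) Widget(i);    // construct in place
        // ... use storage.begin()[0] .. storage.begin()[3] ...
        for (int i = 0; i < 4; ++i)
            (storage.begin() + i)->~Widget();       // destroy explicitly
    }
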
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00002.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00002.html
new file mode 100644 (file)
index 0000000..71efbbf
--- /dev/null
@@ -0,0 +1,34 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::atomic&lt; T &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00147.html">tbb::atomic&lt; T &gt;</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>operator=</b>(T rhs) (defined in <a class="el" href="a00147.html">tbb::atomic&lt; T &gt;</a>)</td><td><a class="el" href="a00147.html">tbb::atomic&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>operator=</b>(const atomic&lt; T &gt; &amp;rhs) (defined in <a class="el" href="a00147.html">tbb::atomic&lt; T &gt;</a>)</td><td><a class="el" href="a00147.html">tbb::atomic&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
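For context on the operator= entries above, tbb::atomic<T> also provides
read-modify-write operations such as fetch_and_increment() and
compare_and_swap().  A brief sketch (function names are illustrative):

    #include "tbb/atomic.h"

    tbb::atomic<long> counter;            // zero-initialized at namespace scope

    long next_ticket() {
        return counter.fetch_and_increment();        // atomic post-increment
    }

    bool try_claim(tbb::atomic<int>& flag) {
        // compare_and_swap returns the value observed before the operation.
        return flag.compare_and_swap(/*value=*/1, /*comparand=*/0) == 0;
    }
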
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00003.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00003.html
new file mode 100644 (file)
index 0000000..c62433b
--- /dev/null
@@ -0,0 +1,34 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::atomic&lt; void * &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00148.html">tbb::atomic&lt; void * &gt;</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>operator=</b>(void *rhs) (defined in <a class="el" href="a00148.html">tbb::atomic&lt; void * &gt;</a>)</td><td><a class="el" href="a00148.html">tbb::atomic&lt; void * &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>operator=</b>(const atomic&lt; void * &gt; &amp;rhs) (defined in <a class="el" href="a00148.html">tbb::atomic&lt; void * &gt;</a>)</td><td><a class="el" href="a00148.html">tbb::atomic&lt; void * &gt;</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00004.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00004.html
new file mode 100644 (file)
index 0000000..1017ac7
--- /dev/null
@@ -0,0 +1,45 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::blocked_range&lt; Value &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00152.html">tbb::blocked_range&lt; Value &gt;</a>, including all inherited members.<p><table>
+  <tr class="memlist"><td><a class="el" href="a00152.html#18d2258400756ac1446dac7676b18df3">begin</a>() const </td><td><a class="el" href="a00152.html">tbb::blocked_range&lt; Value &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00152.html#94607755c5110d199202234d58d022ac">blocked_range</a>()</td><td><a class="el" href="a00152.html">tbb::blocked_range&lt; Value &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00152.html#14795a36ead1414b4371dbe1a4656359">blocked_range</a>(Value begin_, Value end_, size_type grainsize_=1)</td><td><a class="el" href="a00152.html">tbb::blocked_range&lt; Value &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00152.html#4c0efd2be3f96a0ab3ba5085e8b3fcc7">blocked_range</a>(blocked_range &amp;r, split)</td><td><a class="el" href="a00152.html">tbb::blocked_range&lt; Value &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>blocked_range2d</b> (defined in <a class="el" href="a00152.html">tbb::blocked_range&lt; Value &gt;</a>)</td><td><a class="el" href="a00152.html">tbb::blocked_range&lt; Value &gt;</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>blocked_range3d</b> (defined in <a class="el" href="a00152.html">tbb::blocked_range&lt; Value &gt;</a>)</td><td><a class="el" href="a00152.html">tbb::blocked_range&lt; Value &gt;</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00152.html#1a8d05842c2b3dfc177bc4d347e4cef7">const_iterator</a> typedef</td><td><a class="el" href="a00152.html">tbb::blocked_range&lt; Value &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00152.html#8f4f02f530eb3f2e7ea26e06f76aef9d">empty</a>() const </td><td><a class="el" href="a00152.html">tbb::blocked_range&lt; Value &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00152.html#8b929d93ddc13f148b11bceef3a3bdf8">end</a>() const </td><td><a class="el" href="a00152.html">tbb::blocked_range&lt; Value &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00152.html#fcd2e5b8b6c11fd3f20fc0aa9f11bbc2">grainsize</a>() const </td><td><a class="el" href="a00152.html">tbb::blocked_range&lt; Value &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00152.html#41a58b703d574b6e1ca155df3576f578">is_divisible</a>() const </td><td><a class="el" href="a00152.html">tbb::blocked_range&lt; Value &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00152.html#9eaa0b6beff1420f688570bbf6b8c462">size</a>() const </td><td><a class="el" href="a00152.html">tbb::blocked_range&lt; Value &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">size_type</a> typedef</td><td><a class="el" href="a00152.html">tbb::blocked_range&lt; Value &gt;</a></td><td></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
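Following the member list above, a minimal sketch of how tbb::blocked_range<Value>
is typically consumed by tbb::parallel_for.  The Scale functor is illustrative;
since TBB 2.2 the auto_partitioner is used by default, so no explicit grainsize
is required.

    #include <cstddef>
    #include "tbb/blocked_range.h"
    #include "tbb/parallel_for.h"

    struct Scale {
        float* a;
        float factor;
        Scale(float* a_, float f) : a(a_), factor(f) {}
        void operator()(const tbb::blocked_range<std::size_t>& r) const {
            for (std::size_t i = r.begin(); i != r.end(); ++i)
                a[i] *= factor;                      // each chunk handled serially
        }
    };

    void scale_all(float* a, std::size_t n) {
        tbb::parallel_for(tbb::blocked_range<std::size_t>(0, n), Scale(a, 2.0f));
    }
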
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00005.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00005.html
new file mode 100644 (file)
index 0000000..d51ea11
--- /dev/null
@@ -0,0 +1,41 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::blocked_range2d&lt; RowValue, ColValue &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00153.html">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>blocked_range2d</b>(RowValue row_begin, RowValue row_end, typename row_range_type::size_type row_grainsize, ColValue col_begin, ColValue col_end, typename col_range_type::size_type col_grainsize) (defined in <a class="el" href="a00153.html">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a>)</td><td><a class="el" href="a00153.html">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>blocked_range2d</b>(RowValue row_begin, RowValue row_end, ColValue col_begin, ColValue col_end) (defined in <a class="el" href="a00153.html">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a>)</td><td><a class="el" href="a00153.html">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>blocked_range2d</b>(blocked_range2d &amp;r, split) (defined in <a class="el" href="a00153.html">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a>)</td><td><a class="el" href="a00153.html">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>col_range_type</b> typedef (defined in <a class="el" href="a00153.html">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a>)</td><td><a class="el" href="a00153.html">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00153.html#392a46759af2c884957115771affa7f4">cols</a>() const </td><td><a class="el" href="a00153.html">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00153.html#d144cb2d88cef553420311aca8667a44">empty</a>() const </td><td><a class="el" href="a00153.html">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00153.html#ad36a9b38e4fef26d376f99552ce2d92">is_divisible</a>() const </td><td><a class="el" href="a00153.html">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00153.html#a807a22fe658ec38b8edfd69521d0383">row_range_type</a> typedef</td><td><a class="el" href="a00153.html">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00153.html#f496e7348a82652fba581203477cc07c">rows</a>() const </td><td><a class="el" href="a00153.html">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
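Similarly, for the two-dimensional range above, rows() and cols() expose the
per-dimension sub-ranges inside the body.  A minimal sketch (the ZeroBlock
functor is illustrative):

    #include <cstddef>
    #include "tbb/blocked_range2d.h"
    #include "tbb/parallel_for.h"

    struct ZeroBlock {
        float* m;
        std::size_t cols;
        ZeroBlock(float* m_, std::size_t c) : m(m_), cols(c) {}
        void operator()(const tbb::blocked_range2d<std::size_t>& r) const {
            for (std::size_t i = r.rows().begin(); i != r.rows().end(); ++i)
                for (std::size_t j = r.cols().begin(); j != r.cols().end(); ++j)
                    m[i * cols + j] = 0.0f;          // zero this tile of the matrix
        }
    };

    void zero_matrix(float* m, std::size_t rows, std::size_t cols) {
        tbb::parallel_for(tbb::blocked_range2d<std::size_t>(0, rows, 0, cols),
                          ZeroBlock(m, cols));
    }
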
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00006.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00006.html
new file mode 100644 (file)
index 0000000..ee0f171
--- /dev/null
@@ -0,0 +1,43 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00154.html">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>blocked_range3d</b>(PageValue page_begin, PageValue page_end, RowValue row_begin, RowValue row_end, ColValue col_begin, ColValue col_end) (defined in <a class="el" href="a00154.html">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a>)</td><td><a class="el" href="a00154.html">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>blocked_range3d</b>(PageValue page_begin, PageValue page_end, typename page_range_type::size_type page_grainsize, RowValue row_begin, RowValue row_end, typename row_range_type::size_type row_grainsize, ColValue col_begin, ColValue col_end, typename col_range_type::size_type col_grainsize) (defined in <a class="el" href="a00154.html">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a>)</td><td><a class="el" href="a00154.html">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>blocked_range3d</b>(blocked_range3d &amp;r, split) (defined in <a class="el" href="a00154.html">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a>)</td><td><a class="el" href="a00154.html">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>col_range_type</b> typedef (defined in <a class="el" href="a00154.html">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a>)</td><td><a class="el" href="a00154.html">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00154.html#3336ba9480fd6c43e158f9beb024c050">cols</a>() const </td><td><a class="el" href="a00154.html">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00154.html#356860e1c977d91711e8216bd55c0b25">empty</a>() const </td><td><a class="el" href="a00154.html">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00154.html#39d69191721c488e737ae5d9c5336b9c">is_divisible</a>() const </td><td><a class="el" href="a00154.html">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00154.html#b8ebf17a552ba47825e9b3887855b719">page_range_type</a> typedef</td><td><a class="el" href="a00154.html">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00154.html#cf971430aa12361d3ed245344b7c6764">pages</a>() const </td><td><a class="el" href="a00154.html">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>row_range_type</b> typedef (defined in <a class="el" href="a00154.html">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a>)</td><td><a class="el" href="a00154.html">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00154.html#1584623e59ff32a8aa82006827508be4">rows</a>() const </td><td><a class="el" href="a00154.html">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00007.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00007.html
new file mode 100644 (file)
index 0000000..f6c0ceb
--- /dev/null
@@ -0,0 +1,49 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::cache_aligned_allocator&lt; T &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>address</b>(reference x) const  (defined in <a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>address</b>(const_reference x) const  (defined in <a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00155.html#4cdeea67af6c1fcd8f1d5e9c4cab61e8">allocate</a>(size_type n, const void *hint=0)</td><td><a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>cache_aligned_allocator</b>() (defined in <a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>cache_aligned_allocator</b>(const cache_aligned_allocator &amp;) (defined in <a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>cache_aligned_allocator</b>(const cache_aligned_allocator&lt; U &gt; &amp;) (defined in <a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_pointer</b> typedef (defined in <a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_reference</b> typedef (defined in <a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00155.html#958ee8745c86c275bfc9533af565e017">construct</a>(pointer p, const value_type &amp;value)</td><td><a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00155.html#3d4eadf188f7d85d3805ae534e0b8e1c">deallocate</a>(pointer p, size_type)</td><td><a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00155.html#cd298895a4f1654b5149ec84b591ecb5">destroy</a>(pointer p)</td><td><a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>difference_type</b> typedef (defined in <a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00155.html#fb23b687b4c0429dab4c7f8017266cf0">max_size</a>() const </td><td><a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>pointer</b> typedef (defined in <a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>reference</b> typedef (defined in <a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>size_type</b> typedef (defined in <a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>value_type</b> typedef (defined in <a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a></td><td></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
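As a usage note for the allocator above: it can be plugged into standard
containers; each allocation it returns starts on a cache-line boundary and is
padded to a multiple of the cache-line size, which helps avoid false sharing
between separate allocations.  A minimal sketch (the typedef name is illustrative):

    #include <cstddef>
    #include <vector>
    #include "tbb/cache_aligned_allocator.h"

    // A std::vector whose backing store starts on a cache-line boundary.
    typedef std::vector<long, tbb::cache_aligned_allocator<long> > aligned_vector;

    long sum(const aligned_vector& v) {
        long total = 0;
        for (std::size_t i = 0; i < v.size(); ++i)
            total += v[i];
        return total;
    }
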
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00009.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00009.html
new file mode 100644 (file)
index 0000000..8ea11e8
--- /dev/null
@@ -0,0 +1,35 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::cache_aligned_allocator&lt; void &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00156.html">tbb::cache_aligned_allocator&lt; void &gt;</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>const_pointer</b> typedef (defined in <a class="el" href="a00156.html">tbb::cache_aligned_allocator&lt; void &gt;</a>)</td><td><a class="el" href="a00156.html">tbb::cache_aligned_allocator&lt; void &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>pointer</b> typedef (defined in <a class="el" href="a00156.html">tbb::cache_aligned_allocator&lt; void &gt;</a>)</td><td><a class="el" href="a00156.html">tbb::cache_aligned_allocator&lt; void &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>value_type</b> typedef (defined in <a class="el" href="a00156.html">tbb::cache_aligned_allocator&lt; void &gt;</a>)</td><td><a class="el" href="a00156.html">tbb::cache_aligned_allocator&lt; void &gt;</a></td><td></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00011.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00011.html
new file mode 100644 (file)
index 0000000..92c21a2
--- /dev/null
@@ -0,0 +1,42 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::combinable&lt; T &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00158.html">tbb::combinable&lt; T &gt;</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>clear</b>() (defined in <a class="el" href="a00158.html">tbb::combinable&lt; T &gt;</a>)</td><td><a class="el" href="a00158.html">tbb::combinable&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>combinable</b>() (defined in <a class="el" href="a00158.html">tbb::combinable&lt; T &gt;</a>)</td><td><a class="el" href="a00158.html">tbb::combinable&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>combinable</b>(finit _finit) (defined in <a class="el" href="a00158.html">tbb::combinable&lt; T &gt;</a>)</td><td><a class="el" href="a00158.html">tbb::combinable&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>combinable</b>(const combinable &amp;other) (defined in <a class="el" href="a00158.html">tbb::combinable&lt; T &gt;</a>)</td><td><a class="el" href="a00158.html">tbb::combinable&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>combine</b>(combine_func_t f_combine) (defined in <a class="el" href="a00158.html">tbb::combinable&lt; T &gt;</a>)</td><td><a class="el" href="a00158.html">tbb::combinable&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>combine_each</b>(combine_func_t f_combine) (defined in <a class="el" href="a00158.html">tbb::combinable&lt; T &gt;</a>)</td><td><a class="el" href="a00158.html">tbb::combinable&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>local</b>() (defined in <a class="el" href="a00158.html">tbb::combinable&lt; T &gt;</a>)</td><td><a class="el" href="a00158.html">tbb::combinable&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>local</b>(bool &amp;exists) (defined in <a class="el" href="a00158.html">tbb::combinable&lt; T &gt;</a>)</td><td><a class="el" href="a00158.html">tbb::combinable&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>operator=</b>(const combinable &amp;other) (defined in <a class="el" href="a00158.html">tbb::combinable&lt; T &gt;</a>)</td><td><a class="el" href="a00158.html">tbb::combinable&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00158.html#2c87e79ae98588a5780f708773388843">~combinable</a>()</td><td><a class="el" href="a00158.html">tbb::combinable&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
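The thread-local container above is typically used for per-thread partial
results that are folded at the end.  A minimal sketch using the finit
constructor and combine() shown in the member list (PartialSum and the helper
functions are illustrative):

    #include <cstddef>
    #include "tbb/blocked_range.h"
    #include "tbb/combinable.h"
    #include "tbb/parallel_for.h"

    float zero()                { return 0.0f; }
    float add(float x, float y) { return x + y; }

    struct PartialSum {
        const float* a;
        tbb::combinable<float>* sums;
        PartialSum(const float* a_, tbb::combinable<float>* s) : a(a_), sums(s) {}
        void operator()(const tbb::blocked_range<std::size_t>& r) const {
            float& local = sums->local();            // this thread's private slot
            for (std::size_t i = r.begin(); i != r.end(); ++i)
                local += a[i];
        }
    };

    float total(const float* a, std::size_t n) {
        tbb::combinable<float> sums(zero);           // each thread-local copy starts at 0.0f
        tbb::parallel_for(tbb::blocked_range<std::size_t>(0, n), PartialSum(a, &sums));
        return sums.combine(add);                    // fold all thread-local values
    }
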
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00012.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00012.html
new file mode 100644 (file)
index 0000000..64d9588
--- /dev/null
@@ -0,0 +1,34 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::tbb_hash_compare&lt; Key &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00213.html">tbb::tbb_hash_compare&lt; Key &gt;</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>equal</b>(const Key &amp;a, const Key &amp;b) (defined in <a class="el" href="a00213.html">tbb::tbb_hash_compare&lt; Key &gt;</a>)</td><td><a class="el" href="a00213.html">tbb::tbb_hash_compare&lt; Key &gt;</a></td><td><code> [inline, static]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>hash</b>(const Key &amp;a) (defined in <a class="el" href="a00213.html">tbb::tbb_hash_compare&lt; Key &gt;</a>)</td><td><a class="el" href="a00213.html">tbb::tbb_hash_compare&lt; Key &gt;</a></td><td><code> [inline, static]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
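tbb_hash_compare<Key> above supplies the hash()/equal() pair that
concurrent_hash_map (next file) uses by default for common key types such as
std::string.  A minimal word-count sketch relying on that default (the
WordCount typedef and function names are illustrative):

    #include <string>
    #include "tbb/concurrent_hash_map.h"

    // Default HashCompare is tbb::tbb_hash_compare<std::string>.
    typedef tbb::concurrent_hash_map<std::string, int> WordCount;

    void count_word(WordCount& table, const std::string& word) {
        WordCount::accessor a;                 // write lock on the element
        table.insert(a, word);                 // inserts the key (count starts at 0) if absent
        ++(a->second);
    }

    int lookup_word(const WordCount& table, const std::string& word) {
        WordCount::const_accessor a;           // read lock
        return table.find(a, word) ? a->second : 0;
    }
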
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00013.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00013.html
new file mode 100644 (file)
index 0000000..cbd5747
--- /dev/null
@@ -0,0 +1,95 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>allocator_type</b> typedef (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>begin</b>() (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>begin</b>() const  (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#414d15033d36c63aa3a40666dc4d6f5e">bucket_count</a>() const </td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#a9f89be8fe28835749529d91081a2511">clear</a>()</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#d827bb5e4f61de1916ab67d51c7c6e60">concurrent_hash_map</a>(const allocator_type &amp;a=allocator_type())</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#a4612d5c7233712d455496641e9b31ff">concurrent_hash_map</a>(size_type n, const allocator_type &amp;a=allocator_type())</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#df0cd14eaddb17f10929c91519e65be9">concurrent_hash_map</a>(const concurrent_hash_map &amp;table, const allocator_type &amp;a=allocator_type())</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#3bfe75fcb350ce39cf610c164f233edc">concurrent_hash_map</a>(I first, I last, const allocator_type &amp;a=allocator_type())</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_accessor</b> (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_iterator</b> typedef (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_pointer</b> typedef (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_range_type</b> typedef (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_reference</b> typedef (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#74f5ef06a06c5e619f156a1c76c04969">count</a>(const Key &amp;key) const </td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>delete_node</b>(node_base *n) (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline, protected]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>difference_type</b> typedef (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#6cab7d029a3e73a653ef0faeac4d1586">empty</a>() const </td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>end</b>() (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>end</b>() const  (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>equal_range</b>(const Key &amp;key) (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>equal_range</b>(const Key &amp;key) const  (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#034c3b3ee419edee78e0f2f2b1f0d7ca">erase</a>(const Key &amp;key)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#da7e4a50f6bb06191817425ec85fe760">erase</a>(const_accessor &amp;item_accessor)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#0f500842d0cf791f8fa61662edb1b311">erase</a>(accessor &amp;item_accessor)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#faad2108bd2be75e52293486af59f11e">exclude</a>(const_accessor &amp;item_accessor, bool readonly)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [protected]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#64338d7f2e35df586af4cb0145cd910f">find</a>(const_accessor &amp;result, const Key &amp;key) const </td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#bce7bdf46435115a95cca2aa73c5da83">find</a>(accessor &amp;result, const Key &amp;key)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#199208eed6f09e200cda364f906be0fe">get_allocator</a>() const </td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#58c38b27273de6c670568633c0931854">insert</a>(const_accessor &amp;result, const Key &amp;key)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#ccfecaa3e71d92be61fb3d811dd264eb">insert</a>(accessor &amp;result, const Key &amp;key)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#d4a2816129e38c53128c6d0c7b6b7370">insert</a>(const_accessor &amp;result, const value_type &amp;value)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#a657e61cd2b13164764ca2708875784a">insert</a>(accessor &amp;result, const value_type &amp;value)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#52bffd1066b3d7b793945bc6fa1a71a1">insert</a>(const value_type &amp;value)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#cfe172677e5987004ef4a03e22fa338a">insert</a>(I first, I last)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::hash_map_iterator</b> (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::hash_map_range</b> (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#3c27779fe66b79505390d084310d997e">internal_copy</a>(const concurrent_hash_map &amp;source)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [protected]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal_copy</b>(I first, I last) (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [protected]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#976c57edfb7f22b9f91a2e11f141eb4a">internal_equal_range</a>(const Key &amp;key, I end) const </td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [protected]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#2f76ed101a0ccc8875b846c2f747897e">internal_fast_find</a>(const Key &amp;key) const </td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline, protected]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>iterator</b> typedef (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>key_type</b> typedef (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#1f22480a290ddc6c145888d8f985531a">lookup</a>(bool op_insert, const Key &amp;key, const T *t, const_accessor *result, bool write)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [protected]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>mapped_type</b> typedef (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#1e45d3cbd1e2ae06f365f1b48e0df0b5">max_size</a>() const </td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>my_allocator</b> (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [protected]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>my_hash_compare</b> (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [protected]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>node_allocator_type</b> typedef (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [protected]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#2c0c42a2e1b5282b6739157df9ce2304">operator=</a>(const concurrent_hash_map &amp;table)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>pointer</b> typedef (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>range</b>(size_type grainsize=1) (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>range</b>(size_type grainsize=1) const  (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>range_type</b> typedef (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>reference</b> typedef (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#13f3f2e8de7564be03882c31559493c9">rehash</a>(size_type n=0)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>rehash_bucket</b>(bucket *b_new, const hashcode_t h) (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline, protected]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>search_bucket</b>(const key_type &amp;key, bucket *b) const  (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline, protected]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#17fd8c5fe8c6a86075f34aa4e8412ba3">size</a>() const </td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>size_type</b> typedef (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#eddb0d2efe0b4f25a85c059e1c3dac15">swap</a>(concurrent_hash_map &amp;table)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>value_type</b> typedef (defined in <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>)</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00160.html#a1ac58997d8fbf242b266e3691573481">~concurrent_hash_map</a>()</td><td><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
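The member list above is the accessor-based interface of tbb::concurrent_hash_map. As a minimal sketch of the usual insert-then-update pattern (the CounterTable alias, the increment() helper, and the counting logic are illustrative placeholders, not part of the shipped headers):

    #include "tbb/concurrent_hash_map.h"
    #include <string>

    // Illustrative alias; HashCompare and Allocator fall back to their defaults.
    typedef tbb::concurrent_hash_map<std::string, int> CounterTable;

    void increment(CounterTable& table, const std::string& key) {
        CounterTable::accessor a;      // grants exclusive (write) access to one element
        table.insert(a, key);          // inserts a default-constructed int if key is absent
        a->second += 1;                // the accessor acts like a pointer to std::pair<const Key, T>
    }                                  // the per-element lock is released when 'a' is destroyed
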
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00015.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00015.html
new file mode 100644 (file)
index 0000000..34df2b9
--- /dev/null
@@ -0,0 +1,37 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::bucket_accessor Member List</h1>This is the complete list of members for <a class="el" href="a00162.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::bucket_accessor</a>, including all inherited members.<p><table>
+  <tr class="memlist"><td><a class="el" href="a00162.html#26b4fe0ca87a7ad4852cb787db880119">acquire</a>(concurrent_hash_map *base, const hashcode_t h, bool writer=false)</td><td><a class="el" href="a00162.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::bucket_accessor</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>bucket_accessor</b>(concurrent_hash_map *base, const hashcode_t h, bool writer=false) (defined in <a class="el" href="a00162.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::bucket_accessor</a>)</td><td><a class="el" href="a00162.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::bucket_accessor</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00162.html#fc194e3a186dc935a5fb513cc9f8e898">is_writer</a>()</td><td><a class="el" href="a00162.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::bucket_accessor</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00162.html#57c6110bd20e95c06de5a199de988941">operator()</a>()</td><td><a class="el" href="a00162.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::bucket_accessor</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>upgrade_to_writer</b>() (defined in <a class="el" href="a00162.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::bucket_accessor</a>)</td><td><a class="el" href="a00162.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::bucket_accessor</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00016.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00016.html
new file mode 100644 (file)
index 0000000..15ccda5
--- /dev/null
@@ -0,0 +1,41 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor Member List</h1>This is the complete list of members for <a class="el" href="a00163.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>accessor</b> (defined in <a class="el" href="a00163.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a>)</td><td><a class="el" href="a00163.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</b> (defined in <a class="el" href="a00163.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a>)</td><td><a class="el" href="a00163.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00163.html#a9ead65cca68d4c49c7ef64d7899a4c8">const_accessor</a>()</td><td><a class="el" href="a00163.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00163.html#5cce3104cb0a52e08d2131370871c614">empty</a>() const </td><td><a class="el" href="a00163.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00163.html#30f31106840700a4c3664b9cb1c31ca7">operator *</a>() const </td><td><a class="el" href="a00163.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00163.html#3d03a48ecb8cd9549bd8be64b09c9b0d">operator-&gt;</a>() const </td><td><a class="el" href="a00163.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00163.html#d5ce4f88d8870290238a8ad621e6f270">release</a>()</td><td><a class="el" href="a00163.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00163.html#48647ca0d79c1233b997f5768403c926">value_type</a> typedef</td><td><a class="el" href="a00163.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00163.html#752b0c1ec74b94786403a75e42917d01">~const_accessor</a>()</td><td><a class="el" href="a00163.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
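A read-only lookup through const_accessor, sketched with the same illustrative CounterTable alias (the lookup() helper is a placeholder name):

    #include "tbb/concurrent_hash_map.h"
    #include <string>

    typedef tbb::concurrent_hash_map<std::string, int> CounterTable;  // illustrative alias

    // Copies the value out if 'key' is present; the shared (read) lock is held
    // only while the const_accessor points at the element.
    bool lookup(const CounterTable& table, const std::string& key, int& value) {
        CounterTable::const_accessor ca;
        if (table.find(ca, key)) {
            value = ca->second;        // operator-> gives const access to the stored pair
            return true;
        }
        return false;                  // ca.empty() is true when no element is held
    }
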
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00017.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00017.html
new file mode 100644 (file)
index 0000000..abddfce
--- /dev/null
@@ -0,0 +1,39 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::accessor Member List</h1>This is the complete list of members for <a class="el" href="a00161.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::accessor</a>, including all inherited members.<p><table>
+  <tr class="memlist"><td><a class="el" href="a00163.html#a9ead65cca68d4c49c7ef64d7899a4c8">const_accessor</a>()</td><td><a class="el" href="a00163.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00163.html#5cce3104cb0a52e08d2131370871c614">empty</a>() const </td><td><a class="el" href="a00163.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00161.html#e8938f0cd1211e88a1d73527ed3636c4">operator *</a>() const </td><td><a class="el" href="a00161.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::accessor</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00161.html#fcebc32c020202cc37e60eadef157569">operator-&gt;</a>() const </td><td><a class="el" href="a00161.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::accessor</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00163.html#d5ce4f88d8870290238a8ad621e6f270">release</a>()</td><td><a class="el" href="a00163.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00161.html#49eec74f272bab187d176c0d9d16a7fe">value_type</a> typedef</td><td><a class="el" href="a00161.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::accessor</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00163.html#752b0c1ec74b94786403a75e42917d01">~const_accessor</a>()</td><td><a class="el" href="a00163.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
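The accessor variant of the same lookup grants write access to the found element; a sketch (double_value() and the doubling itself are illustrative):

    #include "tbb/concurrent_hash_map.h"
    #include <string>

    typedef tbb::concurrent_hash_map<std::string, int> CounterTable;  // illustrative alias

    // Doubles the value stored under 'key', if present.
    bool double_value(CounterTable& table, const std::string& key) {
        CounterTable::accessor a;      // exclusive lock while it points at an element
        if (!table.find(a, key))
            return false;
        a->second *= 2;                // mutable access through operator->
        a.release();                   // optional: drop the lock before 'a' goes out of scope
        return true;
    }
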
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00018.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00018.html
new file mode 100644 (file)
index 0000000..43e18de
--- /dev/null
@@ -0,0 +1,55 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::strict_ppl::concurrent_queue&lt; T, A &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a>, including all inherited members.<p><table>
+  <tr class="memlist"><td><a class="el" href="a00164.html#5a3956341728eaa558d8827063718cac">allocator_type</a> typedef</td><td><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00164.html#c32e8e84c0524155133b4aae32d2a827">clear</a>()</td><td><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00164.html#7c48a05a94a1f4f98fdfadfbef98ecf6">concurrent_queue</a>(const allocator_type &amp;a=allocator_type())</td><td><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td><code> [inline, explicit]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00164.html#25209656c84f2f9b030e2f9162713341">concurrent_queue</a>(InputIterator begin, InputIterator end, const allocator_type &amp;a=allocator_type())</td><td><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00164.html#8a6b98ea11a867db8ac868f0113ca429">concurrent_queue</a>(const concurrent_queue &amp;src, const allocator_type &amp;a=allocator_type())</td><td><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_iterator</b> typedef (defined in <a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00164.html#4d48e7ff93f81636bca2c74f7da34750">const_reference</a> typedef</td><td><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00164.html#068576d16c7e4e05d52f9db7a45b5b65">difference_type</a> typedef</td><td><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00164.html#f3f6fce0cfa2d581d6f3b47e0613ad64">empty</a>() const </td><td><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00164.html#f034f70caef445fe8abc9113ec926a8d">get_allocator</a>() const </td><td><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::concurrent_queue_iterator</b> (defined in <a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>iterator</b> typedef (defined in <a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00164.html#73c47563ffcc4c2f6452f25a04ebe2e2">push</a>(const T &amp;source)</td><td><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00164.html#a8d725c50a9834bb7af5b67c0aff92b8">reference</a> typedef</td><td><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00164.html#8fc30e93f8342a1960357f71e4fe8a2b">size_type</a> typedef</td><td><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00164.html#ae31ca0db34ef96ef1e74aa0d28c95f8">try_pop</a>(T &amp;result)</td><td><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>unsafe_begin</b>() (defined in <a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>unsafe_begin</b>() const  (defined in <a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>unsafe_end</b>() (defined in <a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>unsafe_end</b>() const  (defined in <a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00164.html#eaa35a5274606779802e9a669a706260">unsafe_size</a>() const </td><td><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00164.html#682c3978d5cb0620000994f11c44a476">value_type</a> typedef</td><td><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00164.html#830b33753d6b149c366344e29b2edd8c">~concurrent_queue</a>()</td><td><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
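tbb::strict_ppl::concurrent_queue is the class exposed to users as tbb::concurrent_queue; a minimal sketch of its non-blocking interface (the loop bound and the processing comment are illustrative):

    #include "tbb/concurrent_queue.h"

    int main() {
        tbb::concurrent_queue<int> queue;   // public name for the class documented above

        for (int i = 0; i < 8; ++i)
            queue.push(i);                  // push never blocks; the queue grows as needed

        int item;
        while (queue.try_pop(item)) {       // returns false once the queue is empty
            // process 'item'
        }
        return 0;
    }
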
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00019.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00019.html
new file mode 100644 (file)
index 0000000..b102739
--- /dev/null
@@ -0,0 +1,59 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::concurrent_bounded_queue&lt; T, A &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, including all inherited members.<p><table>
+  <tr class="memlist"><td><a class="el" href="a00159.html#2e2726fccf6d975dc1071608cc0bbf90">allocator_type</a> typedef</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#b2888b3e4e837d7e03f2c731963a402b">capacity</a>() const </td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#90b31e2954c6e4596c7900435a5f4bc1">clear</a>()</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#e3525226732963ff0f13e89d8e6dab5b">concurrent_bounded_queue</a>(const allocator_type &amp;a=allocator_type())</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline, explicit]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#7b7fb414d2eaa8a7f5d68fc4cd63ac50">concurrent_bounded_queue</a>(const concurrent_bounded_queue &amp;src, const allocator_type &amp;a=allocator_type())</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#a5e04dcd7db9fd9b583b4e7df832246a">concurrent_bounded_queue</a>(InputIterator begin, InputIterator end, const allocator_type &amp;a=allocator_type())</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_iterator</b> typedef (defined in <a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#796713d0b9ba93a4721cbe13e4474068">const_reference</a> typedef</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#4b45c91297e69515d83d5eef85ae1f49">difference_type</a> typedef</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#f64924f2ee9225c368a270fc3c394db9">empty</a>() const </td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#415eb87e53b1c6a266de06ecbc490d16">get_allocator</a>() const </td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::concurrent_queue_iterator</b> (defined in <a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>iterator</b> typedef (defined in <a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#41f4c6bd7a82ab070e840bbf81b0b123">pop</a>(T &amp;destination)</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#ceb08c743b11ba88c878e73fff8af20b">push</a>(const T &amp;source)</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#dcd44ca6a88c0dc7a847a47a10811f0c">reference</a> typedef</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#f3c6c934f85fd02aedbc83a16943193b">set_capacity</a>(size_type new_capacity)</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#7dc14d1a579a4cccda9f857585e1768d">size</a>() const </td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#a80e4c11dbb324e4b92a24a77bbcde68">size_type</a> typedef</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#0ca487019bbb00a196442aff78a1e4f7">try_pop</a>(T &amp;destination)</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#2bd6232531279fb3ccbd296bea23066b">try_push</a>(const T &amp;source)</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>unsafe_begin</b>() (defined in <a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>unsafe_begin</b>() const  (defined in <a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>unsafe_end</b>() (defined in <a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>unsafe_end</b>() const  (defined in <a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#98245517a931e5893f6601e66c51fc75">value_type</a> typedef</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#acaf5b510dc0dfc7780b8c956cf773cf">~concurrent_bounded_queue</a>()</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
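A sketch of the bounded queue's blocking and non-blocking calls (the capacity of 4 and the item values are arbitrary):

    #include "tbb/concurrent_queue.h"

    int main() {
        tbb::concurrent_bounded_queue<int> queue;
        queue.set_capacity(4);          // push() blocks once four items are waiting

        for (int i = 0; i < 4; ++i)
            queue.push(i);              // blocking push; would wait here if the queue were full

        if (!queue.try_push(99)) {      // non-blocking variant reports failure instead of waiting
            // queue is at capacity
        }

        int item;
        queue.pop(item);                // blocking pop; waits until an item is available
        while (queue.try_pop(item)) {   // drain the rest without blocking
            // process 'item'
        }
        return 0;
    }
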
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00021.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00021.html
new file mode 100644 (file)
index 0000000..04fa71e
--- /dev/null
@@ -0,0 +1,68 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::deprecated::concurrent_queue&lt; T, A &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00165.html">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a>, including all inherited members.<p><table>
+  <tr class="memlist"><td><a class="el" href="a00159.html#2e2726fccf6d975dc1071608cc0bbf90">allocator_type</a> typedef</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>begin</b>() (defined in <a class="el" href="a00165.html">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00165.html">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>begin</b>() const  (defined in <a class="el" href="a00165.html">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00165.html">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#b2888b3e4e837d7e03f2c731963a402b">capacity</a>() const </td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#90b31e2954c6e4596c7900435a5f4bc1">clear</a>()</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#e3525226732963ff0f13e89d8e6dab5b">concurrent_bounded_queue</a>(const allocator_type &amp;a=allocator_type())</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline, explicit]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#7b7fb414d2eaa8a7f5d68fc4cd63ac50">concurrent_bounded_queue</a>(const concurrent_bounded_queue &amp;src, const allocator_type &amp;a=allocator_type())</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#a5e04dcd7db9fd9b583b4e7df832246a">concurrent_bounded_queue</a>(InputIterator begin, InputIterator end, const allocator_type &amp;a=allocator_type())</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00165.html#aaf19bd7337b72f3131ece60f7315ef7">concurrent_queue</a>(const A &amp;a=A())</td><td><a class="el" href="a00165.html">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a></td><td><code> [inline, explicit]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00165.html#fc092b9082f233482f3513fc3bb670f7">concurrent_queue</a>(const concurrent_queue &amp;src, const A &amp;a=A())</td><td><a class="el" href="a00165.html">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00165.html#9102b897776bd2d9e908e6604ff16b5f">concurrent_queue</a>(InputIterator b, InputIterator e, const A &amp;a=A())</td><td><a class="el" href="a00165.html">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_iterator</b> typedef (defined in <a class="el" href="a00165.html">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00165.html">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#796713d0b9ba93a4721cbe13e4474068">const_reference</a> typedef</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#4b45c91297e69515d83d5eef85ae1f49">difference_type</a> typedef</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#f64924f2ee9225c368a270fc3c394db9">empty</a>() const </td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>end</b>() (defined in <a class="el" href="a00165.html">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00165.html">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>end</b>() const  (defined in <a class="el" href="a00165.html">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00165.html">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#415eb87e53b1c6a266de06ecbc490d16">get_allocator</a>() const </td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::concurrent_queue_iterator</b> (defined in <a class="el" href="a00165.html">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00165.html">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>iterator</b> typedef (defined in <a class="el" href="a00165.html">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00165.html">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#41f4c6bd7a82ab070e840bbf81b0b123">pop</a>(T &amp;destination)</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00165.html#48da3536245318af6cb5fd58bac78039">pop_if_present</a>(T &amp;destination)</td><td><a class="el" href="a00165.html">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#ceb08c743b11ba88c878e73fff8af20b">push</a>(const T &amp;source)</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00165.html#7c45561bafe71107d09b2bc1b8f4e681">push_if_not_full</a>(const T &amp;source)</td><td><a class="el" href="a00165.html">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#dcd44ca6a88c0dc7a847a47a10811f0c">reference</a> typedef</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#f3c6c934f85fd02aedbc83a16943193b">set_capacity</a>(size_type new_capacity)</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#7dc14d1a579a4cccda9f857585e1768d">size</a>() const </td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#a80e4c11dbb324e4b92a24a77bbcde68">size_type</a> typedef</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#0ca487019bbb00a196442aff78a1e4f7">try_pop</a>(T &amp;destination)</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#2bd6232531279fb3ccbd296bea23066b">try_push</a>(const T &amp;source)</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>unsafe_begin</b>() (defined in <a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>unsafe_begin</b>() const  (defined in <a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>unsafe_end</b>() (defined in <a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>unsafe_end</b>() const  (defined in <a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>)</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#98245517a931e5893f6601e66c51fc75">value_type</a> typedef</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00159.html#acaf5b510dc0dfc7780b8c956cf773cf">~concurrent_bounded_queue</a>()</td><td><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
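tbb::deprecated::concurrent_queue layers the old TBB 2.x method names on top of concurrent_bounded_queue; a sketch, assuming the deprecated interface is enabled in the build (the headers appear to key this off the TBB_DEPRECATED macro):

    // Assumption: with TBB_DEPRECATED set, tbb::concurrent_queue refers to this class.
    #define TBB_DEPRECATED 1
    #include "tbb/concurrent_queue.h"

    int main() {
        tbb::concurrent_queue<int> queue;

        queue.push_if_not_full(1);        // old spelling of a non-blocking bounded push
        int item;
        if (queue.pop_if_present(item)) { // old spelling of try_pop
            // process 'item'
        }
        return 0;
    }
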
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00026.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00026.html
new file mode 100644 (file)
index 0000000..c447f11
--- /dev/null
@@ -0,0 +1,102 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::concurrent_vector&lt; T, A &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>allocator_type</b> typedef (defined in <a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a>)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#423e5aa15e0e3309ad86d026fd85f6f6">assign</a>(size_type n, const_reference t)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#93a06b3112cb804f42f40efb5e7387b4">assign</a>(I first, I last)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#0c073ca43e787c7cbf7b0e26d2221748">at</a>(size_type index)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#23e14a38af748edff96a7adc3a0f1c58">at</a>(size_type index) const </td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#41ce48d6015a1a2812d41cf620ec3476">back</a>()</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#bd518e204107d07fd08d0ec5bdfd383d">back</a>() const </td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#730b23a251ecb6d37f692fb22f38e029">begin</a>()</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#78a06182276ff758788d4c0623ae0d71">begin</a>() const </td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#3ed6b9ae7217af5103d974045b6f5cd5">capacity</a>() const </td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#f88fcf1c920693c39bd9709db33c199f">cbegin</a>() const </td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#0c15a5d0f1cf75d687dabba07da1d46b">cend</a>() const </td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#26f937a359a66b6aae904c3cd9a3c444">clear</a>()</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#1693d1da41b1a8235871be9c6633be35">compact</a>()</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#2c8ca9cabfcd30ad5943324c853664b5">concurrent_vector</a>(const allocator_type &amp;a=allocator_type())</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline, explicit]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#dd8a200b99a8088435a37934b58fe335">concurrent_vector</a>(const concurrent_vector &amp;vector, const allocator_type &amp;a=allocator_type())</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#64432f13f7b29bfe4acfb5568f34f3a8">concurrent_vector</a>(const concurrent_vector&lt; T, M &gt; &amp;vector, const allocator_type &amp;a=allocator_type())</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#2a2e261dfe1cab3f73f7b1a94137cfca">concurrent_vector</a>(size_type n)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline, explicit]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#3883a8a908b44e249a57f454de3f55d8">concurrent_vector</a>(size_type n, const_reference t, const allocator_type &amp;a=allocator_type())</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#4450de83c5862ea4bcd9443fd7e67419">concurrent_vector</a>(I first, I last, const allocator_type &amp;a=allocator_type())</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_iterator</b> typedef (defined in <a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a>)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_pointer</b> typedef (defined in <a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a>)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_range_type</b> typedef (defined in <a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a>)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_reference</b> typedef (defined in <a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a>)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_reverse_iterator</b> typedef (defined in <a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a>)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_reverse_iterator</b> typedef (defined in <a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a>)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#db78a1d28c9c966050e8a2926d834a33">crbegin</a>() const </td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#fff9cece89438587997ebedf93c5e962">crend</a>() const </td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>difference_type</b> typedef (defined in <a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a>)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#c6426cb93cf20d3af40f3c90f1f0481a">empty</a>() const </td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#c0b51160e5a764982ec97a455f94f2c6">end</a>()</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#1e6aa764ce5a1cbd24526f68bc0a2f6b">end</a>() const </td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#15181759c0bfa2ddce5d10c7550e0002">front</a>()</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#502615a858eb9fa0390ee59169065e90">front</a>() const </td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#2fdba8e90de6a4d2300222236d46758e">get_allocator</a>() const </td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#30484e3959892fd5392fa93c873c31f0">grow_by</a>(size_type delta)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#c8177b1865270ea68aa1ab9148e5e35e">grow_by</a>(size_type delta)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#38274ab3f772ecba600c7daca7690102">grow_by</a>(size_type delta, const_reference t)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#473a59a4c9308b93411b898b3110d26c">grow_by</a>(size_type delta, const_reference t)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#47fe588214dd5fa06ab6e8ab78d83874">grow_to_at_least</a>(size_type n)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#a7e3b67c8ccab16d0aecc80899ae799d">grow_to_at_least</a>(size_type n)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::vector_iterator</b> (defined in <a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a>)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal_vector_base</b>() const  (defined in <a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a>)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>iterator</b> typedef (defined in <a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a>)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#2c248a017f0576df3e7cd99627836fd6">max_size</a>() const </td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#691f0f3cda3e489c37a657016e375eaf">operator=</a>(const concurrent_vector &amp;vector)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#19f4ab88a01b0fd056af3bba463e7bd6">operator=</a>(const concurrent_vector&lt; T, M &gt; &amp;vector)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#4c52f2950bb1832886bd4458eb09d7eb">operator[]</a>(size_type index)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#c6fade5c732cc95274d1d8277ea619d1">operator[]</a>(size_type index) const </td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>pointer</b> typedef (defined in <a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a>)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#e94e038f915c0268fdf2d3d7f87d81b8">push_back</a>(const_reference item) iterator push_back(const _reference item)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#a4c6ffff3bf08b92939aa2fc516edfba">range</a>(size_t grainsize=1)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#3d09ccfb581b879ae64203741035e193">range</a>(size_t grainsize=1) const </td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>range_type</b> typedef (defined in <a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a>)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#5e220926d09236d98f04fe0721e5f9a1">rbegin</a>()</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#9f9c103e18d5f212703805354074ad44">rbegin</a>() const </td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>reference</b> typedef (defined in <a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a>)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#290119a4eb43cd6a9e98fa17016ba3c2">rend</a>()</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#d438b9b32ea3a8ffb703015b6dce055b">rend</a>() const </td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#5a0ce05026994b010018f72cfdeb72c1">reserve</a>(size_type n)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#8dfb0cb0eef96d440b4dcf801807a718">resize</a>(size_type n)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#98ce6b2c6d2622f0c030b46dfac3880c">resize</a>(size_type n, const_reference t)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>reverse_iterator</b> typedef (defined in <a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a>)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>reverse_iterator</b> typedef (defined in <a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a>)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#03c6f4cf66532bf4cc907ee738a9a186">shrink_to_fit</a>()</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#715fe313c4a9c22731cc404dd80c9ec9">size</a>() const </td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>size_type</b> typedef (defined in <a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a>)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#96c9c4bd968ed3edb8dd276854d2dae0">swap</a>(concurrent_vector &amp;vector)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>value_type</b> typedef (defined in <a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a>)</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00166.html#da2444b28bb840d38f60d0030333a5fc">~concurrent_vector</a>()</td><td><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
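The member list above documents tbb::concurrent_vector&lt;T,A&gt;, whose push_back and grow_by calls may run concurrently with each other and with reads. A minimal usage sketch, assuming a C++03 compiler with the TBB headers on the include path (the Appender functor, the vector name, and the range bounds are illustrative, not part of the library):

    #include "tbb/blocked_range.h"
    #include "tbb/concurrent_vector.h"
    #include "tbb/parallel_for.h"

    // Appends the integers of a blocked_range to a shared vector.
    // push_back is safe to call concurrently; element order is unspecified.
    struct Appender {
        tbb::concurrent_vector<int>& v;
        explicit Appender(tbb::concurrent_vector<int>& v_) : v(v_) {}
        void operator()(const tbb::blocked_range<int>& r) const {
            for (int i = r.begin(); i != r.end(); ++i)
                v.push_back(i);
        }
    };

    int main() {
        tbb::concurrent_vector<int> v;
        tbb::parallel_for(tbb::blocked_range<int>(0, 1000), Appender(v));
        return int(v.size()) == 1000 ? 0 : 1;
    }

Unlike std::vector, growing never relocates existing elements, so earlier indices and iterators remain valid; clear() and reserve(), however, are not safe to call concurrently with other operations.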
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00031.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00031.html
new file mode 100644 (file)
index 0000000..5cc4f2a
--- /dev/null
@@ -0,0 +1,66 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>, including all inherited members.<p><table>
+  <tr class="memlist"><td><a class="el" href="a00168.html#3c03eb40955b933b01987222722ac4bd">allocator_type</a> typedef</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00168.html#01d7baf8e913ab2819e97917a2ac795f">begin</a>()</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00168.html#84afb3906a39e399cde1c950d6351300">begin</a>() const </td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00168.html#a8764176d4b6014c5d65f1051851abc8">clear</a>()</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>combine</b>(combine_func_t f_combine) (defined in <a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>)</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>combine_each</b>(combine_func_t f_combine) (defined in <a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>)</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_iterator</b> typedef (defined in <a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>)</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_pointer</b> typedef (defined in <a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>)</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_range_type</b> typedef (defined in <a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>)</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_reference</b> typedef (defined in <a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>)</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>difference_type</b> typedef (defined in <a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>)</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00168.html#72595886d0ac8fd0543f90038570510d">empty</a>() const </td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00168.html#2456ff88252fc921b01cd8907628a4ee">end</a>()</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00168.html#cb448bb4977ce366ceb7344085cc7050">end</a>() const </td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>enumerable_thread_specific</b> (defined in <a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>)</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00168.html#0cfa3a5c8f2be3bbf313d93b1fa8cdb3">enumerable_thread_specific</a>()</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00168.html#8d4b456ff9d7b289c73254eccc11db45">enumerable_thread_specific</a>(Finit finit)</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00168.html#7bce6829981c9efe3f59cae2355e383e">enumerable_thread_specific</a>(const T &amp;exemplar)</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>enumerable_thread_specific</b>(const enumerable_thread_specific&lt; U, Alloc, Cachetype &gt; &amp;other) (defined in <a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>)</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>enumerable_thread_specific</b>(const enumerable_thread_specific &amp;other) (defined in <a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>)</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>iterator</b> typedef (defined in <a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>)</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00168.html#c8d5265ccbd5e4485996b3f3baaa5ba1">local</a>()</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00168.html#7dc79058d2832f7447de8e691c3455ea">local</a>(bool &amp;exists)</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>operator=</b>(const enumerable_thread_specific &amp;other) (defined in <a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>)</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>operator=</b>(const enumerable_thread_specific&lt; U, Alloc, Cachetype &gt; &amp;other) (defined in <a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>)</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>pointer</b> typedef (defined in <a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>)</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00168.html#b22c54990b2362cd93c1a8f73de140bc">range</a>(size_t grainsize=1)</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00168.html#3b068000cf4dbf9b40f8bb7e3fc53e0b">range</a>(size_t grainsize=1) const </td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>range_type</b> typedef (defined in <a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>)</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>reference</b> typedef (defined in <a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>)</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00168.html#33fd6593da1ed14340f10f67d5a69130">size</a>() const </td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>size_type</b> typedef (defined in <a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>)</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>value_type</b> typedef (defined in <a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>)</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00168.html#5a7907d9e3e5b18e7a7b55211ef3213f">~enumerable_thread_specific</a>()</td><td><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
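A short sketch of how the enumerable_thread_specific members listed above are typically combined: local() gives each worker thread its own lock-free element, and combine() reduces across them afterwards. The names Counters and Count are illustrative only:

    #include <functional>
    #include "tbb/blocked_range.h"
    #include "tbb/enumerable_thread_specific.h"
    #include "tbb/parallel_for.h"

    typedef tbb::enumerable_thread_specific<long> Counters;

    // Each worker thread accumulates into its own local() element.
    struct Count {
        Counters& counters;
        explicit Count(Counters& c) : counters(c) {}
        void operator()(const tbb::blocked_range<int>& r) const {
            counters.local() += long(r.size());
        }
    };

    int main() {
        Counters counters(0L);   // exemplar constructor: each per-thread copy starts at 0
        tbb::parallel_for(tbb::blocked_range<int>(0, 1000000), Count(counters));
        long total = counters.combine(std::plus<long>());   // reduce over all threads
        return total == 1000000 ? 0 : 1;
    }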
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00034.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00034.html
new file mode 100644 (file)
index 0000000..3462ed3
--- /dev/null
@@ -0,0 +1,49 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::mutex Member List</h1>This is the complete list of members for <a class="el" href="a00177.html">tbb::mutex</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>DESTROYED</b> enum value (defined in <a class="el" href="a00177.html">tbb::mutex</a>)</td><td><a class="el" href="a00177.html">tbb::mutex</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>HELD</b> enum value (defined in <a class="el" href="a00177.html">tbb::mutex</a>)</td><td><a class="el" href="a00177.html">tbb::mutex</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>INITIALIZED</b> enum value (defined in <a class="el" href="a00177.html">tbb::mutex</a>)</td><td><a class="el" href="a00177.html">tbb::mutex</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>is_fair_mutex</b> (defined in <a class="el" href="a00177.html">tbb::mutex</a>)</td><td><a class="el" href="a00177.html">tbb::mutex</a></td><td><code> [static]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>is_recursive_mutex</b> (defined in <a class="el" href="a00177.html">tbb::mutex</a>)</td><td><a class="el" href="a00177.html">tbb::mutex</a></td><td><code> [static]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>is_rw_mutex</b> (defined in <a class="el" href="a00177.html">tbb::mutex</a>)</td><td><a class="el" href="a00177.html">tbb::mutex</a></td><td><code> [static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00177.html#4470e61c24c129a0299ca6c17240adbb">lock</a>()</td><td><a class="el" href="a00177.html">tbb::mutex</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00177.html#05313cb77d4f85213103d4dab74ed454">mutex</a>()</td><td><a class="el" href="a00177.html">tbb::mutex</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>native_handle</b>() (defined in <a class="el" href="a00177.html">tbb::mutex</a>)</td><td><a class="el" href="a00177.html">tbb::mutex</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00177.html#9f1ec84d5815263ceae853f06ddb4cac">native_handle_type</a> typedef</td><td><a class="el" href="a00177.html">tbb::mutex</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>native_handle_type</b> typedef (defined in <a class="el" href="a00177.html">tbb::mutex</a>)</td><td><a class="el" href="a00177.html">tbb::mutex</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>scoped_lock</b> (defined in <a class="el" href="a00177.html">tbb::mutex</a>)</td><td><a class="el" href="a00177.html">tbb::mutex</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00177.html#795649a185b0d6af6dc81c5f378616dd">set_state</a>(state_t to)</td><td><a class="el" href="a00177.html">tbb::mutex</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>state_t</b> enum name (defined in <a class="el" href="a00177.html">tbb::mutex</a>)</td><td><a class="el" href="a00177.html">tbb::mutex</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00177.html#4331652c79dea1c1131bd59ab161b234">try_lock</a>()</td><td><a class="el" href="a00177.html">tbb::mutex</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00177.html#5fc9ef443ae75d966695546be399cc6b">unlock</a>()</td><td><a class="el" href="a00177.html">tbb::mutex</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>~mutex</b>() (defined in <a class="el" href="a00177.html">tbb::mutex</a>)</td><td><a class="el" href="a00177.html">tbb::mutex</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
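tbb::mutex is normally used through its scoped_lock rather than through raw lock()/unlock() calls. A minimal sketch (the counter and function names are illustrative):

    #include "tbb/mutex.h"

    tbb::mutex counter_mutex;
    long counter = 0;

    void increment() {
        tbb::mutex::scoped_lock lock(counter_mutex);   // acquired here...
        ++counter;
    }                                                  // ...released when lock is destroyed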
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00035.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00035.html
new file mode 100644 (file)
index 0000000..8b03a51
--- /dev/null
@@ -0,0 +1,39 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::mutex::scoped_lock Member List</h1>This is the complete list of members for <a class="el" href="a00178.html">tbb::mutex::scoped_lock</a>, including all inherited members.<p><table>
+  <tr class="memlist"><td><a class="el" href="a00178.html#862e022841cdc522e4296a5533b22efd">acquire</a>(mutex &amp;mutex)</td><td><a class="el" href="a00178.html">tbb::mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>mutex</b> (defined in <a class="el" href="a00178.html">tbb::mutex::scoped_lock</a>)</td><td><a class="el" href="a00178.html">tbb::mutex::scoped_lock</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00178.html#0d51d18cd99df3b2e93bf07378d0992c">release</a>()</td><td><a class="el" href="a00178.html">tbb::mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00178.html#1d403ae51b484df5d86d85ae38f11e6e">scoped_lock</a>()</td><td><a class="el" href="a00178.html">tbb::mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00178.html#605a6b9af0f8cdabdf81825e0de99600">scoped_lock</a>(mutex &amp;mutex)</td><td><a class="el" href="a00178.html">tbb::mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00178.html#591e0c49b82bcedffcbe0923f1b915ec">try_acquire</a>(mutex &amp;mutex)</td><td><a class="el" href="a00178.html">tbb::mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00178.html#0ebbbecaf4311e9df7362cb76ceaa368">~scoped_lock</a>()</td><td><a class="el" href="a00178.html">tbb::mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
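The scoped_lock members above also support a non-blocking pattern: default-construct the lock, then call try_acquire(). An illustrative sketch:

    #include "tbb/mutex.h"

    // Returns false without blocking if the mutex is currently held elsewhere.
    bool try_increment(tbb::mutex& m, long& counter) {
        tbb::mutex::scoped_lock lock;      // holds no mutex yet
        if (!lock.try_acquire(m))
            return false;
        ++counter;
        lock.release();                    // optional: the destructor would also release
        return true;
    }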
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00036.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00036.html
new file mode 100644 (file)
index 0000000..9d6b684
--- /dev/null
@@ -0,0 +1,36 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::null_mutex Member List</h1>This is the complete list of members for <a class="el" href="a00179.html">tbb::null_mutex</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>is_fair_mutex</b> (defined in <a class="el" href="a00179.html">tbb::null_mutex</a>)</td><td><a class="el" href="a00179.html">tbb::null_mutex</a></td><td><code> [static]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>is_recursive_mutex</b> (defined in <a class="el" href="a00179.html">tbb::null_mutex</a>)</td><td><a class="el" href="a00179.html">tbb::null_mutex</a></td><td><code> [static]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>is_rw_mutex</b> (defined in <a class="el" href="a00179.html">tbb::null_mutex</a>)</td><td><a class="el" href="a00179.html">tbb::null_mutex</a></td><td><code> [static]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>null_mutex</b>() (defined in <a class="el" href="a00179.html">tbb::null_mutex</a>)</td><td><a class="el" href="a00179.html">tbb::null_mutex</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
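tbb::null_mutex exposes the same interface as the real mutexes, but every operation is a no-op, which makes it useful as a template argument that compiles locking away. A sketch under that assumption (the Counter template is illustrative):

    #include "tbb/null_mutex.h"
    #include "tbb/spin_mutex.h"

    // A counter whose locking policy is a template parameter: pass tbb::null_mutex
    // when the object is never shared between threads.
    template<typename Mutex>
    class Counter {
        Mutex mutex;
        long value;
    public:
        Counter() : value(0) {}
        void increment() {
            typename Mutex::scoped_lock lock(mutex);   // a no-op for tbb::null_mutex
            ++value;
        }
    };

    Counter<tbb::spin_mutex> shared_counter;    // real mutual exclusion
    Counter<tbb::null_mutex> private_counter;   // zero locking overhead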
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00037.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00037.html
new file mode 100644 (file)
index 0000000..ffaf111
--- /dev/null
@@ -0,0 +1,38 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::null_mutex::scoped_lock Member List</h1>This is the complete list of members for <a class="el" href="a00180.html">tbb::null_mutex::scoped_lock</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>acquire</b>(null_mutex &amp;) (defined in <a class="el" href="a00180.html">tbb::null_mutex::scoped_lock</a>)</td><td><a class="el" href="a00180.html">tbb::null_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>release</b>() (defined in <a class="el" href="a00180.html">tbb::null_mutex::scoped_lock</a>)</td><td><a class="el" href="a00180.html">tbb::null_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>scoped_lock</b>() (defined in <a class="el" href="a00180.html">tbb::null_mutex::scoped_lock</a>)</td><td><a class="el" href="a00180.html">tbb::null_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>scoped_lock</b>(null_mutex &amp;) (defined in <a class="el" href="a00180.html">tbb::null_mutex::scoped_lock</a>)</td><td><a class="el" href="a00180.html">tbb::null_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>try_acquire</b>(null_mutex &amp;) (defined in <a class="el" href="a00180.html">tbb::null_mutex::scoped_lock</a>)</td><td><a class="el" href="a00180.html">tbb::null_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>~scoped_lock</b>() (defined in <a class="el" href="a00180.html">tbb::null_mutex::scoped_lock</a>)</td><td><a class="el" href="a00180.html">tbb::null_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00038.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00038.html
new file mode 100644 (file)
index 0000000..de2569e
--- /dev/null
@@ -0,0 +1,36 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::null_rw_mutex Member List</h1>This is the complete list of members for <a class="el" href="a00181.html">tbb::null_rw_mutex</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>is_fair_mutex</b> (defined in <a class="el" href="a00181.html">tbb::null_rw_mutex</a>)</td><td><a class="el" href="a00181.html">tbb::null_rw_mutex</a></td><td><code> [static]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>is_recursive_mutex</b> (defined in <a class="el" href="a00181.html">tbb::null_rw_mutex</a>)</td><td><a class="el" href="a00181.html">tbb::null_rw_mutex</a></td><td><code> [static]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>is_rw_mutex</b> (defined in <a class="el" href="a00181.html">tbb::null_rw_mutex</a>)</td><td><a class="el" href="a00181.html">tbb::null_rw_mutex</a></td><td><code> [static]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>null_rw_mutex</b>() (defined in <a class="el" href="a00181.html">tbb::null_rw_mutex</a>)</td><td><a class="el" href="a00181.html">tbb::null_rw_mutex</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00039.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00039.html
new file mode 100644 (file)
index 0000000..50e96a0
--- /dev/null
@@ -0,0 +1,40 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::null_rw_mutex::scoped_lock Member List</h1>This is the complete list of members for <a class="el" href="a00182.html">tbb::null_rw_mutex::scoped_lock</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>acquire</b>(null_rw_mutex &amp;, bool=true) (defined in <a class="el" href="a00182.html">tbb::null_rw_mutex::scoped_lock</a>)</td><td><a class="el" href="a00182.html">tbb::null_rw_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>downgrade_to_reader</b>() (defined in <a class="el" href="a00182.html">tbb::null_rw_mutex::scoped_lock</a>)</td><td><a class="el" href="a00182.html">tbb::null_rw_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>release</b>() (defined in <a class="el" href="a00182.html">tbb::null_rw_mutex::scoped_lock</a>)</td><td><a class="el" href="a00182.html">tbb::null_rw_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>scoped_lock</b>() (defined in <a class="el" href="a00182.html">tbb::null_rw_mutex::scoped_lock</a>)</td><td><a class="el" href="a00182.html">tbb::null_rw_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>scoped_lock</b>(null_rw_mutex &amp;, bool=true) (defined in <a class="el" href="a00182.html">tbb::null_rw_mutex::scoped_lock</a>)</td><td><a class="el" href="a00182.html">tbb::null_rw_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>try_acquire</b>(null_rw_mutex &amp;, bool=true) (defined in <a class="el" href="a00182.html">tbb::null_rw_mutex::scoped_lock</a>)</td><td><a class="el" href="a00182.html">tbb::null_rw_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>upgrade_to_writer</b>() (defined in <a class="el" href="a00182.html">tbb::null_rw_mutex::scoped_lock</a>)</td><td><a class="el" href="a00182.html">tbb::null_rw_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>~scoped_lock</b>() (defined in <a class="el" href="a00182.html">tbb::null_rw_mutex::scoped_lock</a>)</td><td><a class="el" href="a00182.html">tbb::null_rw_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
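Like null_mutex, null_rw_mutex and its scoped_lock mirror the reader-writer interface (acquire with a writer flag, upgrade_to_writer, downgrade_to_reader) while doing nothing, so the same template code can be instantiated with or without locking. An illustrative sketch, assuming tbb::spin_rw_mutex as the "real" counterpart:

    #include "tbb/null_rw_mutex.h"
    #include "tbb/spin_rw_mutex.h"

    // Reader/writer access through a mutex type chosen at compile time.
    // With tbb::null_rw_mutex every scoped_lock operation is a no-op.
    template<typename RWMutex>
    int read_or_update(RWMutex& m, int& data, bool update) {
        typename RWMutex::scoped_lock lock(m, /*write=*/update);
        if (update)
            ++data;
        return data;
    }

    // read_or_update(some_spin_rw_mutex, data, true);   // real writer lock
    // read_or_update(some_null_rw_mutex, data, true);   // locking compiled away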
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00040.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00040.html
new file mode 100644 (file)
index 0000000..270bb0f
--- /dev/null
@@ -0,0 +1,34 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::parallel_do_feeder&lt; Item &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00183.html">tbb::parallel_do_feeder&lt; Item &gt;</a>, including all inherited members.<p><table>
+  <tr class="memlist"><td><a class="el" href="a00183.html#40baaf0f6856f4491dd0adf896c93516">add</a>(const Item &amp;item)</td><td><a class="el" href="a00183.html">tbb::parallel_do_feeder&lt; Item &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::parallel_do_feeder_impl</b> (defined in <a class="el" href="a00183.html">tbb::parallel_do_feeder&lt; Item &gt;</a>)</td><td><a class="el" href="a00183.html">tbb::parallel_do_feeder&lt; Item &gt;</a></td><td><code> [friend]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
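parallel_do_feeder::add() is how a parallel_do body injects work it discovers while processing an item. A minimal sketch (the Halve body and the seed values are arbitrary):

    #include <list>
    #include "tbb/parallel_do.h"

    // The body receives the feeder as a second argument and may add further items.
    struct Halve {
        void operator()(int item, tbb::parallel_do_feeder<int>& feeder) const {
            if (item > 1)
                feeder.add(item / 2);   // newly found work joins the same parallel_do
        }
    };

    int main() {
        std::list<int> roots;
        roots.push_back(64);
        roots.push_back(100);
        tbb::parallel_do(roots.begin(), roots.end(), Halve());
        return 0;
    }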
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00041.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00041.html
new file mode 100644 (file)
index 0000000..86fcbe7
--- /dev/null
@@ -0,0 +1,33 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::pre_scan_tag Member List</h1>This is the complete list of members for <a class="el" href="a00186.html">tbb::pre_scan_tag</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>is_final_scan</b>() (defined in <a class="el" href="a00186.html">tbb::pre_scan_tag</a>)</td><td><a class="el" href="a00186.html">tbb::pre_scan_tag</a></td><td><code> [inline, static]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00042.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00042.html
new file mode 100644 (file)
index 0000000..df5ba30
--- /dev/null
@@ -0,0 +1,33 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::final_scan_tag Member List</h1>This is the complete list of members for <a class="el" href="a00171.html">tbb::final_scan_tag</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>is_final_scan</b>() (defined in <a class="el" href="a00171.html">tbb::final_scan_tag</a>)</td><td><a class="el" href="a00171.html">tbb::final_scan_tag</a></td><td><code> [inline, static]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
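pre_scan_tag and final_scan_tag (this page and the previous one) distinguish the two passes a parallel_scan body may receive: is_final_scan() is false during the summary-only pass and true when results must be written. A prefix-sum sketch in the style of the TBB reference (array names and types are illustrative):

    #include "tbb/blocked_range.h"
    #include "tbb/parallel_scan.h"

    // Prefix-sum body: a pre_scan_tag pass only accumulates, a final_scan_tag pass
    // also writes the running sums to the output array.
    struct PrefixSum {
        const int* x;
        int* y;
        int sum;
        PrefixSum(const int* x_, int* y_) : x(x_), y(y_), sum(0) {}
        PrefixSum(PrefixSum& b, tbb::split) : x(b.x), y(b.y), sum(0) {}

        template<typename Tag>
        void operator()(const tbb::blocked_range<int>& r, Tag) {
            int temp = sum;
            for (int i = r.begin(); i != r.end(); ++i) {
                temp += x[i];
                if (Tag::is_final_scan())
                    y[i] = temp;
            }
            sum = temp;
        }
        void reverse_join(PrefixSum& left) { sum = left.sum + sum; }
        void assign(PrefixSum& b) { sum = b.sum; }
    };

    void prefix_sum(const int* x, int* y, int n) {
        PrefixSum body(x, y);
        tbb::parallel_scan(tbb::blocked_range<int>(0, n), body);
    }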
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00043.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00043.html
new file mode 100644 (file)
index 0000000..8ed2a35
--- /dev/null
@@ -0,0 +1,37 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::parallel_while&lt; Body &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00184.html">tbb::parallel_while&lt; Body &gt;</a>, including all inherited members.<p><table>
+  <tr class="memlist"><td><a class="el" href="a00184.html#e131c560057a58229992b61eb8dba4c6">add</a>(const value_type &amp;item)</td><td><a class="el" href="a00184.html">tbb::parallel_while&lt; Body &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00184.html#36e26ba3880c7bcf804a97ba0cbe133f">parallel_while</a>()</td><td><a class="el" href="a00184.html">tbb::parallel_while&lt; Body &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00184.html#b32a0a6e5e09ebb7fad3e6652c19afe5">run</a>(Stream &amp;stream, const Body &amp;body)</td><td><a class="el" href="a00184.html">tbb::parallel_while&lt; Body &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00184.html#fa297e53d3af2a101e712bc200233e9c">value_type</a> typedef</td><td><a class="el" href="a00184.html">tbb::parallel_while&lt; Body &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00184.html#6fcfc973cc56b79c6d0fbb8a31be7e84">~parallel_while</a>()</td><td><a class="el" href="a00184.html">tbb::parallel_while&lt; Body &gt;</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
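parallel_while::run() pulls initial items from a user-supplied stream via pop_if_present() and applies the body to each; add() can inject further items while run() is active. For new code tbb::parallel_do is generally preferred, but an illustrative sketch of this interface follows (Node, NodeStream, and Double are made-up types):

    #include "tbb/parallel_while.h"

    struct Node { int value; Node* next; };   // hypothetical work-item type

    // Stream: pop_if_present hands out the initial items one at a time.
    struct NodeStream {
        Node* head;
        explicit NodeStream(Node* h) : head(h) {}
        bool pop_if_present(Node*& item) {
            if (!head) return false;
            item = head;
            head = head->next;
            return true;
        }
    };

    // Body: processes one item; the argument_type typedef is required.
    struct Double {
        typedef Node* argument_type;
        void operator()(Node*& n) const { n->value *= 2; }
    };

    void double_all(Node* list) {
        NodeStream stream(list);
        tbb::parallel_while<Double> w;
        w.run(stream, Double());   // items may also be injected with w.add() while run() executes
    }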
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00044.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00044.html
new file mode 100644 (file)
index 0000000..5aa90ef
--- /dev/null
@@ -0,0 +1,36 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::simple_partitioner Member List</h1>This is the complete list of members for <a class="el" href="a00198.html">tbb::simple_partitioner</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>internal::start_for</b> (defined in <a class="el" href="a00198.html">tbb::simple_partitioner</a>)</td><td><a class="el" href="a00198.html">tbb::simple_partitioner</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::start_reduce</b> (defined in <a class="el" href="a00198.html">tbb::simple_partitioner</a>)</td><td><a class="el" href="a00198.html">tbb::simple_partitioner</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::start_scan</b> (defined in <a class="el" href="a00198.html">tbb::simple_partitioner</a>)</td><td><a class="el" href="a00198.html">tbb::simple_partitioner</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>simple_partitioner</b>() (defined in <a class="el" href="a00198.html">tbb::simple_partitioner</a>)</td><td><a class="el" href="a00198.html">tbb::simple_partitioner</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
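
For context, simple_partitioner is normally passed as the last argument of a loop template such as tbb::parallel_for; splitting stops only at the range grainsize, so the grainsize is what controls chunk size. A sketch, with the Scale functor and the grainsize of 1024 chosen arbitrarily for illustration:

    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"
    #include "tbb/partitioner.h"

    struct Scale {
        float* a; float factor;
        Scale( float* a_, float f ) : a(a_), factor(f) {}
        void operator()( const tbb::blocked_range<size_t>& r ) const {
            for( size_t i = r.begin(); i != r.end(); ++i )
                a[i] *= factor;
        }
    };

    void scale_all( float* a, size_t n, float factor ) {
        // Chunks end up no larger than the grainsize of 1024.
        tbb::parallel_for( tbb::blocked_range<size_t>( 0, n, 1024 ),
                           Scale( a, factor ),
                           tbb::simple_partitioner() );
    }
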
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00046.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00046.html
new file mode 100644 (file)
index 0000000..e1285d5
--- /dev/null
@@ -0,0 +1,36 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::auto_partitioner Member List</h1>This is the complete list of members for <a class="el" href="a00150.html">tbb::auto_partitioner</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>auto_partitioner</b>() (defined in <a class="el" href="a00150.html">tbb::auto_partitioner</a>)</td><td><a class="el" href="a00150.html">tbb::auto_partitioner</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::start_for</b> (defined in <a class="el" href="a00150.html">tbb::auto_partitioner</a>)</td><td><a class="el" href="a00150.html">tbb::auto_partitioner</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::start_reduce</b> (defined in <a class="el" href="a00150.html">tbb::auto_partitioner</a>)</td><td><a class="el" href="a00150.html">tbb::auto_partitioner</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::start_scan</b> (defined in <a class="el" href="a00150.html">tbb::auto_partitioner</a>)</td><td><a class="el" href="a00150.html">tbb::auto_partitioner</a></td><td><code> [friend]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
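
auto_partitioner is used the same way but chooses chunk sizes on its own, so the range grainsize can usually stay at its default of 1. A small sketch, with FillBody invented for illustration:

    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"
    #include "tbb/partitioner.h"

    struct FillBody {
        int* a;
        explicit FillBody( int* a_ ) : a(a_) {}
        void operator()( const tbb::blocked_range<size_t>& r ) const {
            for( size_t i = r.begin(); i != r.end(); ++i )
                a[i] = int(i);
        }
    };

    void fill( int* a, size_t n ) {
        // Default grainsize of 1; the partitioner limits actual splitting.
        tbb::parallel_for( tbb::blocked_range<size_t>( 0, n ),
                           FillBody( a ),
                           tbb::auto_partitioner() );
    }
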
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00048.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00048.html
new file mode 100644 (file)
index 0000000..f81c01b
--- /dev/null
@@ -0,0 +1,38 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::affinity_partitioner Member List</h1>This is the complete list of members for <a class="el" href="a00145.html">tbb::affinity_partitioner</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>affinity_partitioner</b>() (defined in <a class="el" href="a00145.html">tbb::affinity_partitioner</a>)</td><td><a class="el" href="a00145.html">tbb::affinity_partitioner</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::affinity_partition_type</b> (defined in <a class="el" href="a00145.html">tbb::affinity_partitioner</a>)</td><td><a class="el" href="a00145.html">tbb::affinity_partitioner</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::start_for</b> (defined in <a class="el" href="a00145.html">tbb::affinity_partitioner</a>)</td><td><a class="el" href="a00145.html">tbb::affinity_partitioner</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::start_reduce</b> (defined in <a class="el" href="a00145.html">tbb::affinity_partitioner</a>)</td><td><a class="el" href="a00145.html">tbb::affinity_partitioner</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::start_reduce_with_affinity</b> (defined in <a class="el" href="a00145.html">tbb::affinity_partitioner</a>)</td><td><a class="el" href="a00145.html">tbb::affinity_partitioner</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::start_scan</b> (defined in <a class="el" href="a00145.html">tbb::affinity_partitioner</a>)</td><td><a class="el" href="a00145.html">tbb::affinity_partitioner</a></td><td><code> [friend]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
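
Unlike the other partitioners, affinity_partitioner only pays off when the same object is passed (by reference) to repeated loops over the same data: it records which thread worked on which chunk and replays that mapping to improve cache reuse. A sketch; ScaleInPlace and the sweep loop are invented for illustration:

    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"
    #include "tbb/partitioner.h"

    struct ScaleInPlace {
        double* x;
        explicit ScaleInPlace( double* x_ ) : x(x_) {}
        void operator()( const tbb::blocked_range<size_t>& r ) const {
            for( size_t i = r.begin(); i != r.end(); ++i )
                x[i] *= 0.5;
        }
    };

    void damp_repeatedly( double* x, size_t n, int sweeps ) {
        tbb::affinity_partitioner ap;            // must outlive all the calls below
        for( int s = 0; s < sweeps; ++s )
            tbb::parallel_for( tbb::blocked_range<size_t>( 0, n ),
                               ScaleInPlace( x ), ap );   // same 'ap' each sweep
    }
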
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00049.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00049.html
new file mode 100644 (file)
index 0000000..7ccc01f
--- /dev/null
@@ -0,0 +1,42 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::filter Member List</h1>This is the complete list of members for <a class="el" href="a00169.html">tbb::filter</a>, including all inherited members.<p><table>
+  <tr class="memlist"><td><a class="el" href="a00169.html#56275eb889c77c4807967133e21401bd">finalize</a>(void *)</td><td><a class="el" href="a00169.html">tbb::filter</a></td><td><code> [inline, virtual]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::pipeline_root_task</b> (defined in <a class="el" href="a00169.html">tbb::filter</a>)</td><td><a class="el" href="a00169.html">tbb::filter</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::stage_task</b> (defined in <a class="el" href="a00169.html">tbb::filter</a>)</td><td><a class="el" href="a00169.html">tbb::filter</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00169.html#15c29cae5d237e6d63dbfe5c94af89d5">is_bound</a>() const </td><td><a class="el" href="a00169.html">tbb::filter</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00169.html#cd53206c4795ef2df5df26b795caf692">is_ordered</a>() const </td><td><a class="el" href="a00169.html">tbb::filter</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00169.html#fcfec27656a69ff2072802ac001e936f">is_serial</a>() const </td><td><a class="el" href="a00169.html">tbb::filter</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00169.html#fa1b3dc1f4f47563ccab7f4d92f5b543">operator()</a>(void *item)=0</td><td><a class="el" href="a00169.html">tbb::filter</a></td><td><code> [pure virtual]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>pipeline</b> (defined in <a class="el" href="a00169.html">tbb::filter</a>)</td><td><a class="el" href="a00169.html">tbb::filter</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>thread_bound_filter</b> (defined in <a class="el" href="a00169.html">tbb::filter</a>)</td><td><a class="el" href="a00169.html">tbb::filter</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00169.html#66d159f362293e3964ba3da8bc1d2604">~filter</a>()</td><td><a class="el" href="a00169.html">tbb::filter</a></td><td><code> [virtual]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
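
tbb::filter is the abstract stage type of the classic (non-lambda) pipeline API: a concrete stage derives from it, picks a mode in its constructor, and implements operator(), with the input stage returning NULL to end the stream. A sketch of two such stages; InputFilter and SquareFilter are made-up names, and the heap-allocated ints they pass along are expected to be freed by a later stage (see the pipeline sketch further below):

    #include <cstddef>
    #include "tbb/pipeline.h"

    // Serial input stage: produces the integers n, n-1, ..., 1, then NULL.
    class InputFilter : public tbb::filter {
        int remaining;
    public:
        explicit InputFilter( int n )
            : tbb::filter( tbb::filter::serial_in_order ), remaining(n) {}
        void* operator()( void* ) {
            if( remaining == 0 ) return NULL;   // NULL ends the stream
            return new int( remaining-- );
        }
    };

    // Parallel transform stage: squares each item in place.
    class SquareFilter : public tbb::filter {
    public:
        SquareFilter() : tbb::filter( tbb::filter::parallel ) {}
        void* operator()( void* item ) {
            int* p = static_cast<int*>( item );
            *p *= *p;
            return item;                        // handed to the next stage
        }
    };
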
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00050.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00050.html
new file mode 100644 (file)
index 0000000..928c243
--- /dev/null
@@ -0,0 +1,47 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::thread_bound_filter Member List</h1>This is the complete list of members for <a class="el" href="a00214.html">tbb::thread_bound_filter</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>end_of_stream</b> enum value (defined in <a class="el" href="a00214.html">tbb::thread_bound_filter</a>)</td><td><a class="el" href="a00214.html">tbb::thread_bound_filter</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00169.html#56275eb889c77c4807967133e21401bd">finalize</a>(void *)</td><td><a class="el" href="a00169.html">tbb::filter</a></td><td><code> [inline, virtual]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::pipeline_root_task</b> (defined in <a class="el" href="a00169.html">tbb::filter</a>)</td><td><a class="el" href="a00169.html">tbb::filter</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::stage_task</b> (defined in <a class="el" href="a00169.html">tbb::filter</a>)</td><td><a class="el" href="a00169.html">tbb::filter</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00169.html#15c29cae5d237e6d63dbfe5c94af89d5">is_bound</a>() const </td><td><a class="el" href="a00169.html">tbb::filter</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00169.html#cd53206c4795ef2df5df26b795caf692">is_ordered</a>() const </td><td><a class="el" href="a00169.html">tbb::filter</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00169.html#fcfec27656a69ff2072802ac001e936f">is_serial</a>() const </td><td><a class="el" href="a00169.html">tbb::filter</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>item_not_available</b> enum value (defined in <a class="el" href="a00214.html">tbb::thread_bound_filter</a>)</td><td><a class="el" href="a00214.html">tbb::thread_bound_filter</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00169.html#fa1b3dc1f4f47563ccab7f4d92f5b543">operator()</a>(void *item)=0</td><td><a class="el" href="a00169.html">tbb::filter</a></td><td><code> [pure virtual]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00214.html#5e726bdc7fbd924c0b07bd558b1d4d5d">process_item</a>()</td><td><a class="el" href="a00214.html">tbb::thread_bound_filter</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>result_type</b> enum name (defined in <a class="el" href="a00214.html">tbb::thread_bound_filter</a>)</td><td><a class="el" href="a00214.html">tbb::thread_bound_filter</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>success</b> enum value (defined in <a class="el" href="a00214.html">tbb::thread_bound_filter</a>)</td><td><a class="el" href="a00214.html">tbb::thread_bound_filter</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>thread_bound_filter</b>(mode filter_mode) (defined in <a class="el" href="a00214.html">tbb::thread_bound_filter</a>)</td><td><a class="el" href="a00214.html">tbb::thread_bound_filter</a></td><td><code> [inline, protected]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00214.html#c4f90f2c771bce748beb9be734fa286c">try_process_item</a>()</td><td><a class="el" href="a00214.html">tbb::thread_bound_filter</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00169.html#66d159f362293e3964ba3da8bc1d2604">~filter</a>()</td><td><a class="el" href="a00169.html">tbb::filter</a></td><td><code> [virtual]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
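
thread_bound_filter is for stages that must run on one specific thread, for instance because they touch thread-affine state. The documented pattern is to run the pipeline on some other thread while the owning thread services the filter by calling process_item() until it reports end_of_stream. A sketch under those assumptions; OutputFilter, consume() and the already-assembled pipeline are hypothetical:

    #include <cstddef>
    #include "tbb/pipeline.h"
    #include "tbb/tbb_thread.h"

    void consume( void* item );   // hypothetical thread-affine sink

    class OutputFilter : public tbb::thread_bound_filter {
    public:
        OutputFilter() : tbb::thread_bound_filter( tbb::filter::serial_in_order ) {}
        void* operator()( void* item ) {
            consume( item );
            return NULL;
        }
    };

    void run_pipeline_elsewhere( tbb::pipeline* p ) { p->run( 8 ); }

    // 'p' is assumed to already contain an input stage and 'out' as its
    // last stage (added via add_filter).
    void drive_bound_stage( tbb::pipeline& p, OutputFilter& out ) {
        tbb::tbb_thread t( run_pipeline_elsewhere, &p );  // pipeline runs there
        while( out.process_item() != tbb::thread_bound_filter::end_of_stream )
            continue;                                     // this thread services 'out'
        t.join();
    }
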
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00051.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00051.html
new file mode 100644 (file)
index 0000000..a3d8e37
--- /dev/null
@@ -0,0 +1,44 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::pipeline Member List</h1>This is the complete list of members for <a class="el" href="a00185.html">tbb::pipeline</a>, including all inherited members.<p><table>
+  <tr class="memlist"><td><a class="el" href="a00185.html#38fb5c9c8395dd6f89a4ae2011a83e0d">add_filter</a>(filter &amp;filter_)</td><td><a class="el" href="a00185.html">tbb::pipeline</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00185.html#2c84aef5b834b555ee220b176e25931e">clear</a>()</td><td><a class="el" href="a00185.html">tbb::pipeline</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>filter</b> (defined in <a class="el" href="a00185.html">tbb::pipeline</a>)</td><td><a class="el" href="a00185.html">tbb::pipeline</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::pipeline_cleaner</b> (defined in <a class="el" href="a00185.html">tbb::pipeline</a>)</td><td><a class="el" href="a00185.html">tbb::pipeline</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::pipeline_root_task</b> (defined in <a class="el" href="a00185.html">tbb::pipeline</a>)</td><td><a class="el" href="a00185.html">tbb::pipeline</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::stage_task</b> (defined in <a class="el" href="a00185.html">tbb::pipeline</a>)</td><td><a class="el" href="a00185.html">tbb::pipeline</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00185.html#596dc3beba27099c4c8581cb419e1a59">pipeline</a>()</td><td><a class="el" href="a00185.html">tbb::pipeline</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00185.html#f627616049b3fe36801f37ee40403ef8">run</a>(size_t max_number_of_live_tokens)</td><td><a class="el" href="a00185.html">tbb::pipeline</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00185.html#93d7fec8cd607b803dd2d79fb46bd260">run</a>(size_t max_number_of_live_tokens, tbb::task_group_context &amp;context)</td><td><a class="el" href="a00185.html">tbb::pipeline</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>tbb::interface5::internal::pipeline_proxy</b> (defined in <a class="el" href="a00185.html">tbb::pipeline</a>)</td><td><a class="el" href="a00185.html">tbb::pipeline</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>thread_bound_filter</b> (defined in <a class="el" href="a00185.html">tbb::pipeline</a>)</td><td><a class="el" href="a00185.html">tbb::pipeline</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00185.html#49513c6c24f9d5bbbb27edca5efe01c9">~pipeline</a>()</td><td><a class="el" href="a00185.html">tbb::pipeline</a></td><td><code> [virtual]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
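
Putting the classic API together: add_filter() attaches stages in order, run() processes the stream with a bound on in-flight tokens, and clear() detaches the filters before they go out of scope. A sketch reusing the InputFilter and SquareFilter from the earlier sketch, plus a made-up serial sink that prints and frees each item:

    #include <cstdio>
    #include "tbb/pipeline.h"

    // InputFilter and SquareFilter as sketched above.

    class PrintFilter : public tbb::filter {
    public:
        PrintFilter() : tbb::filter( tbb::filter::serial_in_order ) {}
        void* operator()( void* item ) {
            int* p = static_cast<int*>( item );
            std::printf( "%d\n", *p );
            delete p;
            return NULL;
        }
    };

    void run_squares( int n ) {
        InputFilter  in( n );
        SquareFilter square;
        PrintFilter  out;
        tbb::pipeline p;
        p.add_filter( in );       // stages run in the order they are added
        p.add_filter( square );
        p.add_filter( out );
        p.run( /*max_number_of_live_tokens=*/ 16 );
        p.clear();                // detach the filters before they are destroyed
    }
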
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00052.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00052.html
new file mode 100644 (file)
index 0000000..50d7678
--- /dev/null
@@ -0,0 +1,34 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::interface5::flow_control Member List</h1>This is the complete list of members for <a class="el" href="a00172.html">tbb::interface5::flow_control</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>internal::concrete_filter</b> (defined in <a class="el" href="a00172.html">tbb::interface5::flow_control</a>)</td><td><a class="el" href="a00172.html">tbb::interface5::flow_control</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>stop</b>() (defined in <a class="el" href="a00172.html">tbb::interface5::flow_control</a>)</td><td><a class="el" href="a00172.html">tbb::interface5::flow_control</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
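
flow_control only appears in the first stage of the lambda-friendly parallel_pipeline API: the input functor takes a flow_control& and calls stop() when the source is exhausted, and the value returned from that final invocation is discarded. A sketch assuming a C++11 compiler for the lambda; read_chars and the FILE* source are illustrative:

    #include <cstdio>
    #include "tbb/pipeline.h"

    // Input stage: one character per invocation, stop() at end of file.
    tbb::filter_t<void,char> read_chars( std::FILE* src ) {
        return tbb::make_filter<void,char>(
            tbb::filter::serial_in_order,
            [src]( tbb::flow_control& fc ) -> char {
                int c = std::fgetc( src );
                if( c == EOF ) { fc.stop(); return '\0'; }  // value ignored after stop()
                return static_cast<char>( c );
            } );
    }
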
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00053.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00053.html
new file mode 100644 (file)
index 0000000..863a77c
--- /dev/null
@@ -0,0 +1,41 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::interface5::filter_t&lt; T, U &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00170.html">tbb::interface5::filter_t&lt; T, U &gt;</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>clear</b>() (defined in <a class="el" href="a00170.html">tbb::interface5::filter_t&lt; T, U &gt;</a>)</td><td><a class="el" href="a00170.html">tbb::interface5::filter_t&lt; T, U &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>filter_t</b>() (defined in <a class="el" href="a00170.html">tbb::interface5::filter_t&lt; T, U &gt;</a>)</td><td><a class="el" href="a00170.html">tbb::interface5::filter_t&lt; T, U &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>filter_t</b>(const filter_t&lt; T, U &gt; &amp;rhs) (defined in <a class="el" href="a00170.html">tbb::interface5::filter_t&lt; T, U &gt;</a>)</td><td><a class="el" href="a00170.html">tbb::interface5::filter_t&lt; T, U &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>filter_t</b>(tbb::filter::mode mode, const Body &amp;body) (defined in <a class="el" href="a00170.html">tbb::interface5::filter_t&lt; T, U &gt;</a>)</td><td><a class="el" href="a00170.html">tbb::interface5::filter_t&lt; T, U &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::pipeline_proxy</b> (defined in <a class="el" href="a00170.html">tbb::interface5::filter_t&lt; T, U &gt;</a>)</td><td><a class="el" href="a00170.html">tbb::interface5::filter_t&lt; T, U &gt;</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00170.html#85c2892eff1fddcd06e28911e75838bd">make_filter</a>(tbb::filter::mode, const Body &amp;)</td><td><a class="el" href="a00170.html">tbb::interface5::filter_t&lt; T, U &gt;</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>operator &amp;</b> (defined in <a class="el" href="a00170.html">tbb::interface5::filter_t&lt; T, U &gt;</a>)</td><td><a class="el" href="a00170.html">tbb::interface5::filter_t&lt; T, U &gt;</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>operator=</b>(const filter_t&lt; T, U &gt; &amp;rhs) (defined in <a class="el" href="a00170.html">tbb::interface5::filter_t&lt; T, U &gt;</a>)</td><td><a class="el" href="a00170.html">tbb::interface5::filter_t&lt; T, U &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>~filter_t</b>() (defined in <a class="el" href="a00170.html">tbb::interface5::filter_t&lt; T, U &gt;</a>)</td><td><a class="el" href="a00170.html">tbb::interface5::filter_t&lt; T, U &gt;</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
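
filter_t values are composed with operator& into a filter_t<void,void> chain and handed to tbb::parallel_pipeline. A sketch building on read_chars from the flow_control sketch above (C++11 lambdas assumed; the uppercasing example is arbitrary):

    #include <cctype>
    #include <cstdio>
    #include "tbb/pipeline.h"

    tbb::filter_t<void,char> read_chars( std::FILE* src );   // from the sketch above

    void shout_file( std::FILE* in ) {
        tbb::filter_t<char,char> upper = tbb::make_filter<char,char>(
            tbb::filter::parallel,
            []( char c ) -> char {
                return static_cast<char>( std::toupper( static_cast<unsigned char>( c ) ) );
            } );
        tbb::filter_t<char,void> sink = tbb::make_filter<char,void>(
            tbb::filter::serial_in_order,
            []( char c ) { std::putchar( c ); } );
        tbb::parallel_pipeline( /*max_number_of_live_tokens=*/ 16,
                                read_chars( in ) & upper & sink );
    }
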
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00054.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00054.html
new file mode 100644 (file)
index 0000000..b07133c
--- /dev/null
@@ -0,0 +1,38 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::queuing_mutex Member List</h1>This is the complete list of members for <a class="el" href="a00187.html">tbb::queuing_mutex</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>internal_construct</b>() (defined in <a class="el" href="a00187.html">tbb::queuing_mutex</a>)</td><td><a class="el" href="a00187.html">tbb::queuing_mutex</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>is_fair_mutex</b> (defined in <a class="el" href="a00187.html">tbb::queuing_mutex</a>)</td><td><a class="el" href="a00187.html">tbb::queuing_mutex</a></td><td><code> [static]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>is_recursive_mutex</b> (defined in <a class="el" href="a00187.html">tbb::queuing_mutex</a>)</td><td><a class="el" href="a00187.html">tbb::queuing_mutex</a></td><td><code> [static]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>is_rw_mutex</b> (defined in <a class="el" href="a00187.html">tbb::queuing_mutex</a>)</td><td><a class="el" href="a00187.html">tbb::queuing_mutex</a></td><td><code> [static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00187.html#b389ad9c4db7293e4bdb5b8cda69ec04">queuing_mutex</a>()</td><td><a class="el" href="a00187.html">tbb::queuing_mutex</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>scoped_lock</b> (defined in <a class="el" href="a00187.html">tbb::queuing_mutex</a>)</td><td><a class="el" href="a00187.html">tbb::queuing_mutex</a></td><td><code> [friend]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
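
queuing_mutex is a fair, non-recursive, spinning mutex; it is locked through its nested scoped_lock rather than through lock()/unlock() methods. A minimal sketch (counter and bump() are illustrative):

    #include "tbb/queuing_mutex.h"

    tbb::queuing_mutex counter_mutex;
    long counter = 0;                  // example shared state

    void bump() {
        tbb::queuing_mutex::scoped_lock lock( counter_mutex );  // acquires here
        ++counter;
    }                                                           // releases in the destructor
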
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00055.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00055.html
new file mode 100644 (file)
index 0000000..b16bf2b
--- /dev/null
@@ -0,0 +1,38 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::queuing_mutex::scoped_lock Member List</h1>This is the complete list of members for <a class="el" href="a00188.html">tbb::queuing_mutex::scoped_lock</a>, including all inherited members.<p><table>
+  <tr class="memlist"><td><a class="el" href="a00188.html#533e4fc8355ee321206a0609c42d909d">acquire</a>(queuing_mutex &amp;m)</td><td><a class="el" href="a00188.html">tbb::queuing_mutex::scoped_lock</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00188.html#3bf2b8c87ff22115be9b2eac179f2d30">release</a>()</td><td><a class="el" href="a00188.html">tbb::queuing_mutex::scoped_lock</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00188.html#db0fa3967491014572e24d6607bdc971">scoped_lock</a>()</td><td><a class="el" href="a00188.html">tbb::queuing_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00188.html#9b51ef972f5618ac17caadb58841ab6d">scoped_lock</a>(queuing_mutex &amp;m)</td><td><a class="el" href="a00188.html">tbb::queuing_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00188.html#e5a014fb817599386a87170cf2cf51a9">try_acquire</a>(queuing_mutex &amp;m)</td><td><a class="el" href="a00188.html">tbb::queuing_mutex::scoped_lock</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00188.html#ac2c576a93570957d694192a5f491443">~scoped_lock</a>()</td><td><a class="el" href="a00188.html">tbb::queuing_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
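
A default-constructed scoped_lock holds nothing, which allows the deferred acquire()/try_acquire() pattern; release() can be called explicitly, otherwise the destructor releases. A sketch continuing the counter example above:

    #include "tbb/queuing_mutex.h"

    extern tbb::queuing_mutex counter_mutex;   // from the sketch above
    extern long counter;

    bool bump_if_uncontended() {
        tbb::queuing_mutex::scoped_lock lock;        // not holding any mutex yet
        if( !lock.try_acquire( counter_mutex ) )
            return false;                            // contended: give up without blocking
        ++counter;
        lock.release();                              // optional; the destructor would do it
        return true;
    }
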
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00056.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00056.html
new file mode 100644 (file)
index 0000000..611f6de
--- /dev/null
@@ -0,0 +1,39 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::queuing_rw_mutex Member List</h1>This is the complete list of members for <a class="el" href="a00189.html">tbb::queuing_rw_mutex</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>internal_construct</b>() (defined in <a class="el" href="a00189.html">tbb::queuing_rw_mutex</a>)</td><td><a class="el" href="a00189.html">tbb::queuing_rw_mutex</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>is_fair_mutex</b> (defined in <a class="el" href="a00189.html">tbb::queuing_rw_mutex</a>)</td><td><a class="el" href="a00189.html">tbb::queuing_rw_mutex</a></td><td><code> [static]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>is_recursive_mutex</b> (defined in <a class="el" href="a00189.html">tbb::queuing_rw_mutex</a>)</td><td><a class="el" href="a00189.html">tbb::queuing_rw_mutex</a></td><td><code> [static]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>is_rw_mutex</b> (defined in <a class="el" href="a00189.html">tbb::queuing_rw_mutex</a>)</td><td><a class="el" href="a00189.html">tbb::queuing_rw_mutex</a></td><td><code> [static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00189.html#85c90877c3447690ac4e2ac4ff8dea5e">queuing_rw_mutex</a>()</td><td><a class="el" href="a00189.html">tbb::queuing_rw_mutex</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>scoped_lock</b> (defined in <a class="el" href="a00189.html">tbb::queuing_rw_mutex</a>)</td><td><a class="el" href="a00189.html">tbb::queuing_rw_mutex</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00189.html#1ba73e3d95cfdf8323880bc623af9099">~queuing_rw_mutex</a>()</td><td><a class="el" href="a00189.html">tbb::queuing_rw_mutex</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00057.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00057.html
new file mode 100644 (file)
index 0000000..7ee7f18
--- /dev/null
@@ -0,0 +1,40 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::queuing_rw_mutex::scoped_lock Member List</h1>This is the complete list of members for <a class="el" href="a00190.html">tbb::queuing_rw_mutex::scoped_lock</a>, including all inherited members.<p><table>
+  <tr class="memlist"><td><a class="el" href="a00190.html#a8dd5ab8686e76de21587544dbb681e0">acquire</a>(queuing_rw_mutex &amp;m, bool write=true)</td><td><a class="el" href="a00190.html">tbb::queuing_rw_mutex::scoped_lock</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00190.html#0d2f93edf7b15ec4bcee138823220c52">downgrade_to_reader</a>()</td><td><a class="el" href="a00190.html">tbb::queuing_rw_mutex::scoped_lock</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00190.html#67ae221109ddc69510ab593874e435d4">release</a>()</td><td><a class="el" href="a00190.html">tbb::queuing_rw_mutex::scoped_lock</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00190.html#c62e365be7bcbba091c9ea7454a4d22c">scoped_lock</a>()</td><td><a class="el" href="a00190.html">tbb::queuing_rw_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00190.html#fbb8798792d3aebb136c46fc63d2529e">scoped_lock</a>(queuing_rw_mutex &amp;m, bool write=true)</td><td><a class="el" href="a00190.html">tbb::queuing_rw_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00190.html#2e4ff6c9ec2fee6682f95290d1f42baa">try_acquire</a>(queuing_rw_mutex &amp;m, bool write=true)</td><td><a class="el" href="a00190.html">tbb::queuing_rw_mutex::scoped_lock</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00190.html#11ba1da4a722c9e6f73339a52c487e82">upgrade_to_writer</a>()</td><td><a class="el" href="a00190.html">tbb::queuing_rw_mutex::scoped_lock</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00190.html#32c7d67a660d23ebbaab1a1d2826d31a">~scoped_lock</a>()</td><td><a class="el" href="a00190.html">tbb::queuing_rw_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
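
queuing_rw_mutex::scoped_lock takes the lock as reader or writer (the bool write parameter) and can change roles in place. upgrade_to_writer() returns false when the lock had to be released and re-acquired, so any condition checked under the read lock must be re-checked in that case. A sketch; Table, contains() and insert() are hypothetical:

    #include "tbb/queuing_rw_mutex.h"

    struct Table {                       // hypothetical container
        bool contains( int key ) const;
        void insert( int key );
    };

    tbb::queuing_rw_mutex table_mutex;

    void insert_if_absent( Table& table, int key ) {
        tbb::queuing_rw_mutex::scoped_lock lock( table_mutex, /*write=*/ false );
        if( !table.contains( key ) ) {
            // A false return means the lock was dropped while upgrading,
            // so the presence check has to be repeated.
            if( !lock.upgrade_to_writer() && table.contains( key ) )
                return;
            table.insert( key );
        }
    }
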
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00058.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00058.html
new file mode 100644 (file)
index 0000000..5e42a41
--- /dev/null
@@ -0,0 +1,46 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::interface5::reader_writer_lock Member List</h1>This is the complete list of members for <a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>active</b> enum value (defined in <a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a>)</td><td><a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>invalid</b> enum value (defined in <a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a>)</td><td><a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00191.html#2653d1a2d560059a51219a8ceab3ade9">lock</a>()</td><td><a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00191.html#d9d16a24d9f6c3dada73c6b9ff214f5b">lock_read</a>()</td><td><a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00191.html#c1431c4293e777efd9aab9a95c2a46e1">reader_writer_lock</a>()</td><td><a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>scoped_lock</b> (defined in <a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a>)</td><td><a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>scoped_lock_read</b> (defined in <a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a>)</td><td><a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00191.html#6f921f0d7c1812ceb5674418c8b6ccaf">status_t</a> enum name</td><td><a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00191.html#721eb173e154ab38292273e9266a9b07">try_lock</a>()</td><td><a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00191.html#595fb23952e3b89426b1f7938dea9b11">try_lock_read</a>()</td><td><a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00191.html#5113b32689305599b2c36b5831547704">unlock</a>()</td><td><a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>waiting</b> enum value (defined in <a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a>)</td><td><a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>waiting_nonblocking</b> enum value (defined in <a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a>)</td><td><a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00191.html#5135f64f7b7339017f33d956445edbee">~reader_writer_lock</a>()</td><td><a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
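
reader_writer_lock (defined in tbb::interface5 and exported into namespace tbb) is a writer-preference lock with both a method interface and scoped guards. A sketch of the method interface; config_value and the two accessors are illustrative, and the RAII form is shown after the scoped-guard member lists below:

    #include "tbb/reader_writer_lock.h"

    tbb::reader_writer_lock config_lock;
    int config_value = 0;              // example shared state

    int read_config() {
        config_lock.lock_read();       // shared access
        int v = config_value;
        config_lock.unlock();
        return v;
    }

    void write_config( int v ) {
        config_lock.lock();            // exclusive access
        config_value = v;
        config_lock.unlock();
    }
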
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00059.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00059.html
new file mode 100644 (file)
index 0000000..a9f0839
--- /dev/null
@@ -0,0 +1,37 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::interface5::reader_writer_lock::scoped_lock Member List</h1>This is the complete list of members for <a class="el" href="a00192.html">tbb::interface5::reader_writer_lock::scoped_lock</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>operator delete</b>(void *p) (defined in <a class="el" href="a00192.html">tbb::interface5::reader_writer_lock::scoped_lock</a>)</td><td><a class="el" href="a00192.html">tbb::interface5::reader_writer_lock::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>operator new</b>(size_t s) (defined in <a class="el" href="a00192.html">tbb::interface5::reader_writer_lock::scoped_lock</a>)</td><td><a class="el" href="a00192.html">tbb::interface5::reader_writer_lock::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>reader_writer_lock</b> (defined in <a class="el" href="a00192.html">tbb::interface5::reader_writer_lock::scoped_lock</a>)</td><td><a class="el" href="a00192.html">tbb::interface5::reader_writer_lock::scoped_lock</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00192.html#cf19f20e082887c1bb0ba6b0911c3583">scoped_lock</a>(reader_writer_lock &amp;lock)</td><td><a class="el" href="a00192.html">tbb::interface5::reader_writer_lock::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00192.html#70246e0260493625ff956fa5926fc71f">~scoped_lock</a>()</td><td><a class="el" href="a00192.html">tbb::interface5::reader_writer_lock::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00060.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00060.html
new file mode 100644 (file)
index 0000000..f12a8ff
--- /dev/null
@@ -0,0 +1,37 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::interface5::reader_writer_lock::scoped_lock_read Member List</h1>This is the complete list of members for <a class="el" href="a00193.html">tbb::interface5::reader_writer_lock::scoped_lock_read</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>operator delete</b>(void *p) (defined in <a class="el" href="a00193.html">tbb::interface5::reader_writer_lock::scoped_lock_read</a>)</td><td><a class="el" href="a00193.html">tbb::interface5::reader_writer_lock::scoped_lock_read</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>operator new</b>(size_t s) (defined in <a class="el" href="a00193.html">tbb::interface5::reader_writer_lock::scoped_lock_read</a>)</td><td><a class="el" href="a00193.html">tbb::interface5::reader_writer_lock::scoped_lock_read</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>reader_writer_lock</b> (defined in <a class="el" href="a00193.html">tbb::interface5::reader_writer_lock::scoped_lock_read</a>)</td><td><a class="el" href="a00193.html">tbb::interface5::reader_writer_lock::scoped_lock_read</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00193.html#87ab0dc8f7216e6ba0f7acd6aec33064">scoped_lock_read</a>(reader_writer_lock &amp;lock)</td><td><a class="el" href="a00193.html">tbb::interface5::reader_writer_lock::scoped_lock_read</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00193.html#bd21c5f3d555d64d1de8658e15bf4966">~scoped_lock_read</a>()</td><td><a class="el" href="a00193.html">tbb::interface5::reader_writer_lock::scoped_lock_read</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
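
The two nested guards above give the RAII form of the same lock: scoped_lock for exclusive access, scoped_lock_read for shared access, both releasing in their destructors. A sketch continuing the config example:

    #include "tbb/reader_writer_lock.h"

    extern tbb::reader_writer_lock config_lock;   // from the sketch above
    extern int config_value;

    int read_config_scoped() {
        tbb::reader_writer_lock::scoped_lock_read guard( config_lock );  // shared
        return config_value;                       // guard releases on return
    }

    void write_config_scoped( int v ) {
        tbb::reader_writer_lock::scoped_lock guard( config_lock );       // exclusive
        config_value = v;
    }
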
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00061.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00061.html
new file mode 100644 (file)
index 0000000..37853a5
--- /dev/null
@@ -0,0 +1,44 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::recursive_mutex Member List</h1>This is the complete list of members for <a class="el" href="a00194.html">tbb::recursive_mutex</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>is_fair_mutex</b> (defined in <a class="el" href="a00194.html">tbb::recursive_mutex</a>)</td><td><a class="el" href="a00194.html">tbb::recursive_mutex</a></td><td><code> [static]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>is_recursive_mutex</b> (defined in <a class="el" href="a00194.html">tbb::recursive_mutex</a>)</td><td><a class="el" href="a00194.html">tbb::recursive_mutex</a></td><td><code> [static]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>is_rw_mutex</b> (defined in <a class="el" href="a00194.html">tbb::recursive_mutex</a>)</td><td><a class="el" href="a00194.html">tbb::recursive_mutex</a></td><td><code> [static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00194.html#4c342c69d47f4bb0b393535dee4015d6">lock</a>()</td><td><a class="el" href="a00194.html">tbb::recursive_mutex</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>native_handle</b>() (defined in <a class="el" href="a00194.html">tbb::recursive_mutex</a>)</td><td><a class="el" href="a00194.html">tbb::recursive_mutex</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00194.html#889fa8cc32dd707eef7c0f52dda09c0d">native_handle_type</a> typedef</td><td><a class="el" href="a00194.html">tbb::recursive_mutex</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>native_handle_type</b> typedef (defined in <a class="el" href="a00194.html">tbb::recursive_mutex</a>)</td><td><a class="el" href="a00194.html">tbb::recursive_mutex</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00194.html#d2fceb7f95c24a8cd1457d4527e4b8c6">recursive_mutex</a>()</td><td><a class="el" href="a00194.html">tbb::recursive_mutex</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>scoped_lock</b> (defined in <a class="el" href="a00194.html">tbb::recursive_mutex</a>)</td><td><a class="el" href="a00194.html">tbb::recursive_mutex</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00194.html#86e719b0afee25704af11ab97694d240">try_lock</a>()</td><td><a class="el" href="a00194.html">tbb::recursive_mutex</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00194.html#f0a96e26b7f074588dc31e32524856ae">unlock</a>()</td><td><a class="el" href="a00194.html">tbb::recursive_mutex</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>~recursive_mutex</b>() (defined in <a class="el" href="a00194.html">tbb::recursive_mutex</a>)</td><td><a class="el" href="a00194.html">tbb::recursive_mutex</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
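For readers of the tbb::recursive_mutex member list above, a minimal usage sketch (illustrative code, not part of this commit; the mutex and counter names are assumed):

#include "tbb/recursive_mutex.h"

tbb::recursive_mutex counter_mutex;   // assumed shared mutex
int counter = 0;                      // assumed shared state

void bump(int depth) {
    // scoped_lock acquires on construction and releases on destruction;
    // the same thread may re-lock the mutex inside the recursive call.
    tbb::recursive_mutex::scoped_lock lock(counter_mutex);
    ++counter;
    if (depth > 0)
        bump(depth - 1);
}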
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00062.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00062.html
new file mode 100644 (file)
index 0000000..4ae243f
--- /dev/null
@@ -0,0 +1,39 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::recursive_mutex::scoped_lock Member List</h1>This is the complete list of members for <a class="el" href="a00195.html">tbb::recursive_mutex::scoped_lock</a>, including all inherited members.<p><table>
+  <tr class="memlist"><td><a class="el" href="a00195.html#7fb04da37cccf8c99b1f9102d9074f9a">acquire</a>(recursive_mutex &amp;mutex)</td><td><a class="el" href="a00195.html">tbb::recursive_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>recursive_mutex</b> (defined in <a class="el" href="a00195.html">tbb::recursive_mutex::scoped_lock</a>)</td><td><a class="el" href="a00195.html">tbb::recursive_mutex::scoped_lock</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00195.html#ac480ea0e9d5ea0345a67d57008b6263">release</a>()</td><td><a class="el" href="a00195.html">tbb::recursive_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00195.html#d82d4d36fbf9727a493d26ae50855fe7">scoped_lock</a>()</td><td><a class="el" href="a00195.html">tbb::recursive_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00195.html#dec17713c4c1321ac8fec66816d0c602">scoped_lock</a>(recursive_mutex &amp;mutex)</td><td><a class="el" href="a00195.html">tbb::recursive_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00195.html#36bfc3e93e3ef6340abef4901444d340">try_acquire</a>(recursive_mutex &amp;mutex)</td><td><a class="el" href="a00195.html">tbb::recursive_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00195.html#c1197ffb8f3cd9d4fed71d7e06265b7c">~scoped_lock</a>()</td><td><a class="el" href="a00195.html">tbb::recursive_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
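The acquire()/release() members listed above also allow a lock whose lifetime is decoupled from construction; a minimal sketch with assumed names, not taken from this commit:

#include "tbb/recursive_mutex.h"

tbb::recursive_mutex m;   // assumed mutex

void update() {
    tbb::recursive_mutex::scoped_lock lock;  // holds no mutex yet
    lock.acquire(m);                         // blocks until m is acquired
    // ... critical section ...
    lock.release();                          // optional; the destructor also releases
}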
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00063.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00063.html
new file mode 100644 (file)
index 0000000..aeccd79
--- /dev/null
@@ -0,0 +1,49 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::scalable_allocator&lt; T &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>address</b>(reference x) const  (defined in <a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>address</b>(const_reference x) const  (defined in <a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00196.html#726b1586d05d44665a36e1c7b2699bfd">allocate</a>(size_type n, const void *=0)</td><td><a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_pointer</b> typedef (defined in <a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_reference</b> typedef (defined in <a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>construct</b>(pointer p, const value_type &amp;value) (defined in <a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00196.html#f806a238c18cbcfb531e1e0a0d2ec59d">deallocate</a>(pointer p, size_type)</td><td><a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>destroy</b>(pointer p) (defined in <a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>difference_type</b> typedef (defined in <a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00196.html#880e766f1d913988c21973dbdd874fd5">max_size</a>() const </td><td><a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>pointer</b> typedef (defined in <a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>reference</b> typedef (defined in <a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>scalable_allocator</b>() (defined in <a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>scalable_allocator</b>(const scalable_allocator &amp;) (defined in <a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>scalable_allocator</b>(const scalable_allocator&lt; U &gt; &amp;) (defined in <a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>size_type</b> typedef (defined in <a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>value_type</b> typedef (defined in <a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a></td><td></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
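tbb::scalable_allocator&lt;T&gt; models the standard allocator interface listed above, so it can be dropped into STL containers; a minimal sketch assuming the tbbmalloc library is linked (the container contents are illustrative):

#include <vector>
#include "tbb/scalable_allocator.h"

int main() {
    // Element memory is obtained through the TBB scalable memory allocator.
    std::vector<double, tbb::scalable_allocator<double> > v;
    for (int i = 0; i < 1000; ++i)
        v.push_back(i * 0.5);
    return 0;
}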
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00065.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00065.html
new file mode 100644 (file)
index 0000000..9dcd7bc
--- /dev/null
@@ -0,0 +1,35 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::scalable_allocator&lt; void &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00197.html">tbb::scalable_allocator&lt; void &gt;</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>const_pointer</b> typedef (defined in <a class="el" href="a00197.html">tbb::scalable_allocator&lt; void &gt;</a>)</td><td><a class="el" href="a00197.html">tbb::scalable_allocator&lt; void &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>pointer</b> typedef (defined in <a class="el" href="a00197.html">tbb::scalable_allocator&lt; void &gt;</a>)</td><td><a class="el" href="a00197.html">tbb::scalable_allocator&lt; void &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>value_type</b> typedef (defined in <a class="el" href="a00197.html">tbb::scalable_allocator&lt; void &gt;</a>)</td><td><a class="el" href="a00197.html">tbb::scalable_allocator&lt; void &gt;</a></td><td></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00067.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00067.html
new file mode 100644 (file)
index 0000000..e5fd5b7
--- /dev/null
@@ -0,0 +1,41 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::spin_mutex Member List</h1>This is the complete list of members for <a class="el" href="a00199.html">tbb::spin_mutex</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>internal_construct</b>() (defined in <a class="el" href="a00199.html">tbb::spin_mutex</a>)</td><td><a class="el" href="a00199.html">tbb::spin_mutex</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>is_fair_mutex</b> (defined in <a class="el" href="a00199.html">tbb::spin_mutex</a>)</td><td><a class="el" href="a00199.html">tbb::spin_mutex</a></td><td><code> [static]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>is_recursive_mutex</b> (defined in <a class="el" href="a00199.html">tbb::spin_mutex</a>)</td><td><a class="el" href="a00199.html">tbb::spin_mutex</a></td><td><code> [static]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>is_rw_mutex</b> (defined in <a class="el" href="a00199.html">tbb::spin_mutex</a>)</td><td><a class="el" href="a00199.html">tbb::spin_mutex</a></td><td><code> [static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00199.html#4f748989e19b6045e3a2d2ee73626a28">lock</a>()</td><td><a class="el" href="a00199.html">tbb::spin_mutex</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>scoped_lock</b> (defined in <a class="el" href="a00199.html">tbb::spin_mutex</a>)</td><td><a class="el" href="a00199.html">tbb::spin_mutex</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00199.html#3d8fb44644fd8d41ada1fbeba7409be3">spin_mutex</a>()</td><td><a class="el" href="a00199.html">tbb::spin_mutex</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00199.html#8f9a58fb56a2b4c5efe1a7f7c1ae2074">try_lock</a>()</td><td><a class="el" href="a00199.html">tbb::spin_mutex</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00199.html#0e843ee6265f57f27d228ba91e7308ef">unlock</a>()</td><td><a class="el" href="a00199.html">tbb::spin_mutex</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
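A minimal sketch of the tbb::spin_mutex interface listed above, protecting a very short critical section (the shared sum is an assumed example, not code from this commit):

#include "tbb/spin_mutex.h"

tbb::spin_mutex sum_mutex;
long sum = 0;

void add(long x) {
    // Keep the held region tiny: a waiting thread spins rather than blocks.
    tbb::spin_mutex::scoped_lock lock(sum_mutex);
    sum += x;
}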
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00068.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00068.html
new file mode 100644 (file)
index 0000000..f7d4999
--- /dev/null
@@ -0,0 +1,39 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::spin_mutex::scoped_lock Member List</h1>This is the complete list of members for <a class="el" href="a00200.html">tbb::spin_mutex::scoped_lock</a>, including all inherited members.<p><table>
+  <tr class="memlist"><td><a class="el" href="a00200.html#3ee3c338732b1f64b0b32a757807a30d">acquire</a>(spin_mutex &amp;m)</td><td><a class="el" href="a00200.html">tbb::spin_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00200.html#eeb615e68e963e6bf8d9c11402d0ce8e">release</a>()</td><td><a class="el" href="a00200.html">tbb::spin_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00200.html#29ae680ae7f5e685c2e15535b9c855b3">scoped_lock</a>()</td><td><a class="el" href="a00200.html">tbb::spin_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00200.html#5ce6807050a9e8f87bcb4a65dccb12ef">scoped_lock</a>(spin_mutex &amp;m)</td><td><a class="el" href="a00200.html">tbb::spin_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>spin_mutex</b> (defined in <a class="el" href="a00200.html">tbb::spin_mutex::scoped_lock</a>)</td><td><a class="el" href="a00200.html">tbb::spin_mutex::scoped_lock</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00200.html#9297ec188534b45dc0ca48f2f39a0501">try_acquire</a>(spin_mutex &amp;m)</td><td><a class="el" href="a00200.html">tbb::spin_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00200.html#ac6fa425d1f06c56d8b70abc51aac844">~scoped_lock</a>()</td><td><a class="el" href="a00200.html">tbb::spin_mutex::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
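The try_acquire() member listed above supports opportunistic locking; a minimal sketch (the logging scenario is an assumption):

#include "tbb/spin_mutex.h"

tbb::spin_mutex log_mutex;

bool try_log(const char* /*msg*/) {
    tbb::spin_mutex::scoped_lock lock;     // constructed without a mutex
    if (!lock.try_acquire(log_mutex))
        return false;                      // mutex busy; caller may retry later
    // ... write the message ...
    return true;                           // destructor releases the mutex
}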
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00069.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00069.html
new file mode 100644 (file)
index 0000000..031437d
--- /dev/null
@@ -0,0 +1,42 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::spin_rw_mutex_v3 Member List</h1>This is the complete list of members for <a class="el" href="a00201.html">tbb::spin_rw_mutex_v3</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>is_fair_mutex</b> (defined in <a class="el" href="a00201.html">tbb::spin_rw_mutex_v3</a>)</td><td><a class="el" href="a00201.html">tbb::spin_rw_mutex_v3</a></td><td><code> [static]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>is_recursive_mutex</b> (defined in <a class="el" href="a00201.html">tbb::spin_rw_mutex_v3</a>)</td><td><a class="el" href="a00201.html">tbb::spin_rw_mutex_v3</a></td><td><code> [static]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>is_rw_mutex</b> (defined in <a class="el" href="a00201.html">tbb::spin_rw_mutex_v3</a>)</td><td><a class="el" href="a00201.html">tbb::spin_rw_mutex_v3</a></td><td><code> [static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00201.html#4007d6e1523dbc3c2bb7f889ab789a8a">lock</a>()</td><td><a class="el" href="a00201.html">tbb::spin_rw_mutex_v3</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00201.html#13f799708ac4ca437a16be202e263e18">lock_read</a>()</td><td><a class="el" href="a00201.html">tbb::spin_rw_mutex_v3</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00201.html#61332b2756de89f3f5f69310cbb6e70c">spin_rw_mutex_v3</a>()</td><td><a class="el" href="a00201.html">tbb::spin_rw_mutex_v3</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00201.html#088bb256be794cc47d3b83791632fdfc">try_lock</a>()</td><td><a class="el" href="a00201.html">tbb::spin_rw_mutex_v3</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00201.html#b8667415869013f840d976aa406d385a">try_lock_read</a>()</td><td><a class="el" href="a00201.html">tbb::spin_rw_mutex_v3</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00201.html#f9f52ead2098eb5fb12da59d5ae53b55">unlock</a>()</td><td><a class="el" href="a00201.html">tbb::spin_rw_mutex_v3</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00201.html#9a815fb2759e55072ed413f1b6970cf3">~spin_rw_mutex_v3</a>()</td><td><a class="el" href="a00201.html">tbb::spin_rw_mutex_v3</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
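tbb::spin_rw_mutex_v3 (normally used through the tbb::spin_rw_mutex typedef) distinguishes read and write locking as listed above; a minimal sketch with an assumed lookup table:

#include <map>
#include "tbb/spin_rw_mutex.h"

tbb::spin_rw_mutex table_mutex;
std::map<int, int> table;

int lookup(int key) {
    // Passing false requests a shared (reader) lock.
    tbb::spin_rw_mutex::scoped_lock lock(table_mutex, /*write=*/false);
    std::map<int, int>::const_iterator it = table.find(key);
    return it == table.end() ? -1 : it->second;
}

void insert(int key, int value) {
    // The default is an exclusive (writer) lock.
    tbb::spin_rw_mutex::scoped_lock lock(table_mutex, /*write=*/true);
    table[key] = value;
}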
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00070.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00070.html
new file mode 100644 (file)
index 0000000..cf9edac
--- /dev/null
@@ -0,0 +1,40 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::spin_rw_mutex_v3::scoped_lock Member List</h1>This is the complete list of members for <a class="el" href="a00202.html">tbb::spin_rw_mutex_v3::scoped_lock</a>, including all inherited members.<p><table>
+  <tr class="memlist"><td><a class="el" href="a00202.html#b0b646ec5be02a127d159bbb7ca65353">acquire</a>(spin_rw_mutex &amp;m, bool write=true)</td><td><a class="el" href="a00202.html">tbb::spin_rw_mutex_v3::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00202.html#c2c2c38a08cb9080e87099fac3e5bc94">downgrade_to_reader</a>()</td><td><a class="el" href="a00202.html">tbb::spin_rw_mutex_v3::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00202.html#61b14d00a78185c9b2d206ebfc379124">release</a>()</td><td><a class="el" href="a00202.html">tbb::spin_rw_mutex_v3::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00202.html#d6ea60dee5563f702379bf5e51aa8806">scoped_lock</a>()</td><td><a class="el" href="a00202.html">tbb::spin_rw_mutex_v3::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00202.html#42a92d4f8fdde425b111cfa8a9228071">scoped_lock</a>(spin_rw_mutex &amp;m, bool write=true)</td><td><a class="el" href="a00202.html">tbb::spin_rw_mutex_v3::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00202.html#9879626968d9b9a04cd2ec0fb2e84ae1">try_acquire</a>(spin_rw_mutex &amp;m, bool write=true)</td><td><a class="el" href="a00202.html">tbb::spin_rw_mutex_v3::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00202.html#3f0b1e3f2efab63336400348bd070226">upgrade_to_writer</a>()</td><td><a class="el" href="a00202.html">tbb::spin_rw_mutex_v3::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00202.html#d7eaaa3f2e2c5dc11e7005811b1bdd04">~scoped_lock</a>()</td><td><a class="el" href="a00202.html">tbb::spin_rw_mutex_v3::scoped_lock</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
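The upgrade_to_writer() member listed above lets a reader promote itself only when an update is actually needed; a minimal sketch (the set of seen keys is an assumption):

#include <set>
#include "tbb/spin_rw_mutex.h"

tbb::spin_rw_mutex set_mutex;
std::set<int> seen;

void record(int key) {
    tbb::spin_rw_mutex::scoped_lock lock(set_mutex, /*write=*/false);
    if (seen.count(key))
        return;                          // reader lock was sufficient
    // upgrade_to_writer() returns false if the lock had to be released and
    // reacquired during the upgrade, so the condition must be rechecked.
    if (!lock.upgrade_to_writer() && seen.count(key))
        return;
    seen.insert(key);
}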
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00072.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00072.html
new file mode 100644 (file)
index 0000000..43248f4
--- /dev/null
@@ -0,0 +1,32 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::interface5::internal::task_base Member List</h1>This is the complete list of members for <a class="el" href="a00205.html">tbb::interface5::internal::task_base</a>, including all inherited members.<p><table>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00073.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00073.html
new file mode 100644 (file)
index 0000000..0d1ac5a
--- /dev/null
@@ -0,0 +1,48 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::task_group_context Member List</h1>This is the complete list of members for <a class="el" href="a00206.html">tbb::task_group_context</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>bound</b> enum value (defined in <a class="el" href="a00206.html">tbb::task_group_context</a>)</td><td><a class="el" href="a00206.html">tbb::task_group_context</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00206.html#8bcdfdf4e6bfb76125b6de15c00b571d">cancel_group_execution</a>()</td><td><a class="el" href="a00206.html">tbb::task_group_context</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>concurrent_wait</b> enum value (defined in <a class="el" href="a00206.html">tbb::task_group_context</a>)</td><td><a class="el" href="a00206.html">tbb::task_group_context</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>default_traits</b> enum value (defined in <a class="el" href="a00206.html">tbb::task_group_context</a>)</td><td><a class="el" href="a00206.html">tbb::task_group_context</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>exact_exception</b> enum value (defined in <a class="el" href="a00206.html">tbb::task_group_context</a>)</td><td><a class="el" href="a00206.html">tbb::task_group_context</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00206.html#49a55352084fd44b8863d182e839e6dc">init</a>()</td><td><a class="el" href="a00206.html">tbb::task_group_context</a></td><td><code> [protected]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::allocate_root_with_context_proxy</b> (defined in <a class="el" href="a00206.html">tbb::task_group_context</a>)</td><td><a class="el" href="a00206.html">tbb::task_group_context</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00206.html#4db72f16210b0a991b2c134d6763a4cc">is_group_execution_cancelled</a>() const </td><td><a class="el" href="a00206.html">tbb::task_group_context</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>isolated</b> enum value (defined in <a class="el" href="a00206.html">tbb::task_group_context</a>)</td><td><a class="el" href="a00206.html">tbb::task_group_context</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>kind_type</b> enum name (defined in <a class="el" href="a00206.html">tbb::task_group_context</a>)</td><td><a class="el" href="a00206.html">tbb::task_group_context</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00206.html#d97c8a03615594b71b4ef06ff75cf561">register_pending_exception</a>()</td><td><a class="el" href="a00206.html">tbb::task_group_context</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00206.html#6d30d16bf1cd22f86c6afaf29c2b430c">reset</a>()</td><td><a class="el" href="a00206.html">tbb::task_group_context</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>task</b> (defined in <a class="el" href="a00206.html">tbb::task_group_context</a>)</td><td><a class="el" href="a00206.html">tbb::task_group_context</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00206.html#19fee08fb8ac98adccfe69c1aa63c491">task_group_context</a>(kind_type relation_with_parent=bound, uintptr_t traits=default_traits)</td><td><a class="el" href="a00206.html">tbb::task_group_context</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>traits_type</b> enum name (defined in <a class="el" href="a00206.html">tbb::task_group_context</a>)</td><td><a class="el" href="a00206.html">tbb::task_group_context</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>~task_group_context</b>() (defined in <a class="el" href="a00206.html">tbb::task_group_context</a>)</td><td><a class="el" href="a00206.html">tbb::task_group_context</a></td><td></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
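A minimal sketch of group cancellation through tbb::task_group_context, using the parallel_for overload that accepts a context (the stopping condition and range are assumptions, not code from this commit):

#include "tbb/task.h"
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"
#include "tbb/partitioner.h"

struct SearchBody {
    void operator()(const tbb::blocked_range<int>& r) const {
        for (int i = r.begin(); i != r.end(); ++i)
            if (i == 424242)   // assumed stopping condition
                tbb::task::self().cancel_group_execution();
    }
};

void run() {
    tbb::task_group_context ctx(tbb::task_group_context::isolated);
    tbb::parallel_for(tbb::blocked_range<int>(0, 1000000),
                      SearchBody(), tbb::auto_partitioner(), ctx);
    if (ctx.is_group_execution_cancelled()) {
        // remaining iterations were skipped once cancellation was observed
    }
}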
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00075.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00075.html
new file mode 100644 (file)
index 0000000..e1ecb94
--- /dev/null
@@ -0,0 +1,84 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::task Member List</h1>This is the complete list of members for <a class="el" href="a00204.html">tbb::task</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>add_to_depth</b>(int) (defined in <a class="el" href="a00204.html">tbb::task</a>)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#3a920a56b0bcf2801518fb45b2c9d2be">affinity</a>() const </td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#d61bb32389d3857bf7511d69beaafb76">affinity_id</a> typedef</td><td><a class="el" href="a00204.html">tbb::task</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#1ff794f7053cd9148d5f280fbf07377f">allocate_child</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#1434c79a5138993269d034008bff7329">allocate_continuation</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#23acb0da0afd690da797f9f882027d34">allocate_root</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline, static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#8ccc518caf31075a3e073996d2d240a4">allocate_root</a>(task_group_context &amp;ctx)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline, static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293ebe94d3348dd038e41107819f00c1884c">allocated</a> enum value</td><td><a class="el" href="a00204.html">tbb::task</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#0f3fb4aac549ab642022450a4bd13326">cancel_group_execution</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#d8c36a93f3972590fbb65ff1cef3173b">context</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#ef4680f5c148020c5e7e43ddef44cd5d">decrement_ref_count</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>depth</b>() const  (defined in <a class="el" href="a00204.html">tbb::task</a>)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#dfaacf92685e5f86393bf657b2853bf8">destroy</a>(task &amp;t)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#8365d063c0cc9d7bd616bca47256b93c">enqueue</a>(task &amp;t)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline, static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#22c298cd40937a431a06777423f002f6">execute</a>()=0</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [pure virtual]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293ead0fe2302ccc360923f738c2ed7ec1b9">executing</a> enum value</td><td><a class="el" href="a00204.html">tbb::task</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293ecc67ca92bd6f1ce9738a1e9e7206b735">freed</a> enum value</td><td><a class="el" href="a00204.html">tbb::task</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#f5fb43c7ad0de5a4b95703cebc39e345">increment_ref_count</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>interface5::internal::task_base</b> (defined in <a class="el" href="a00204.html">tbb::task</a>)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::allocate_additional_child_of_proxy</b> (defined in <a class="el" href="a00204.html">tbb::task</a>)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::allocate_child_proxy</b> (defined in <a class="el" href="a00204.html">tbb::task</a>)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::allocate_continuation_proxy</b> (defined in <a class="el" href="a00204.html">tbb::task</a>)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::allocate_root_proxy</b> (defined in <a class="el" href="a00204.html">tbb::task</a>)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::allocate_root_with_context_proxy</b> (defined in <a class="el" href="a00204.html">tbb::task</a>)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::scheduler</b> (defined in <a class="el" href="a00204.html">tbb::task</a>)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#025f18118c057c4c8db87ff2ce8df975">is_cancelled</a>() const </td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#c26718b3b247cd13deb1a741902e7105">is_owned_by_current_thread</a>() const </td><td><a class="el" href="a00204.html">tbb::task</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#f9169402702f56bf519448aaf34450aa">is_stolen_task</a>() const </td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#713c338c8eeaebdc5a6b10a69c039b06">note_affinity</a>(affinity_id id)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [virtual]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#314e98ee4347ccec83efcb9ee22e8596">parent</a>() const </td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e0841dcf1c2a96dee9aa7b69f636cb81a">ready</a> enum value</td><td><a class="el" href="a00204.html">tbb::task</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e58debec6ab130290640d0cc2eedba35d">recycle</a> enum value</td><td><a class="el" href="a00204.html">tbb::task</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#db399855177438bbc9cc61d508dae8d2">recycle_as_child_of</a>(task &amp;new_parent)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#a67a79e18f62b43a623a00cfbd76db4c">recycle_as_continuation</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#3b290d14109704e2b69dc1ac980a7a76">recycle_as_safe_continuation</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#4f1be9bbcdb487830dbe298b68d85144">recycle_to_reexecute</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e3bf499aa6e6487cd1ace883a63100513">reexecute</a> enum value</td><td><a class="el" href="a00204.html">tbb::task</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#ad774f55eaec008ae02b236423209ced">ref_count</a>() const </td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#bd43e8d6249738efafd12d6a4c72c5e3">self</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#dca19d7a45487a7d67a0db517e2b57c9">set_affinity</a>(affinity_id id)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>set_depth</b>(intptr_t) (defined in <a class="el" href="a00204.html">tbb::task</a>)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#06a4206a57e8e12a439b14d6d41cfd92">set_ref_count</a>(int count)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#3ce28ca9baa771cfc37ecd72e69c4f3c">spawn_and_wait_for_all</a>(task &amp;child)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#894ab68378e502776d8220eea7ce9fa1">spawn_and_wait_for_all</a>(task_list &amp;list)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#ce8ce689c26a4ddf343829bc3c73290a">spawn_root_and_wait</a>(task &amp;root)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline, static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#c33c7edbaec67aa8a56f48986a9dc69f">spawn_root_and_wait</a>(task_list &amp;root_list)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline, static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#0af7b2d7e6e8b4333b2accfce3dfb374">state</a>() const </td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e">state_type</a> enum name</td><td><a class="el" href="a00204.html">tbb::task</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#2bce8ec6e44706e70128f5cf91b76e67">task</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline, protected]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>task_list</b> (defined in <a class="el" href="a00204.html">tbb::task</a>)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#53d2615ad9c38859b4c8080936600283">wait_for_all</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#98245ee0473f84cb19dbbf8c81134908">~task</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline, virtual]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
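A minimal sketch of deriving from tbb::task using the allocate_root(), execute(), and spawn_root_and_wait() members listed above (the task body is illustrative, not code from this commit):

#include <cstdio>
#include "tbb/task.h"

class HelloTask : public tbb::task {
public:
    /*override*/ tbb::task* execute() {
        std::printf("hello from a TBB task\n");
        return NULL;                 // no successor task to bypass to
    }
};

int main() {
    // Root tasks are placement-new'ed through the allocate_root() proxy.
    HelloTask& t = *new(tbb::task::allocate_root()) HelloTask;
    tbb::task::spawn_root_and_wait(t);   // run t and wait for its completion
    return 0;
}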
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00076.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00076.html
new file mode 100644 (file)
index 0000000..b239e0d
--- /dev/null
@@ -0,0 +1,82 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::empty_task Member List</h1>This is the complete list of members for <a class="el" href="a00167.html">tbb::empty_task</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>add_to_depth</b>(int) (defined in <a class="el" href="a00204.html">tbb::task</a>)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#3a920a56b0bcf2801518fb45b2c9d2be">affinity</a>() const </td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#d61bb32389d3857bf7511d69beaafb76">affinity_id</a> typedef</td><td><a class="el" href="a00204.html">tbb::task</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#1ff794f7053cd9148d5f280fbf07377f">allocate_child</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#1434c79a5138993269d034008bff7329">allocate_continuation</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#23acb0da0afd690da797f9f882027d34">allocate_root</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline, static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#8ccc518caf31075a3e073996d2d240a4">allocate_root</a>(task_group_context &amp;ctx)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline, static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293ebe94d3348dd038e41107819f00c1884c">allocated</a> enum value</td><td><a class="el" href="a00204.html">tbb::task</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#0f3fb4aac549ab642022450a4bd13326">cancel_group_execution</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#d8c36a93f3972590fbb65ff1cef3173b">context</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#ef4680f5c148020c5e7e43ddef44cd5d">decrement_ref_count</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>depth</b>() const  (defined in <a class="el" href="a00204.html">tbb::task</a>)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#dfaacf92685e5f86393bf657b2853bf8">destroy</a>(task &amp;t)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#8365d063c0cc9d7bd616bca47256b93c">enqueue</a>(task &amp;t)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline, static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293ead0fe2302ccc360923f738c2ed7ec1b9">executing</a> enum value</td><td><a class="el" href="a00204.html">tbb::task</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293ecc67ca92bd6f1ce9738a1e9e7206b735">freed</a> enum value</td><td><a class="el" href="a00204.html">tbb::task</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#f5fb43c7ad0de5a4b95703cebc39e345">increment_ref_count</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>interface5::internal::task_base</b> (defined in <a class="el" href="a00204.html">tbb::task</a>)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::allocate_additional_child_of_proxy</b> (defined in <a class="el" href="a00204.html">tbb::task</a>)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::allocate_child_proxy</b> (defined in <a class="el" href="a00204.html">tbb::task</a>)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::allocate_continuation_proxy</b> (defined in <a class="el" href="a00204.html">tbb::task</a>)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::allocate_root_proxy</b> (defined in <a class="el" href="a00204.html">tbb::task</a>)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::allocate_root_with_context_proxy</b> (defined in <a class="el" href="a00204.html">tbb::task</a>)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [friend]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>internal::scheduler</b> (defined in <a class="el" href="a00204.html">tbb::task</a>)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#025f18118c057c4c8db87ff2ce8df975">is_cancelled</a>() const </td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#c26718b3b247cd13deb1a741902e7105">is_owned_by_current_thread</a>() const </td><td><a class="el" href="a00204.html">tbb::task</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#f9169402702f56bf519448aaf34450aa">is_stolen_task</a>() const </td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#713c338c8eeaebdc5a6b10a69c039b06">note_affinity</a>(affinity_id id)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [virtual]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#314e98ee4347ccec83efcb9ee22e8596">parent</a>() const </td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e0841dcf1c2a96dee9aa7b69f636cb81a">ready</a> enum value</td><td><a class="el" href="a00204.html">tbb::task</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e58debec6ab130290640d0cc2eedba35d">recycle</a> enum value</td><td><a class="el" href="a00204.html">tbb::task</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#db399855177438bbc9cc61d508dae8d2">recycle_as_child_of</a>(task &amp;new_parent)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#a67a79e18f62b43a623a00cfbd76db4c">recycle_as_continuation</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#3b290d14109704e2b69dc1ac980a7a76">recycle_as_safe_continuation</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#4f1be9bbcdb487830dbe298b68d85144">recycle_to_reexecute</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e3bf499aa6e6487cd1ace883a63100513">reexecute</a> enum value</td><td><a class="el" href="a00204.html">tbb::task</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#ad774f55eaec008ae02b236423209ced">ref_count</a>() const </td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#bd43e8d6249738efafd12d6a4c72c5e3">self</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#dca19d7a45487a7d67a0db517e2b57c9">set_affinity</a>(affinity_id id)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>set_depth</b>(intptr_t) (defined in <a class="el" href="a00204.html">tbb::task</a>)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#06a4206a57e8e12a439b14d6d41cfd92">set_ref_count</a>(int count)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#3ce28ca9baa771cfc37ecd72e69c4f3c">spawn_and_wait_for_all</a>(task &amp;child)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#894ab68378e502776d8220eea7ce9fa1">spawn_and_wait_for_all</a>(task_list &amp;list)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#ce8ce689c26a4ddf343829bc3c73290a">spawn_root_and_wait</a>(task &amp;root)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline, static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#c33c7edbaec67aa8a56f48986a9dc69f">spawn_root_and_wait</a>(task_list &amp;root_list)</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline, static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#0af7b2d7e6e8b4333b2accfce3dfb374">state</a>() const </td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e">state_type</a> enum name</td><td><a class="el" href="a00204.html">tbb::task</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#2bce8ec6e44706e70128f5cf91b76e67">task</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline, protected]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#53d2615ad9c38859b4c8080936600283">wait_for_all</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00204.html#98245ee0473f84cb19dbbf8c81134908">~task</a>()</td><td><a class="el" href="a00204.html">tbb::task</a></td><td><code> [inline, virtual]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
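
For orientation alongside the imported reference page above, a minimal sketch of the usual tbb::task pattern the listed members imply; the HelloTask class and its empty execute() body are illustrative assumptions, not part of the TBB documentation, while spawn_root_and_wait() is taken from the member list:

    #include <tbb/task.h>

    // Hypothetical task type; only the scheduling calls below are TBB API.
    class HelloTask : public tbb::task {
    public:
        /*override*/ tbb::task* execute() {
            // real work would go here
            return NULL;   // NULL lets the scheduler choose the next task itself
        }
    };

    int main() {
        // allocate_root() is the standard tbb::task allocation entry point;
        // spawn_root_and_wait(task&) appears in the member list above.
        tbb::task& root = *new( tbb::task::allocate_root() ) HelloTask;
        tbb::task::spawn_root_and_wait( root );
        return 0;
    }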
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00077.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00077.html
new file mode 100644 (file)
index 0000000..6a11dc4
--- /dev/null
@@ -0,0 +1,40 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::task_list Member List</h1>This is the complete list of members for <a class="el" href="a00207.html">tbb::task_list</a>, including all inherited members.<p><table>
+  <tr class="memlist"><td><a class="el" href="a00207.html#fce446ee13e025969945328f3ff59b95">clear</a>()</td><td><a class="el" href="a00207.html">tbb::task_list</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00207.html#f3ac31e092814b90929f81bb30441959">empty</a>() const </td><td><a class="el" href="a00207.html">tbb::task_list</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>interface5::internal::task_base</b> (defined in <a class="el" href="a00207.html">tbb::task_list</a>)</td><td><a class="el" href="a00207.html">tbb::task_list</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00207.html#5fe85df5ed524418389d34051750347d">pop_front</a>()</td><td><a class="el" href="a00207.html">tbb::task_list</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00207.html#4cd34756bc4763dafb8c84838a0124ff">push_back</a>(task &amp;task)</td><td><a class="el" href="a00207.html">tbb::task_list</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>task</b> (defined in <a class="el" href="a00207.html">tbb::task_list</a>)</td><td><a class="el" href="a00207.html">tbb::task_list</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00207.html#416341c2047eaef50417b41eaf7e9de6">task_list</a>()</td><td><a class="el" href="a00207.html">tbb::task_list</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00207.html#6d438f1499a02db1e59c24ab6043e5ba">~task_list</a>()</td><td><a class="el" href="a00207.html">tbb::task_list</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
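
A short sketch of tbb::task_list combining push_back() and empty() from the list above with task::spawn_root_and_wait(task_list&) from the tbb::task page; ChildTask is again a hypothetical task subclass:

    #include <tbb/task.h>   // task_list is declared together with task

    // Hypothetical child task; name and body are illustrative only.
    struct ChildTask : public tbb::task {
        tbb::task* execute() { return NULL; }
    };

    int main() {
        tbb::task_list list;                       // default constructor, listed above
        for ( int i = 0; i < 4; ++i )
            list.push_back( *new( tbb::task::allocate_root() ) ChildTask );
        if ( !list.empty() )                       // empty() from the list above
            tbb::task::spawn_root_and_wait( list );  // runs the tasks and drains the list
        return 0;
    }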
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00084.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00084.html
new file mode 100644 (file)
index 0000000..26f4ad3
--- /dev/null
@@ -0,0 +1,41 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::task_scheduler_init Member List</h1>This is the complete list of members for <a class="el" href="a00208.html">tbb::task_scheduler_init</a>, including all inherited members.<p><table>
+  <tr class="memlist"><td><a class="el" href="a00208.html#8f5988e2b0fbb2d533fcbb7f2583743f">automatic</a></td><td><a class="el" href="a00208.html">tbb::task_scheduler_init</a></td><td><code> [static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00208.html#ba00714c33a41a3c2216f48613971cab">default_num_threads</a>()</td><td><a class="el" href="a00208.html">tbb::task_scheduler_init</a></td><td><code> [static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00208.html#e6c860f1e559026ff3ef4599c0d6c514">deferred</a></td><td><a class="el" href="a00208.html">tbb::task_scheduler_init</a></td><td><code> [static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00208.html#d476053cc712e572554823492a5229ce">initialize</a>(int number_of_threads=automatic)</td><td><a class="el" href="a00208.html">tbb::task_scheduler_init</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00208.html#d5ed214a8bb53b0466ed91ff4734b9a3">initialize</a>(int number_of_threads, stack_size_type thread_stack_size)</td><td><a class="el" href="a00208.html">tbb::task_scheduler_init</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00208.html#12752282977029f23416642bc03e8b74">is_active</a>() const </td><td><a class="el" href="a00208.html">tbb::task_scheduler_init</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00208.html#421600bf9bf9338bcf937063f2ff0e90">task_scheduler_init</a>(int number_of_threads=automatic, stack_size_type thread_stack_size=0)</td><td><a class="el" href="a00208.html">tbb::task_scheduler_init</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00208.html#f73257e04cb7fb9bd5be2b635d9016f1">terminate</a>()</td><td><a class="el" href="a00208.html">tbb::task_scheduler_init</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00208.html#4da6c86292d80c703a66c1f6f5299488">~task_scheduler_init</a>()</td><td><a class="el" href="a00208.html">tbb::task_scheduler_init</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
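
The task_scheduler_init members above combine roughly as follows; a minimal sketch using only the listed constructor, default_num_threads(), initialize()/terminate() and is_active():

    #include <tbb/task_scheduler_init.h>

    int main() {
        // Defer construction, then pick the thread count explicitly.
        tbb::task_scheduler_init init( tbb::task_scheduler_init::deferred );
        int n = tbb::task_scheduler_init::default_num_threads();
        init.initialize( n );            // or task_scheduler_init::automatic
        // ... run TBB algorithms here ...
        if ( init.is_active() )
            init.terminate();            // optional; the destructor also shuts down
        return 0;
    }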
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00086.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00086.html
new file mode 100644 (file)
index 0000000..650a0f4
--- /dev/null
@@ -0,0 +1,53 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::tbb_allocator&lt; T &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>address</b>(reference x) const  (defined in <a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>address</b>(const_reference x) const  (defined in <a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00209.html#f6cb487b1bdce0b581f265a77dca6d53">allocate</a>(size_type n, const void *=0)</td><td><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00209.html#78701e7454ef8e1a25b5acd364367080">allocator_type</a>()</td><td><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a></td><td><code> [inline, static]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_pointer</b> typedef (defined in <a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_reference</b> typedef (defined in <a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00209.html#ab228ab9e324ed041c2226e1d717df5f">construct</a>(pointer p, const value_type &amp;value)</td><td><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00209.html#fdd011fdf2f9ad07006dc7c0a7ec1da2">deallocate</a>(pointer p, size_type)</td><td><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00209.html#ef133522bf55f05a605bee0763208281">destroy</a>(pointer p)</td><td><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>difference_type</b> typedef (defined in <a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00209.html#09a7f81fb2c3055aaecf058b11538544">malloc_type</a> enum name</td><td><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00209.html#f059ca2c96243024f0d562ee3a87a3a5">max_size</a>() const </td><td><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>pointer</b> typedef (defined in <a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>reference</b> typedef (defined in <a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>scalable</b> enum value (defined in <a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>size_type</b> typedef (defined in <a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>standard</b> enum value (defined in <a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>tbb_allocator</b>() (defined in <a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>tbb_allocator</b>(const tbb_allocator &amp;) (defined in <a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>tbb_allocator</b>(const tbb_allocator&lt; U &gt; &amp;) (defined in <a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>value_type</b> typedef (defined in <a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a>)</td><td><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a></td><td></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
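
tbb::tbb_allocator<T> models the standard allocator interface, so it is typically passed as a container's allocator argument; a small sketch using only members from the list above:

    #include <cstdio>
    #include <vector>
    #include <tbb/tbb_allocator.h>

    int main() {
        // Standard container backed by tbb_allocator's allocate()/deallocate().
        std::vector<int, tbb::tbb_allocator<int> > v;
        for ( int i = 0; i < 100; ++i )
            v.push_back( i );
        // allocator_type() reports which backend is in use: the "scalable"
        // TBB malloc if available, otherwise the "standard" malloc/free.
        bool scalable =
            tbb::tbb_allocator<int>::allocator_type() == tbb::tbb_allocator<int>::scalable;
        std::printf( "scalable backend: %s\n", scalable ? "yes" : "no" );
        return 0;
    }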
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00088.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00088.html
new file mode 100644 (file)
index 0000000..f9de324
--- /dev/null
@@ -0,0 +1,35 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::tbb_allocator&lt; void &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00210.html">tbb::tbb_allocator&lt; void &gt;</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>const_pointer</b> typedef (defined in <a class="el" href="a00210.html">tbb::tbb_allocator&lt; void &gt;</a>)</td><td><a class="el" href="a00210.html">tbb::tbb_allocator&lt; void &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>pointer</b> typedef (defined in <a class="el" href="a00210.html">tbb::tbb_allocator&lt; void &gt;</a>)</td><td><a class="el" href="a00210.html">tbb::tbb_allocator&lt; void &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>value_type</b> typedef (defined in <a class="el" href="a00210.html">tbb::tbb_allocator&lt; void &gt;</a>)</td><td><a class="el" href="a00210.html">tbb::tbb_allocator&lt; void &gt;</a></td><td></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00090.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00090.html
new file mode 100644 (file)
index 0000000..9395418
--- /dev/null
@@ -0,0 +1,44 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::zero_allocator&lt; T, Allocator &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>allocate</b>(const size_type n, const void *hint=0) (defined in <a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a>)</td><td><a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>base_allocator_type</b> typedef (defined in <a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a>)</td><td><a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_pointer</b> typedef (defined in <a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a>)</td><td><a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_reference</b> typedef (defined in <a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a>)</td><td><a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>difference_type</b> typedef (defined in <a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a>)</td><td><a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>pointer</b> typedef (defined in <a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a>)</td><td><a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>reference</b> typedef (defined in <a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a>)</td><td><a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>size_type</b> typedef (defined in <a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a>)</td><td><a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>value_type</b> typedef (defined in <a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a>)</td><td><a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>zero_allocator</b>() (defined in <a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a>)</td><td><a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>zero_allocator</b>(const zero_allocator &amp;a) (defined in <a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a>)</td><td><a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>zero_allocator</b>(const zero_allocator&lt; U &gt; &amp;a) (defined in <a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a>)</td><td><a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
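
zero_allocator<T, Allocator> forwards to its base allocator (tbb_allocator by default) and zero-fills what allocate() returns. A hedged sketch, assuming the declaration sits in tbb/tbb_allocator.h in this release and that deallocate() is inherited from the base allocator:

    #include <tbb/tbb_allocator.h>   // assumed location of zero_allocator in this version

    int main() {
        tbb::zero_allocator<int> a;
        int* p = a.allocate( 16 );   // memory arrives already zero-filled
        int first = p[0];            // 0 without any explicit initialization
        a.deallocate( p, 16 );       // inherited from the base allocator
        return first;
    }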
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00092.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00092.html
new file mode 100644 (file)
index 0000000..c7779a9
--- /dev/null
@@ -0,0 +1,36 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::zero_allocator&lt; void, Allocator &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00219.html">tbb::zero_allocator&lt; void, Allocator &gt;</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>base_allocator_type</b> typedef (defined in <a class="el" href="a00219.html">tbb::zero_allocator&lt; void, Allocator &gt;</a>)</td><td><a class="el" href="a00219.html">tbb::zero_allocator&lt; void, Allocator &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>const_pointer</b> typedef (defined in <a class="el" href="a00219.html">tbb::zero_allocator&lt; void, Allocator &gt;</a>)</td><td><a class="el" href="a00219.html">tbb::zero_allocator&lt; void, Allocator &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>pointer</b> typedef (defined in <a class="el" href="a00219.html">tbb::zero_allocator&lt; void, Allocator &gt;</a>)</td><td><a class="el" href="a00219.html">tbb::zero_allocator&lt; void, Allocator &gt;</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>value_type</b> typedef (defined in <a class="el" href="a00219.html">tbb::zero_allocator&lt; void, Allocator &gt;</a>)</td><td><a class="el" href="a00219.html">tbb::zero_allocator&lt; void, Allocator &gt;</a></td><td></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00094.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00094.html
new file mode 100644 (file)
index 0000000..5ecca79
--- /dev/null
@@ -0,0 +1,34 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::bad_last_alloc Member List</h1>This is the complete list of members for <a class="el" href="a00151.html">tbb::bad_last_alloc</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>what</b>() const  (defined in <a class="el" href="a00151.html">tbb::bad_last_alloc</a>)</td><td><a class="el" href="a00151.html">tbb::bad_last_alloc</a></td><td></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>~bad_last_alloc</b>() (defined in <a class="el" href="a00151.html">tbb::bad_last_alloc</a>)</td><td><a class="el" href="a00151.html">tbb::bad_last_alloc</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00095.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00095.html
new file mode 100644 (file)
index 0000000..7754e54
--- /dev/null
@@ -0,0 +1,33 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::improper_lock Member List</h1>This is the complete list of members for <a class="el" href="a00173.html">tbb::improper_lock</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>what</b>() const  (defined in <a class="el" href="a00173.html">tbb::improper_lock</a>)</td><td><a class="el" href="a00173.html">tbb::improper_lock</a></td><td></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00096.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00096.html
new file mode 100644 (file)
index 0000000..dfcc946
--- /dev/null
@@ -0,0 +1,33 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::missing_wait Member List</h1>This is the complete list of members for <a class="el" href="a00175.html">tbb::missing_wait</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>what</b>() const  (defined in <a class="el" href="a00175.html">tbb::missing_wait</a>)</td><td><a class="el" href="a00175.html">tbb::missing_wait</a></td><td></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00097.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00097.html
new file mode 100644 (file)
index 0000000..457258d
--- /dev/null
@@ -0,0 +1,33 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::invalid_multiple_scheduling Member List</h1>This is the complete list of members for <a class="el" href="a00174.html">tbb::invalid_multiple_scheduling</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>what</b>() const  (defined in <a class="el" href="a00174.html">tbb::invalid_multiple_scheduling</a>)</td><td><a class="el" href="a00174.html">tbb::invalid_multiple_scheduling</a></td><td></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00098.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00098.html
new file mode 100644 (file)
index 0000000..99d7fca
--- /dev/null
@@ -0,0 +1,38 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::tbb_exception Member List</h1>This is the complete list of members for <a class="el" href="a00211.html">tbb::tbb_exception</a>, including all inherited members.<p><table>
+  <tr class="memlist"><td><a class="el" href="a00211.html#66c94938eca8bf88b76f3eccaaf215d8">destroy</a>()=0</td><td><a class="el" href="a00211.html">tbb::tbb_exception</a></td><td><code> [pure virtual]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00211.html#3e3482bf264d4ca4dde046cd9c02c766">move</a>()=0</td><td><a class="el" href="a00211.html">tbb::tbb_exception</a></td><td><code> [pure virtual]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00211.html#d00f6497e552fee978a02bfcbebf46e2">name</a>() const =0</td><td><a class="el" href="a00211.html">tbb::tbb_exception</a></td><td><code> [pure virtual]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00211.html#3f2da7f3d8a6e4c1df522af1213afb5a">operator delete</a>(void *p)</td><td><a class="el" href="a00211.html">tbb::tbb_exception</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00211.html#8588e07fa49692f4d734e4f2e4f048f4">throw_self</a>()=0</td><td><a class="el" href="a00211.html">tbb::tbb_exception</a></td><td><code> [pure virtual]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00211.html#e8157689ecb66bc6c72d3618bf3cc371">what</a>() const =0</td><td><a class="el" href="a00211.html">tbb::tbb_exception</a></td><td><code> [pure virtual]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
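
tbb_exception is the abstract interface TBB uses to move exceptions between threads; when exact C++ exception propagation is unavailable, what escapes a parallel algorithm is a captured_exception implementing it. A hedged sketch of the catch pattern; the Body functor and its deliberate throw are illustrative assumptions:

    #include <iostream>
    #include <stdexcept>
    #include <tbb/tbb_exception.h>
    #include <tbb/parallel_for.h>
    #include <tbb/blocked_range.h>

    // Hypothetical loop body that throws partway through.
    struct Body {
        void operator()( const tbb::blocked_range<int>& r ) const {
            for ( int i = r.begin(); i != r.end(); ++i )
                if ( i == 42 )
                    throw std::runtime_error( "boom" );
        }
    };

    int main() {
        try {
            tbb::parallel_for( tbb::blocked_range<int>( 0, 100 ), Body() );
        } catch ( tbb::tbb_exception& e ) {
            // name() and what() are the pure virtuals from the member list above.
            std::cerr << e.name() << ": " << e.what() << std::endl;
        } catch ( std::exception& e ) {
            std::cerr << e.what() << std::endl;   // exact-propagation path
        }
        return 0;
    }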
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00099.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00099.html
new file mode 100644 (file)
index 0000000..6ce5fe6
--- /dev/null
@@ -0,0 +1,44 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::captured_exception Member List</h1>This is the complete list of members for <a class="el" href="a00157.html">tbb::captured_exception</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>captured_exception</b>(const captured_exception &amp;src) (defined in <a class="el" href="a00157.html">tbb::captured_exception</a>)</td><td><a class="el" href="a00157.html">tbb::captured_exception</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>captured_exception</b>(const char *name_, const char *info) (defined in <a class="el" href="a00157.html">tbb::captured_exception</a>)</td><td><a class="el" href="a00157.html">tbb::captured_exception</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>clear</b>() (defined in <a class="el" href="a00157.html">tbb::captured_exception</a>)</td><td><a class="el" href="a00157.html">tbb::captured_exception</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00157.html#93d875d3555502ff6f18513525de204c">destroy</a>()</td><td><a class="el" href="a00157.html">tbb::captured_exception</a></td><td><code> [virtual]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00157.html#837a50b8f6a800bda225c39d1699643f">move</a>()</td><td><a class="el" href="a00157.html">tbb::captured_exception</a></td><td><code> [virtual]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00157.html#5af82fd677449c5ca727fa1d7e16f9f5">name</a>() const </td><td><a class="el" href="a00157.html">tbb::captured_exception</a></td><td><code> [virtual]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00211.html#3f2da7f3d8a6e4c1df522af1213afb5a">operator delete</a>(void *p)</td><td><a class="el" href="a00211.html">tbb::tbb_exception</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>operator=</b>(const captured_exception &amp;src) (defined in <a class="el" href="a00157.html">tbb::captured_exception</a>)</td><td><a class="el" href="a00157.html">tbb::captured_exception</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>set</b>(const char *name, const char *info) (defined in <a class="el" href="a00157.html">tbb::captured_exception</a>)</td><td><a class="el" href="a00157.html">tbb::captured_exception</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00157.html#2dd1be66ab32fa27e0ddef5707fa67ef">throw_self</a>()</td><td><a class="el" href="a00157.html">tbb::captured_exception</a></td><td><code> [inline, virtual]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00157.html#6b5988ef74a1fe2a58998d110b3633e0">what</a>() const </td><td><a class="el" href="a00157.html">tbb::captured_exception</a></td><td><code> [virtual]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>~captured_exception</b>() (defined in <a class="el" href="a00157.html">tbb::captured_exception</a>)</td><td><a class="el" href="a00157.html">tbb::captured_exception</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00100.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00100.html
new file mode 100644 (file)
index 0000000..f3429d9
--- /dev/null
@@ -0,0 +1,45 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::movable_exception&lt; ExceptionData &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00176.html">tbb::movable_exception&lt; ExceptionData &gt;</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>data</b>() (defined in <a class="el" href="a00176.html">tbb::movable_exception&lt; ExceptionData &gt;</a>)</td><td><a class="el" href="a00176.html">tbb::movable_exception&lt; ExceptionData &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>data</b>() const  (defined in <a class="el" href="a00176.html">tbb::movable_exception&lt; ExceptionData &gt;</a>)</td><td><a class="el" href="a00176.html">tbb::movable_exception&lt; ExceptionData &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00176.html#7a46873119d9f85a7b0009c13e41a258">destroy</a>()</td><td><a class="el" href="a00176.html">tbb::movable_exception&lt; ExceptionData &gt;</a></td><td><code> [inline, virtual]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>movable_exception</b>(const ExceptionData &amp;data_) (defined in <a class="el" href="a00176.html">tbb::movable_exception&lt; ExceptionData &gt;</a>)</td><td><a class="el" href="a00176.html">tbb::movable_exception&lt; ExceptionData &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>movable_exception</b>(const movable_exception &amp;src) (defined in <a class="el" href="a00176.html">tbb::movable_exception&lt; ExceptionData &gt;</a>)</td><td><a class="el" href="a00176.html">tbb::movable_exception&lt; ExceptionData &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00176.html#1aea0ad179d6f0481fe7f3495f66adf9">move</a>()</td><td><a class="el" href="a00176.html">tbb::movable_exception&lt; ExceptionData &gt;</a></td><td><code> [inline, virtual]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00176.html#a8c0ae2089ae784b28907cf748b89416">my_exception_data</a></td><td><a class="el" href="a00176.html">tbb::movable_exception&lt; ExceptionData &gt;</a></td><td><code> [protected]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00176.html#bc5f5c4739b17ac5211ac58226c2f5a5">name</a>() const </td><td><a class="el" href="a00176.html">tbb::movable_exception&lt; ExceptionData &gt;</a></td><td><code> [inline, virtual]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00211.html#3f2da7f3d8a6e4c1df522af1213afb5a">operator delete</a>(void *p)</td><td><a class="el" href="a00211.html">tbb::tbb_exception</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>operator=</b>(const movable_exception &amp;src) (defined in <a class="el" href="a00176.html">tbb::movable_exception&lt; ExceptionData &gt;</a>)</td><td><a class="el" href="a00176.html">tbb::movable_exception&lt; ExceptionData &gt;</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00176.html#17cffba35811c92b7e65d63506b69602">throw_self</a>()</td><td><a class="el" href="a00176.html">tbb::movable_exception&lt; ExceptionData &gt;</a></td><td><code> [inline, virtual]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00176.html#b33a89bccf0c63106f1270c7bfaaf54f">what</a>() const </td><td><a class="el" href="a00176.html">tbb::movable_exception&lt; ExceptionData &gt;</a></td><td><code> [inline, virtual]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>~movable_exception</b>() (defined in <a class="el" href="a00176.html">tbb::movable_exception&lt; ExceptionData &gt;</a>)</td><td><a class="el" href="a00176.html">tbb::movable_exception&lt; ExceptionData &gt;</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
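
movable_exception<ExceptionData> is the ready-made way to carry a user payload across threads without hand-writing a tbb_exception subclass; a minimal sketch, where ErrorInfo is a hypothetical payload type:

    #include <iostream>
    #include <tbb/tbb_exception.h>

    // Hypothetical payload carried inside the exception.
    struct ErrorInfo {
        int code;
        explicit ErrorInfo( int c ) : code( c ) {}
    };

    int main() {
        try {
            // The constructor copies the payload; move()/destroy() (listed above)
            // let TBB relocate it to the thread that finally catches it.
            throw tbb::movable_exception<ErrorInfo>( ErrorInfo( 7 ) );
        } catch ( tbb::movable_exception<ErrorInfo>& e ) {
            std::cerr << e.name() << ": code " << e.data().code << std::endl;
        }
        return 0;
    }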
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00101.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00101.html
new file mode 100644 (file)
index 0000000..a3f0a57
--- /dev/null
@@ -0,0 +1,37 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::internal::tbb_exception_ptr Member List</h1>This is the complete list of members for <a class="el" href="a00212.html">tbb::internal::tbb_exception_ptr</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>allocate</b>() (defined in <a class="el" href="a00212.html">tbb::internal::tbb_exception_ptr</a>)</td><td><a class="el" href="a00212.html">tbb::internal::tbb_exception_ptr</a></td><td><code> [static]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>allocate</b>(const tbb_exception &amp;tag) (defined in <a class="el" href="a00212.html">tbb::internal::tbb_exception_ptr</a>)</td><td><a class="el" href="a00212.html">tbb::internal::tbb_exception_ptr</a></td><td><code> [static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00212.html#c35e5db8e9cdff5d1387db5b0bad2e4a">allocate</a>(captured_exception &amp;src)</td><td><a class="el" href="a00212.html">tbb::internal::tbb_exception_ptr</a></td><td><code> [static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00212.html#921875bbacd2c8a5f324c7da7a415262">destroy</a>()</td><td><a class="el" href="a00212.html">tbb::internal::tbb_exception_ptr</a></td><td></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00212.html#292832fd5c523e3d8081a22247840a1d">throw_self</a>()</td><td><a class="el" href="a00212.html">tbb::internal::tbb_exception_ptr</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00102.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00102.html
new file mode 100644 (file)
index 0000000..0dfa8c3
--- /dev/null
@@ -0,0 +1,36 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::internal::atomic_backoff Member List</h1>This is the complete list of members for <a class="el" href="a00149.html">tbb::internal::atomic_backoff</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>atomic_backoff</b>() (defined in <a class="el" href="a00149.html">tbb::internal::atomic_backoff</a>)</td><td><a class="el" href="a00149.html">tbb::internal::atomic_backoff</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>bounded_pause</b>() (defined in <a class="el" href="a00149.html">tbb::internal::atomic_backoff</a>)</td><td><a class="el" href="a00149.html">tbb::internal::atomic_backoff</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00149.html#a174ea93e3bd3d5cce82389c2f28d037">pause</a>()</td><td><a class="el" href="a00149.html">tbb::internal::atomic_backoff</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>reset</b>() (defined in <a class="el" href="a00149.html">tbb::internal::atomic_backoff</a>)</td><td><a class="el" href="a00149.html">tbb::internal::atomic_backoff</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
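
atomic_backoff lives in tbb::internal and is what TBB's own spin waits use: pause() backs off with progressively longer pauses and eventually yields the thread. A hedged sketch of the pattern (the header placement, the flag, and the single-threaded set-up are illustrative assumptions; in real use another thread would flip the flag):

    #include <tbb/tbb_machine.h>   // assumed home of atomic_backoff in this release
    #include <tbb/atomic.h>

    int main() {
        tbb::atomic<int> flag;
        flag = 1;                                  // pre-set so the loop terminates here
        tbb::internal::atomic_backoff backoff;     // default constructor from the list above
        while ( flag == 0 )
            backoff.pause();                       // progressively longer pauses, then yields
        backoff.reset();                           // ready for the next wait
        return 0;
    }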
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00109.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00109.html
new file mode 100644 (file)
index 0000000..7f3c23c
--- /dev/null
@@ -0,0 +1,33 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::internal::work_around_alignment_bug&lt; Size, T &gt; Member List</h1>This is the complete list of members for <a class="el" href="a00217.html">tbb::internal::work_around_alignment_bug&lt; Size, T &gt;</a>, including all inherited members.<p><table>
+  <tr bgcolor="#f0f0f0"><td><b>alignment</b> (defined in <a class="el" href="a00217.html">tbb::internal::work_around_alignment_bug&lt; Size, T &gt;</a>)</td><td><a class="el" href="a00217.html">tbb::internal::work_around_alignment_bug&lt; Size, T &gt;</a></td><td><code> [static]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00115.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00115.html
new file mode 100644 (file)
index 0000000..691eeb6
--- /dev/null
@@ -0,0 +1,35 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::tick_count Member List</h1>This is the complete list of members for <a class="el" href="a00215.html">tbb::tick_count</a>, including all inherited members.<p><table>
+  <tr class="memlist"><td><a class="el" href="a00215.html#fb7f78ca61cf28398645ace66e284473">now</a>()</td><td><a class="el" href="a00215.html">tbb::tick_count</a></td><td><code> [inline, static]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00215.html#09dde78a4100800c11bb883d6204b586">operator-</a>(const tick_count &amp;t1, const tick_count &amp;t0)</td><td><a class="el" href="a00215.html">tbb::tick_count</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00215.html#34593326ae4191e02a13c7cbdab9de4c">tick_count</a>()</td><td><a class="el" href="a00215.html">tbb::tick_count</a></td><td><code> [inline]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00116.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00116.html
new file mode 100644 (file)
index 0000000..07bcfe6
--- /dev/null
@@ -0,0 +1,41 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Member List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb::tick_count::interval_t Member List</h1>This is the complete list of members for <a class="el" href="a00216.html">tbb::tick_count::interval_t</a>, including all inherited members.<p><table>
+  <tr class="memlist"><td><a class="el" href="a00216.html#75a9a0949f8a8a84d6758835f1b48dad">interval_t</a>()</td><td><a class="el" href="a00216.html">tbb::tick_count::interval_t</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00216.html#1a21a428e00cced2e6a49e0f5f2258bf">interval_t</a>(double sec)</td><td><a class="el" href="a00216.html">tbb::tick_count::interval_t</a></td><td><code> [inline, explicit]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00216.html#5871ead1ca230efbe52a5008470e6428">operator+</a>(const interval_t &amp;i, const interval_t &amp;j)</td><td><a class="el" href="a00216.html">tbb::tick_count::interval_t</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00216.html#cd9814947902e26463a69a111530f81b">operator+=</a>(const interval_t &amp;i)</td><td><a class="el" href="a00216.html">tbb::tick_count::interval_t</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00216.html#09dde78a4100800c11bb883d6204b586">operator-</a>(const tick_count &amp;t1, const tick_count &amp;t0)</td><td><a class="el" href="a00216.html">tbb::tick_count::interval_t</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00216.html#fa509691e1d689830931e36edd274f76">operator-</a>(const interval_t &amp;i, const interval_t &amp;j)</td><td><a class="el" href="a00216.html">tbb::tick_count::interval_t</a></td><td><code> [friend]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00216.html#35ff7eaf7c2031b4a991402ac9ecb940">operator-=</a>(const interval_t &amp;i)</td><td><a class="el" href="a00216.html">tbb::tick_count::interval_t</a></td><td><code> [inline]</code></td></tr>
+  <tr class="memlist"><td><a class="el" href="a00216.html#d5d8429c0bc59cf6131b2abc7929fa59">seconds</a>() const </td><td><a class="el" href="a00216.html">tbb::tick_count::interval_t</a></td><td><code> [inline]</code></td></tr>
+  <tr bgcolor="#f0f0f0"><td><b>tbb::tick_count</b> (defined in <a class="el" href="a00216.html">tbb::tick_count::interval_t</a>)</td><td><a class="el" href="a00216.html">tbb::tick_count::interval_t</a></td><td><code> [friend]</code></td></tr>
+</table><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
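
A minimal C++ sketch of how the tbb::tick_count and tbb::tick_count::interval_t members listed above are normally combined to time a region of code; the work() function is a placeholder standing in for whatever is being measured:

    #include <iostream>
    #include "tbb/tick_count.h"

    // Placeholder workload; any code to be timed goes here.
    static void work() {
        volatile double x = 0;
        for (int i = 0; i < 1000000; ++i)
            x += i;
    }

    int main() {
        tbb::tick_count t0 = tbb::tick_count::now();    // start timestamp
        work();
        tbb::tick_count t1 = tbb::tick_count::now();    // end timestamp
        tbb::tick_count::interval_t elapsed = t1 - t0;  // friend operator-(t1, t0)
        std::cout << "work() took " << elapsed.seconds() << " s\n";
        return 0;
    }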
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00145.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00145.html
new file mode 100644 (file)
index 0000000..1cfd543
--- /dev/null
@@ -0,0 +1,62 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::affinity_partitioner Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00145.html">affinity_partitioner</a></div>
+<h1>tbb::affinity_partitioner Class Reference</h1><!-- doxytag: class="tbb::affinity_partitioner" -->An affinity partitioner.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00414.html">partitioner.h</a>&gt;</code>
+<p>
+<a href="a00048.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a47f7e0208a2bf68f35b51b27d2ddf2a"></a><!-- doxytag: member="tbb::affinity_partitioner::internal::start_for" ref="a47f7e0208a2bf68f35b51b27d2ddf2a" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::start_for</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e8f710e431b62c2a48914bd99d0fe034"></a><!-- doxytag: member="tbb::affinity_partitioner::internal::start_reduce" ref="e8f710e431b62c2a48914bd99d0fe034" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::start_reduce</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="181db51277e9b15dd0ec4a0617ab1f92"></a><!-- doxytag: member="tbb::affinity_partitioner::internal::start_reduce_with_affinity" ref="181db51277e9b15dd0ec4a0617ab1f92" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::start_reduce_with_affinity</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="76d97ae6ad98db0acfc8ed8cb7c76705"></a><!-- doxytag: member="tbb::affinity_partitioner::internal::start_scan" ref="76d97ae6ad98db0acfc8ed8cb7c76705" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::start_scan</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e9575467e391b90e0703fadf573dbad7"></a><!-- doxytag: member="tbb::affinity_partitioner::internal::affinity_partition_type" ref="e9575467e391b90e0703fadf573dbad7" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::affinity_partition_type</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+An affinity partitioner. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00414.html">partitioner.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
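
The key property of tbb::affinity_partitioner is that the same partitioner object is passed to repeated parallel loops over the same data so that cache affinity from earlier sweeps can be replayed. A hedged sketch of that usage pattern (the Scale body and vector size are illustrative, not part of the library):

    #include <vector>
    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"
    #include "tbb/partitioner.h"

    // Simple loop body: doubles every element of an array slice.
    struct Scale {
        double* a;
        void operator()(const tbb::blocked_range<size_t>& r) const {
            for (size_t i = r.begin(); i != r.end(); ++i)
                a[i] *= 2.0;
        }
    };

    int main() {
        std::vector<double> v(1 << 20, 1.0);
        Scale body = { &v[0] };
        tbb::affinity_partitioner ap;   // reused across sweeps to replay thread affinity
        for (int sweep = 0; sweep < 10; ++sweep)
            tbb::parallel_for(tbb::blocked_range<size_t>(0, v.size()), body, ap);
        return 0;
    }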
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00146.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00146.html
new file mode 100644 (file)
index 0000000..490b0f5
--- /dev/null
@@ -0,0 +1,63 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::aligned_space&lt; T, N &gt; Class Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00146.html">aligned_space</a></div>
+<h1>tbb::aligned_space&lt; T, N &gt; Class Template Reference<br>
+<small>
+[<a class="el" href="a00282.html">Memory Allocation</a>]</small>
+</h1><!-- doxytag: class="tbb::aligned_space" -->Block of space aligned sufficiently to construct an array T with N elements.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00316.html">aligned_space.h</a>&gt;</code>
+<p>
+<a href="a00001.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="0d702fc6b9e9d061ace3501b3c861cdf"></a><!-- doxytag: member="tbb::aligned_space::begin" ref="0d702fc6b9e9d061ace3501b3c861cdf" args="()" -->
+T *&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00146.html#0d702fc6b9e9d061ace3501b3c861cdf">begin</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Pointer to beginning of array. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="024be075c23c0394c9a2518d993bcd9e"></a><!-- doxytag: member="tbb::aligned_space::end" ref="024be075c23c0394c9a2518d993bcd9e" args="()" -->
+T *&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00146.html#024be075c23c0394c9a2518d993bcd9e">end</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Pointer to one past last element in array. <br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename T, size_t N&gt;<br>
+ class tbb::aligned_space&lt; T, N &gt;</h3>
+
+Block of space aligned sufficiently to construct an array T with N elements. 
+<p>
+The elements are not constructed or destroyed by this class. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00316.html">aligned_space.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
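
Since tbb::aligned_space only reserves suitably aligned raw storage and never constructs or destroys the elements, a typical use pairs it with placement new and explicit destructor calls. A small sketch (Widget is an illustrative type, not part of TBB):

    #include <new>
    #include "tbb/aligned_space.h"

    struct Widget {
        int id;
        explicit Widget(int i) : id(i) {}
    };

    int main() {
        tbb::aligned_space<Widget, 4> storage;          // raw storage, nothing constructed yet
        for (int i = 0; i < 4; ++i)
            new (storage.begin() + i) Widget(i);        // construct each element in place
        int sum = 0;
        for (Widget* p = storage.begin(); p != storage.end(); ++p)
            sum += p->id;
        for (Widget* p = storage.begin(); p != storage.end(); ++p)
            p->~Widget();                               // caller destroys what it constructed
        return sum == 6 ? 0 : 1;
    }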
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00147.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00147.html
new file mode 100644 (file)
index 0000000..16ae0e6
--- /dev/null
@@ -0,0 +1,61 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::atomic&lt; T &gt; Struct Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00147.html">atomic</a></div>
+<h1>tbb::atomic&lt; T &gt; Struct Template Reference<br>
+<small>
+[<a class="el" href="a00283.html">Synchronization</a>]</small>
+</h1><!-- doxytag: class="tbb::atomic" -->Primary template for atomic.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00317.html">atomic.h</a>&gt;</code>
+<p>
+<a href="a00002.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="1e29e03a31436bad5eb1aa5a9d46a2da"></a><!-- doxytag: member="tbb::atomic::operator=" ref="1e29e03a31436bad5eb1aa5a9d46a2da" args="(T rhs)" -->
+T&nbsp;</td><td class="memItemRight" valign="bottom"><b>operator=</b> (T rhs)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="90791297d59b1659cfaf0d93c7cc59ae"></a><!-- doxytag: member="tbb::atomic::operator=" ref="90791297d59b1659cfaf0d93c7cc59ae" args="(const atomic&lt; T &gt; &amp;rhs)" -->
+<a class="el" href="a00147.html">atomic</a>&lt; T &gt; &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><b>operator=</b> (const <a class="el" href="a00147.html">atomic</a>&lt; T &gt; &amp;rhs)</td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename T&gt;<br>
+ struct tbb::atomic&lt; T &gt;</h3>
+
+Primary template for atomic. 
+<p>
+See the Reference for details. 
+<p>
+<hr>The documentation for this struct was generated from the following file:<ul>
+<li><a class="el" href="a00317.html">atomic.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
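
A brief sketch of the common operations on tbb::atomic&lt;T&gt; (assignment, atomic addition, fetch_and_add, compare_and_swap); the concrete values are only for illustration:

    #include <cassert>
    #include "tbb/atomic.h"

    int main() {
        tbb::atomic<int> counter;
        counter = 0;                                  // operator=(T rhs)
        counter += 5;                                 // atomic addition (integral types)
        int old = counter.fetch_and_add(3);           // returns the value before adding
        assert(old == 5 && counter == 8);
        int prev = counter.compare_and_swap(42, 8);   // store 42 only if the value is 8
        assert(prev == 8 && counter == 42);
        return 0;
    }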
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00148.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00148.html
new file mode 100644 (file)
index 0000000..48959d1
--- /dev/null
@@ -0,0 +1,56 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::atomic&lt; void * &gt; Struct Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00148.html">atomic&lt; void * &gt;</a></div>
+<h1>tbb::atomic&lt; void * &gt; Struct Template Reference</h1><!-- doxytag: class="tbb::atomic&lt; void * &gt;" -->Specialization for <a class="el" href="a00148.html">atomic&lt;void*&gt;</a>, for the sake of not allowing arithmetic or operator-&gt;.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00317.html">atomic.h</a>&gt;</code>
+<p>
+<a href="a00003.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ba6e74163e255b88c497c40db64c5dad"></a><!-- doxytag: member="tbb::atomic&lt; void * &gt;::operator=" ref="ba6e74163e255b88c497c40db64c5dad" args="(void *rhs)" -->
+void *&nbsp;</td><td class="memItemRight" valign="bottom"><b>operator=</b> (void *rhs)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="37979e62d1f88eb45bd3ad7df5a140f8"></a><!-- doxytag: member="tbb::atomic&lt; void * &gt;::operator=" ref="37979e62d1f88eb45bd3ad7df5a140f8" args="(const atomic&lt; void * &gt; &amp;rhs)" -->
+<a class="el" href="a00147.html">atomic</a>&lt; void * &gt; &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><b>operator=</b> (const <a class="el" href="a00147.html">atomic</a>&lt; void * &gt; &amp;rhs)</td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;&gt;<br>
+ struct tbb::atomic&lt; void * &gt;</h3>
+
+Specialization for <a class="el" href="a00148.html">atomic&lt;void*&gt;</a>, for the sake of not allowing arithmetic or operator-&gt;. 
+<p>
+<hr>The documentation for this struct was generated from the following file:<ul>
+<li><a class="el" href="a00317.html">atomic.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.

diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00149.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00149.html
new file mode 100644 (file)
index 0000000..980df77
--- /dev/null
@@ -0,0 +1,59 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::internal::atomic_backoff Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<b>internal</b>::<a class="el" href="a00149.html">atomic_backoff</a></div>
+<h1>tbb::internal::atomic_backoff Class Reference</h1><!-- doxytag: class="tbb::internal::atomic_backoff" -->Class that implements exponential backoff.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00465.html">tbb_machine.h</a>&gt;</code>
+<p>
+<a href="a00102.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a174ea93e3bd3d5cce82389c2f28d037"></a><!-- doxytag: member="tbb::internal::atomic_backoff::pause" ref="a174ea93e3bd3d5cce82389c2f28d037" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00149.html#a174ea93e3bd3d5cce82389c2f28d037">pause</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Pause for a while. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="c6179b19e2e8dedd73a36d523af2fe12"></a><!-- doxytag: member="tbb::internal::atomic_backoff::bounded_pause" ref="c6179b19e2e8dedd73a36d523af2fe12" args="()" -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>bounded_pause</b> ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="45edb535489eb0e76ecc59a19fd952de"></a><!-- doxytag: member="tbb::internal::atomic_backoff::reset" ref="45edb535489eb0e76ecc59a19fd952de" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><b>reset</b> ()</td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Class that implements exponential backoff. 
+<p>
+See implementation of spin_wait_while_eq for an example. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00465.html">tbb_machine.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
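
tbb::internal::atomic_backoff is an internal helper, but the exponential-backoff spin-wait pattern it supports can be sketched as follows; this mirrors the spin_wait_while_eq idiom mentioned in the description above and is an illustration, not the library's own implementation:

    #include "tbb/atomic.h"   // also pulls in tbb_machine.h, where atomic_backoff lives

    // Spin until `flag` no longer equals `value`, pausing longer on each iteration.
    inline void spin_while_eq(const tbb::atomic<int>& flag, int value) {
        tbb::internal::atomic_backoff backoff;
        while (flag == value)
            backoff.pause();   // exponential backoff between polls
    }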
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00150.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00150.html
new file mode 100644 (file)
index 0000000..8a5ec49
--- /dev/null
@@ -0,0 +1,64 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::auto_partitioner Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00150.html">auto_partitioner</a></div>
+<h1>tbb::auto_partitioner Class Reference<br>
+<small>
+[<a class="el" href="a00280.html">Algorithms</a>]</small>
+</h1><!-- doxytag: class="tbb::auto_partitioner" -->An auto partitioner.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00414.html">partitioner.h</a>&gt;</code>
+<p>
+<a href="a00046.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a47f7e0208a2bf68f35b51b27d2ddf2a"></a><!-- doxytag: member="tbb::auto_partitioner::internal::start_for" ref="a47f7e0208a2bf68f35b51b27d2ddf2a" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::start_for</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e8f710e431b62c2a48914bd99d0fe034"></a><!-- doxytag: member="tbb::auto_partitioner::internal::start_reduce" ref="e8f710e431b62c2a48914bd99d0fe034" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::start_reduce</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="76d97ae6ad98db0acfc8ed8cb7c76705"></a><!-- doxytag: member="tbb::auto_partitioner::internal::start_scan" ref="76d97ae6ad98db0acfc8ed8cb7c76705" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::start_scan</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><b>partition_type</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+An auto partitioner. 
+<p>
+The range is initially divided into several large chunks. Chunks are further subdivided into VICTIM_CHUNKS pieces if they are stolen and divisible. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00414.html">partitioner.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
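
Since tbb::auto_partitioner chooses chunk sizes automatically, loop and reduction bodies need no grainsize tuning and the partitioner is passed as a temporary. A hedged sketch using parallel_reduce (the Sum body and vector contents are illustrative):

    #include <vector>
    #include "tbb/parallel_reduce.h"
    #include "tbb/blocked_range.h"
    #include "tbb/partitioner.h"

    // Reduction body summing a range of doubles.
    struct Sum {
        const double* a;
        double total;
        Sum(const double* a_) : a(a_), total(0) {}
        Sum(Sum& other, tbb::split) : a(other.a), total(0) {}   // splitting constructor
        void operator()(const tbb::blocked_range<size_t>& r) {
            for (size_t i = r.begin(); i != r.end(); ++i)
                total += a[i];
        }
        void join(const Sum& rhs) { total += rhs.total; }
    };

    int main() {
        std::vector<double> v(1 << 20, 1.0);
        Sum s(&v[0]);
        tbb::parallel_reduce(tbb::blocked_range<size_t>(0, v.size()), s,
                             tbb::auto_partitioner());
        return s.total == double(v.size()) ? 0 : 1;
    }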
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00151.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00151.html
new file mode 100644 (file)
index 0000000..5742616
--- /dev/null
@@ -0,0 +1,50 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::bad_last_alloc Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00151.html">bad_last_alloc</a></div>
+<h1>tbb::bad_last_alloc Class Reference</h1><!-- doxytag: class="tbb::bad_last_alloc" -->Exception for concurrent containers.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00464.html">tbb_exception.h</a>&gt;</code>
+<p>
+<a href="a00094.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ec7a57742ff4dc214266179d88984015"></a><!-- doxytag: member="tbb::bad_last_alloc::what" ref="ec7a57742ff4dc214266179d88984015" args="() const " -->
+const char *&nbsp;</td><td class="memItemRight" valign="bottom"><b>what</b> () const   throw ()</td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Exception for concurrent containers. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00464.html">tbb_exception.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00152.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00152.html
new file mode 100644 (file)
index 0000000..a9960ac
--- /dev/null
@@ -0,0 +1,224 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::blocked_range&lt; Value &gt; Class Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00152.html">blocked_range</a></div>
+<h1>tbb::blocked_range&lt; Value &gt; Class Template Reference<br>
+<small>
+[<a class="el" href="a00280.html">Algorithms</a>]</small>
+</h1><!-- doxytag: class="tbb::blocked_range" -->A range over which to iterate.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00325.html">blocked_range.h</a>&gt;</code>
+<p>
+<a href="a00004.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">typedef Value&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00152.html#1a8d05842c2b3dfc177bc4d347e4cef7">const_iterator</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Type of a value.  <a href="#1a8d05842c2b3dfc177bc4d347e4cef7"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f5707bffea38eee5c9680f37358afb8e"></a><!-- doxytag: member="tbb::blocked_range::size_type" ref="f5707bffea38eee5c9680f37358afb8e" args="" -->
+typedef std::size_t&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">size_type</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Type for size of a range. <br></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00152.html#94607755c5110d199202234d58d022ac">blocked_range</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct range with default-constructed values for begin and end.  <a href="#94607755c5110d199202234d58d022ac"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="14795a36ead1414b4371dbe1a4656359"></a><!-- doxytag: member="tbb::blocked_range::blocked_range" ref="14795a36ead1414b4371dbe1a4656359" args="(Value begin_, Value end_, size_type grainsize_=1)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00152.html#14795a36ead1414b4371dbe1a4656359">blocked_range</a> (Value begin_, Value end_, <a class="el" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">size_type</a> grainsize_=1)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct range over half-open interval [begin,end), with the given grainsize. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="18d2258400756ac1446dac7676b18df3"></a><!-- doxytag: member="tbb::blocked_range::begin" ref="18d2258400756ac1446dac7676b18df3" args="() const " -->
+<a class="el" href="a00152.html#1a8d05842c2b3dfc177bc4d347e4cef7">const_iterator</a>&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00152.html#18d2258400756ac1446dac7676b18df3">begin</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Beginning of range. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="8b929d93ddc13f148b11bceef3a3bdf8"></a><!-- doxytag: member="tbb::blocked_range::end" ref="8b929d93ddc13f148b11bceef3a3bdf8" args="() const " -->
+<a class="el" href="a00152.html#1a8d05842c2b3dfc177bc4d347e4cef7">const_iterator</a>&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00152.html#8b929d93ddc13f148b11bceef3a3bdf8">end</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">One past last value in range. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="el" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">size_type</a>&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00152.html#9eaa0b6beff1420f688570bbf6b8c462">size</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Size of the range.  <a href="#9eaa0b6beff1420f688570bbf6b8c462"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="fcd2e5b8b6c11fd3f20fc0aa9f11bbc2"></a><!-- doxytag: member="tbb::blocked_range::grainsize" ref="fcd2e5b8b6c11fd3f20fc0aa9f11bbc2" args="() const " -->
+<a class="el" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">size_type</a>&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00152.html#fcd2e5b8b6c11fd3f20fc0aa9f11bbc2">grainsize</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">The grain size for this range. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="8f4f02f530eb3f2e7ea26e06f76aef9d"></a><!-- doxytag: member="tbb::blocked_range::empty" ref="8f4f02f530eb3f2e7ea26e06f76aef9d" args="() const " -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00152.html#8f4f02f530eb3f2e7ea26e06f76aef9d">empty</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">True if range is empty. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00152.html#41a58b703d574b6e1ca155df3576f578">is_divisible</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">True if range is divisible.  <a href="#41a58b703d574b6e1ca155df3576f578"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00152.html#4c0efd2be3f96a0ab3ba5085e8b3fcc7">blocked_range</a> (<a class="el" href="a00152.html">blocked_range</a> &amp;r, <a class="el" href="a00203.html">split</a>)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Split range.  <a href="#4c0efd2be3f96a0ab3ba5085e8b3fcc7"></a><br></td></tr>
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="395a514a55017e36d571809f1223389b"></a><!-- doxytag: member="tbb::blocked_range::blocked_range2d" ref="395a514a55017e36d571809f1223389b" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>blocked_range2d</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="13d172a4da9be197033e64516fff1f4b"></a><!-- doxytag: member="tbb::blocked_range::blocked_range3d" ref="13d172a4da9be197033e64516fff1f4b" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>blocked_range3d</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename Value&gt;<br>
+ class tbb::blocked_range&lt; Value &gt;</h3>
+
+A range over which to iterate. 
+<p>
+<hr><h2>Member Typedef Documentation</h2>
+<a class="anchor" name="1a8d05842c2b3dfc177bc4d347e4cef7"></a><!-- doxytag: member="tbb::blocked_range::const_iterator" ref="1a8d05842c2b3dfc177bc4d347e4cef7" args="" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename Value&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">typedef Value <a class="el" href="a00152.html">tbb::blocked_range</a>&lt; Value &gt;::<a class="el" href="a00152.html#1a8d05842c2b3dfc177bc4d347e4cef7">const_iterator</a>          </td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Type of a value. 
+<p>
+Called a const_iterator for the sake of algorithms that need to treat a <a class="el" href="a00152.html">blocked_range</a> as an STL container. 
+</div>
+</div><p>
+<hr><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" name="94607755c5110d199202234d58d022ac"></a><!-- doxytag: member="tbb::blocked_range::blocked_range" ref="94607755c5110d199202234d58d022ac" args="()" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename Value&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="a00152.html">tbb::blocked_range</a>&lt; Value &gt;::<a class="el" href="a00152.html">blocked_range</a>           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Construct range with default-constructed values for begin and end. 
+<p>
+Requires that Value have a default constructor. 
+</div>
+</div><p>
+<a class="anchor" name="4c0efd2be3f96a0ab3ba5085e8b3fcc7"></a><!-- doxytag: member="tbb::blocked_range::blocked_range" ref="4c0efd2be3f96a0ab3ba5085e8b3fcc7" args="(blocked_range &amp;r, split)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename Value&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="a00152.html">tbb::blocked_range</a>&lt; Value &gt;::<a class="el" href="a00152.html">blocked_range</a>           </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="a00152.html">blocked_range</a>&lt; Value &gt; &amp;&nbsp;</td>
+          <td class="paramname"> <em>r</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="a00203.html">split</a>&nbsp;</td>
+          <td class="paramname"></td><td>&nbsp;</td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td><td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Split range. 
+<p>
+The new Range *this has the second half; the old range r has the first half. Unspecified if <a class="el" href="a00152.html#8b929d93ddc13f148b11bceef3a3bdf8">end()</a>&lt;<a class="el" href="a00152.html#18d2258400756ac1446dac7676b18df3">begin()</a> or !is_divisible(). 
+</div>
+</div><p>
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="41a58b703d574b6e1ca155df3576f578"></a><!-- doxytag: member="tbb::blocked_range::is_divisible" ref="41a58b703d574b6e1ca155df3576f578" args="() const " -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename Value&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">bool <a class="el" href="a00152.html">tbb::blocked_range</a>&lt; Value &gt;::is_divisible           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"> const<code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+True if range is divisible. 
+<p>
+Unspecified if <a class="el" href="a00152.html#8b929d93ddc13f148b11bceef3a3bdf8">end()</a>&lt;<a class="el" href="a00152.html#18d2258400756ac1446dac7676b18df3">begin()</a>. 
+</div>
+</div><p>
+<a class="anchor" name="9eaa0b6beff1420f688570bbf6b8c462"></a><!-- doxytag: member="tbb::blocked_range::size" ref="9eaa0b6beff1420f688570bbf6b8c462" args="() const " -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename Value&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">size_type</a> <a class="el" href="a00152.html">tbb::blocked_range</a>&lt; Value &gt;::size           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"> const<code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Size of the range. 
+<p>
+Unspecified if <a class="el" href="a00152.html#8b929d93ddc13f148b11bceef3a3bdf8">end()</a>&lt;<a class="el" href="a00152.html#18d2258400756ac1446dac7676b18df3">begin()</a>. 
+</div>
+</div><p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00325.html">blocked_range.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
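
The splitting behaviour documented above can be seen directly by invoking the splitting constructor by hand, which is what the parallel algorithms do internally. A small sketch (the concrete bounds and grainsize are illustrative):

    #include <cassert>
    #include "tbb/blocked_range.h"   // tbb::split comes along via tbb_stddef.h

    int main() {
        tbb::blocked_range<int> r(0, 100, /*grainsize=*/10);
        assert(r.size() == 100 && r.grainsize() == 10);
        assert(r.is_divisible() && !r.empty());

        // The splitting constructor leaves the first part in r and
        // moves the second part (roughly half) into the new range.
        tbb::blocked_range<int> second(r, tbb::split());
        assert(r.begin() == 0 && second.end() == 100);
        assert(r.end() == second.begin());   // the two halves tile the original range
        return 0;
    }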
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00153.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00153.html
new file mode 100644 (file)
index 0000000..8c97b90
--- /dev/null
@@ -0,0 +1,86 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::blocked_range2d&lt; RowValue, ColValue &gt; Class Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00153.html">blocked_range2d</a></div>
+<h1>tbb::blocked_range2d&lt; RowValue, ColValue &gt; Class Template Reference<br>
+<small>
+[<a class="el" href="a00280.html">Algorithms</a>]</small>
+</h1><!-- doxytag: class="tbb::blocked_range2d" -->A 2-dimensional range that models the Range concept.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00326.html">blocked_range2d.h</a>&gt;</code>
+<p>
+<a href="a00005.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a807a22fe658ec38b8edfd69521d0383"></a><!-- doxytag: member="tbb::blocked_range2d::row_range_type" ref="a807a22fe658ec38b8edfd69521d0383" args="" -->
+typedef <a class="el" href="a00152.html">blocked_range</a>&lt; RowValue &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00153.html#a807a22fe658ec38b8edfd69521d0383">row_range_type</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Type for size of an iteration range. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="750d5c6ac9915443df477a30031c07fd"></a><!-- doxytag: member="tbb::blocked_range2d::col_range_type" ref="750d5c6ac9915443df477a30031c07fd" args="" -->
+typedef <a class="el" href="a00152.html">blocked_range</a>&lt; ColValue &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>col_range_type</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="4180bc35d4f8ed9f20045dba4f2a1bdc"></a><!-- doxytag: member="tbb::blocked_range2d::blocked_range2d" ref="4180bc35d4f8ed9f20045dba4f2a1bdc" args="(RowValue row_begin, RowValue row_end, typename row_range_type::size_type row_grainsize, ColValue col_begin, ColValue col_end, typename col_range_type::size_type col_grainsize)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>blocked_range2d</b> (RowValue row_begin, RowValue row_end, typename <a class="el" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">row_range_type::size_type</a> row_grainsize, ColValue col_begin, ColValue col_end, typename <a class="el" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">col_range_type::size_type</a> col_grainsize)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="485434db1609b2074d86cb604f3c674e"></a><!-- doxytag: member="tbb::blocked_range2d::blocked_range2d" ref="485434db1609b2074d86cb604f3c674e" args="(RowValue row_begin, RowValue row_end, ColValue col_begin, ColValue col_end)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>blocked_range2d</b> (RowValue row_begin, RowValue row_end, ColValue col_begin, ColValue col_end)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="d144cb2d88cef553420311aca8667a44"></a><!-- doxytag: member="tbb::blocked_range2d::empty" ref="d144cb2d88cef553420311aca8667a44" args="() const " -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00153.html#d144cb2d88cef553420311aca8667a44">empty</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">True if range is empty. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ad36a9b38e4fef26d376f99552ce2d92"></a><!-- doxytag: member="tbb::blocked_range2d::is_divisible" ref="ad36a9b38e4fef26d376f99552ce2d92" args="() const " -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00153.html#ad36a9b38e4fef26d376f99552ce2d92">is_divisible</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">True if range is divisible into two pieces. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="4f80e6db02bfa4e90e2a219de9f17461"></a><!-- doxytag: member="tbb::blocked_range2d::blocked_range2d" ref="4f80e6db02bfa4e90e2a219de9f17461" args="(blocked_range2d &amp;r, split)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>blocked_range2d</b> (<a class="el" href="a00153.html">blocked_range2d</a> &amp;r, <a class="el" href="a00203.html">split</a>)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f496e7348a82652fba581203477cc07c"></a><!-- doxytag: member="tbb::blocked_range2d::rows" ref="f496e7348a82652fba581203477cc07c" args="() const " -->
+const <a class="el" href="a00152.html">row_range_type</a> &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00153.html#f496e7348a82652fba581203477cc07c">rows</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">The rows of the iteration space. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="392a46759af2c884957115771affa7f4"></a><!-- doxytag: member="tbb::blocked_range2d::cols" ref="392a46759af2c884957115771affa7f4" args="() const " -->
+const <a class="el" href="a00152.html">col_range_type</a> &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00153.html#392a46759af2c884957115771affa7f4">cols</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">The columns of the iteration space. <br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename RowValue, typename ColValue = RowValue&gt;<br>
+ class tbb::blocked_range2d&lt; RowValue, ColValue &gt;</h3>
+
+A 2-dimensional range that models the Range concept. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00326.html">blocked_range2d.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
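
A two-dimensional range is consumed by iterating its rows() and cols() sub-ranges inside a parallel_for body. A hedged sketch over a flat row-major matrix (ZeroMatrix and the matrix dimensions are illustrative):

    #include <vector>
    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range2d.h"

    // Body that clears the sub-matrix described by a 2-D range.
    struct ZeroMatrix {
        double* m;
        size_t ncols;
        void operator()(const tbb::blocked_range2d<size_t>& r) const {
            for (size_t i = r.rows().begin(); i != r.rows().end(); ++i)
                for (size_t j = r.cols().begin(); j != r.cols().end(); ++j)
                    m[i * ncols + j] = 0.0;
        }
    };

    int main() {
        const size_t nrows = 512, ncols = 512;
        std::vector<double> matrix(nrows * ncols, 1.0);
        ZeroMatrix body = { &matrix[0], ncols };
        tbb::parallel_for(tbb::blocked_range2d<size_t>(0, nrows, 0, ncols), body);
        return 0;
    }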
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00154.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00154.html
new file mode 100644 (file)
index 0000000..a076728
--- /dev/null
@@ -0,0 +1,93 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt; Class Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00154.html">blocked_range3d</a></div>
+<h1>tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt; Class Template Reference<br>
+<small>
+[<a class="el" href="a00280.html">Algorithms</a>]</small>
+</h1><!-- doxytag: class="tbb::blocked_range3d" -->A 3-dimensional range that models the Range concept.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00327.html">blocked_range3d.h</a>&gt;</code>
+<p>
+<a href="a00006.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="b8ebf17a552ba47825e9b3887855b719"></a><!-- doxytag: member="tbb::blocked_range3d::page_range_type" ref="b8ebf17a552ba47825e9b3887855b719" args="" -->
+typedef <a class="el" href="a00152.html">blocked_range</a>&lt; PageValue &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00154.html#b8ebf17a552ba47825e9b3887855b719">page_range_type</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Type for size of an iteation range. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="4edc39b1a76c84c8f43ea8bff3e88007"></a><!-- doxytag: member="tbb::blocked_range3d::row_range_type" ref="4edc39b1a76c84c8f43ea8bff3e88007" args="" -->
+typedef <a class="el" href="a00152.html">blocked_range</a>&lt; RowValue &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>row_range_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="02229b59d4dc5437bf5fbe72acca7f66"></a><!-- doxytag: member="tbb::blocked_range3d::col_range_type" ref="02229b59d4dc5437bf5fbe72acca7f66" args="" -->
+typedef <a class="el" href="a00152.html">blocked_range</a>&lt; ColValue &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>col_range_type</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a762f420ed83a887967cb7dfe337582c"></a><!-- doxytag: member="tbb::blocked_range3d::blocked_range3d" ref="a762f420ed83a887967cb7dfe337582c" args="(PageValue page_begin, PageValue page_end, RowValue row_begin, RowValue row_end, ColValue col_begin, ColValue col_end)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>blocked_range3d</b> (PageValue page_begin, PageValue page_end, RowValue row_begin, RowValue row_end, ColValue col_begin, ColValue col_end)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="0b4dcf5b059634b95c1671086bf6d4ff"></a><!-- doxytag: member="tbb::blocked_range3d::blocked_range3d" ref="0b4dcf5b059634b95c1671086bf6d4ff" args="(PageValue page_begin, PageValue page_end, typename page_range_type::size_type page_grainsize, RowValue row_begin, RowValue row_end, typename row_range_type::size_type row_grainsize, ColValue col_begin, ColValue col_end, typename col_range_type::size_type col_grainsize)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>blocked_range3d</b> (PageValue page_begin, PageValue page_end, typename <a class="el" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">page_range_type::size_type</a> page_grainsize, RowValue row_begin, RowValue row_end, typename <a class="el" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">row_range_type::size_type</a> row_grainsize, ColValue col_begin, ColValue col_end, typename <a class="el" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">col_range_type::size_type</a> col_grainsize)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="356860e1c977d91711e8216bd55c0b25"></a><!-- doxytag: member="tbb::blocked_range3d::empty" ref="356860e1c977d91711e8216bd55c0b25" args="() const " -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00154.html#356860e1c977d91711e8216bd55c0b25">empty</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">True if range is empty. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="39d69191721c488e737ae5d9c5336b9c"></a><!-- doxytag: member="tbb::blocked_range3d::is_divisible" ref="39d69191721c488e737ae5d9c5336b9c" args="() const " -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00154.html#39d69191721c488e737ae5d9c5336b9c">is_divisible</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">True if range is divisible into two pieces. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a0d6f0085c9fb364ddcbc2197da6c251"></a><!-- doxytag: member="tbb::blocked_range3d::blocked_range3d" ref="a0d6f0085c9fb364ddcbc2197da6c251" args="(blocked_range3d &amp;r, split)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>blocked_range3d</b> (<a class="el" href="a00154.html">blocked_range3d</a> &amp;r, <a class="el" href="a00203.html">split</a>)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="cf971430aa12361d3ed245344b7c6764"></a><!-- doxytag: member="tbb::blocked_range3d::pages" ref="cf971430aa12361d3ed245344b7c6764" args="() const " -->
+const <a class="el" href="a00152.html">page_range_type</a> &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00154.html#cf971430aa12361d3ed245344b7c6764">pages</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">The pages of the iteration space. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="1584623e59ff32a8aa82006827508be4"></a><!-- doxytag: member="tbb::blocked_range3d::rows" ref="1584623e59ff32a8aa82006827508be4" args="() const " -->
+const <a class="el" href="a00152.html">row_range_type</a> &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00154.html#1584623e59ff32a8aa82006827508be4">rows</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">The rows of the iteration space. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="3336ba9480fd6c43e158f9beb024c050"></a><!-- doxytag: member="tbb::blocked_range3d::cols" ref="3336ba9480fd6c43e158f9beb024c050" args="() const " -->
+const <a class="el" href="a00152.html">col_range_type</a> &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00154.html#3336ba9480fd6c43e158f9beb024c050">cols</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">The columns of the iteration space. <br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename PageValue, typename RowValue = PageValue, typename ColValue = RowValue&gt;<br>
+ class tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</h3>
+
+A 3-dimensional range that models the Range concept. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00327.html">blocked_range3d.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
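
The 3-D range is used analogously; the flat 8x256x256 float volume assumed below is purely for illustration.

    // Sketch only: a 3-D loop nest expressed with tbb::blocked_range3d.
    #include "tbb/blocked_range3d.h"
    #include "tbb/parallel_for.h"

    struct InitVolume {
        float* data;                                    // hypothetical flat [8][256][256] array
        void operator()( const tbb::blocked_range3d<int>& r ) const {
            for( int p = r.pages().begin(); p != r.pages().end(); ++p )
                for( int i = r.rows().begin(); i != r.rows().end(); ++i )
                    for( int j = r.cols().begin(); j != r.cols().end(); ++j )
                        data[ (p*256 + i)*256 + j ] = 0.0f;
        }
    };

    void init_volume( float* data ) {
        InitVolume body = { data };
        tbb::parallel_for( tbb::blocked_range3d<int>( 0, 8, 0, 256, 0, 256 ), body );
    }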
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00155.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00155.html
new file mode 100644 (file)
index 0000000..74db6f2
--- /dev/null
@@ -0,0 +1,114 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::cache_aligned_allocator&lt; T &gt; Class Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00155.html">cache_aligned_allocator</a></div>
+<h1>tbb::cache_aligned_allocator&lt; T &gt; Class Template Reference<br>
+<small>
+[<a class="el" href="a00282.html">Memory Allocation</a>]</small>
+</h1><!-- doxytag: class="tbb::cache_aligned_allocator" -->Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00328.html">cache_aligned_allocator.h</a>&gt;</code>
+<p>
+<a href="a00007.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="8f7128452e3c43b5bf1801b61d971d17"></a><!-- doxytag: member="tbb::cache_aligned_allocator::value_type" ref="8f7128452e3c43b5bf1801b61d971d17" args="" -->
+typedef internal::allocator_type&lt;<br>
+ T &gt;::value_type&nbsp;</td><td class="memItemRight" valign="bottom"><b>value_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="b23ef1d76c34fe1c2ebbb2f3df5b5676"></a><!-- doxytag: member="tbb::cache_aligned_allocator::pointer" ref="b23ef1d76c34fe1c2ebbb2f3df5b5676" args="" -->
+typedef value_type *&nbsp;</td><td class="memItemRight" valign="bottom"><b>pointer</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="d15affd67eafcabbd12bd1cd07372994"></a><!-- doxytag: member="tbb::cache_aligned_allocator::const_pointer" ref="d15affd67eafcabbd12bd1cd07372994" args="" -->
+typedef const value_type *&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_pointer</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e8c4e0d5867a07b1cea21d4fca2998c8"></a><!-- doxytag: member="tbb::cache_aligned_allocator::reference" ref="e8c4e0d5867a07b1cea21d4fca2998c8" args="" -->
+typedef value_type &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><b>reference</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="d3d77a39f0852c42f80c6505843f450c"></a><!-- doxytag: member="tbb::cache_aligned_allocator::const_reference" ref="d3d77a39f0852c42f80c6505843f450c" args="" -->
+typedef const value_type &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_reference</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="90ca6a2979ebee466be771f4c2f9337e"></a><!-- doxytag: member="tbb::cache_aligned_allocator::size_type" ref="90ca6a2979ebee466be771f4c2f9337e" args="" -->
+typedef size_t&nbsp;</td><td class="memItemRight" valign="bottom"><b>size_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="b73eba293d042333f343d025c26751a4"></a><!-- doxytag: member="tbb::cache_aligned_allocator::difference_type" ref="b73eba293d042333f343d025c26751a4" args="" -->
+typedef ptrdiff_t&nbsp;</td><td class="memItemRight" valign="bottom"><b>difference_type</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="74ccd00a29171ddd61a4ea88cfa49c32"></a><!-- doxytag: member="tbb::cache_aligned_allocator::cache_aligned_allocator" ref="74ccd00a29171ddd61a4ea88cfa49c32" args="(const cache_aligned_allocator &amp;)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>cache_aligned_allocator</b> (const <a class="el" href="a00155.html">cache_aligned_allocator</a> &amp;)  throw ()</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="de57e731c36a51bf39b458c9b8adee58"></a><!-- doxytag: member="tbb::cache_aligned_allocator::cache_aligned_allocator" ref="de57e731c36a51bf39b458c9b8adee58" args="(const cache_aligned_allocator&lt; U &gt; &amp;)" -->
+template&lt;typename U&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>cache_aligned_allocator</b> (const <a class="el" href="a00155.html">cache_aligned_allocator</a>&lt; U &gt; &amp;)  throw ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="023e0cc7cf6ebde69e1f2c12182eb12b"></a><!-- doxytag: member="tbb::cache_aligned_allocator::address" ref="023e0cc7cf6ebde69e1f2c12182eb12b" args="(reference x) const " -->
+pointer&nbsp;</td><td class="memItemRight" valign="bottom"><b>address</b> (reference x) const </td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="db4f7613b1de12d1e79285c15636cf3b"></a><!-- doxytag: member="tbb::cache_aligned_allocator::address" ref="db4f7613b1de12d1e79285c15636cf3b" args="(const_reference x) const " -->
+const_pointer&nbsp;</td><td class="memItemRight" valign="bottom"><b>address</b> (const_reference x) const </td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="4cdeea67af6c1fcd8f1d5e9c4cab61e8"></a><!-- doxytag: member="tbb::cache_aligned_allocator::allocate" ref="4cdeea67af6c1fcd8f1d5e9c4cab61e8" args="(size_type n, const void *hint=0)" -->
+pointer&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00155.html#4cdeea67af6c1fcd8f1d5e9c4cab61e8">allocate</a> (size_type n, const void *hint=0)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Allocate space for n objects, starting on a cache/sector line. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="3d4eadf188f7d85d3805ae534e0b8e1c"></a><!-- doxytag: member="tbb::cache_aligned_allocator::deallocate" ref="3d4eadf188f7d85d3805ae534e0b8e1c" args="(pointer p, size_type)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00155.html#3d4eadf188f7d85d3805ae534e0b8e1c">deallocate</a> (pointer p, size_type)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Free block of memory that starts on a cache line. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="fb23b687b4c0429dab4c7f8017266cf0"></a><!-- doxytag: member="tbb::cache_aligned_allocator::max_size" ref="fb23b687b4c0429dab4c7f8017266cf0" args="() const " -->
+size_type&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00155.html#fb23b687b4c0429dab4c7f8017266cf0">max_size</a> () const   throw ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Largest value for which method allocate might succeed. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="958ee8745c86c275bfc9533af565e017"></a><!-- doxytag: member="tbb::cache_aligned_allocator::construct" ref="958ee8745c86c275bfc9533af565e017" args="(pointer p, const value_type &amp;value)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00155.html#958ee8745c86c275bfc9533af565e017">construct</a> (pointer p, const value_type &amp;value)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Copy-construct value at location pointed to by p. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="cd298895a4f1654b5149ec84b591ecb5"></a><!-- doxytag: member="tbb::cache_aligned_allocator::destroy" ref="cd298895a4f1654b5149ec84b591ecb5" args="(pointer p)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00155.html#cd298895a4f1654b5149ec84b591ecb5">destroy</a> (pointer p)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destroy value at location pointed to by p. <br></td></tr>
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">struct &nbsp;</td><td class="memItemRight" valign="bottom"><b>rebind</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename T&gt;<br>
+ class tbb::cache_aligned_allocator&lt; T &gt;</h3>
+
+Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5. 
+<p>
+The members are ordered the same way they are in section 20.4.1 of the ISO C++ standard. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00328.html">cache_aligned_allocator.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
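
As a usage note, the allocator is normally plugged into a standard container rather than called directly; the vector of doubles below is only an example. The price of cache-line alignment is the padding needed to round each allocation up to a line, so it pays off mainly for data shared across threads.

    // Sketch only: a std::vector whose storage starts on a cache line,
    // which helps avoid false sharing between threads.
    #include "tbb/cache_aligned_allocator.h"
    #include <cstddef>
    #include <vector>

    typedef std::vector< double, tbb::cache_aligned_allocator<double> > aligned_vector;

    aligned_vector make_buffer( std::size_t n ) {
        return aligned_vector( n, 0.0 );                // elements begin on a cache-line boundary
    }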
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00156.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00156.html
new file mode 100644 (file)
index 0000000..616dea2
--- /dev/null
@@ -0,0 +1,65 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::cache_aligned_allocator&lt; void &gt; Class Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00156.html">cache_aligned_allocator&lt; void &gt;</a></div>
+<h1>tbb::cache_aligned_allocator&lt; void &gt; Class Template Reference<br>
+<small>
+[<a class="el" href="a00282.html">Memory Allocation</a>]</small>
+</h1><!-- doxytag: class="tbb::cache_aligned_allocator&lt; void &gt;" -->Analogous to std::allocator&lt;void&gt;, as defined in ISO C++ Standard, Section 20.4.1.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00328.html">cache_aligned_allocator.h</a>&gt;</code>
+<p>
+<a href="a00009.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ce89776145998f0685a767b4f195272d"></a><!-- doxytag: member="tbb::cache_aligned_allocator&lt; void &gt;::pointer" ref="ce89776145998f0685a767b4f195272d" args="" -->
+typedef void *&nbsp;</td><td class="memItemRight" valign="bottom"><b>pointer</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="4431fbbe1a8cfeb19c48ad0072078f45"></a><!-- doxytag: member="tbb::cache_aligned_allocator&lt; void &gt;::const_pointer" ref="4431fbbe1a8cfeb19c48ad0072078f45" args="" -->
+typedef const void *&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_pointer</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="2ab296f4ecdd75f8e193ff92004a697d"></a><!-- doxytag: member="tbb::cache_aligned_allocator&lt; void &gt;::value_type" ref="2ab296f4ecdd75f8e193ff92004a697d" args="" -->
+typedef void&nbsp;</td><td class="memItemRight" valign="bottom"><b>value_type</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">struct &nbsp;</td><td class="memItemRight" valign="bottom"><b>rebind</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;&gt;<br>
+ class tbb::cache_aligned_allocator&lt; void &gt;</h3>
+
+Analogous to std::allocator&lt;void&gt;, as defined in ISO C++ Standard, Section 20.4.1. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00328.html">cache_aligned_allocator.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00157.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00157.html
new file mode 100644 (file)
index 0000000..1e8a9d7
--- /dev/null
@@ -0,0 +1,157 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::captured_exception Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00157.html">captured_exception</a></div>
+<h1>tbb::captured_exception Class Reference</h1><!-- doxytag: class="tbb::captured_exception" --><!-- doxytag: inherits="tbb::tbb_exception" -->This class is used by TBB to propagate information about unhandled exceptions into the root thread.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00464.html">tbb_exception.h</a>&gt;</code>
+<p>
+<p>Inheritance diagram for tbb::captured_exception:
+<p><center><img src="a00157.png" usemap="#tbb::captured_exception_map" border="0" alt=""></center>
+<map name="tbb::captured_exception_map">
+<area href="a00211.html" alt="tbb::tbb_exception" shape="rect" coords="0,0,146,24">
+</map>
+<a href="a00099.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="b50d9716bcdd281df574ec758a3952f9"></a><!-- doxytag: member="tbb::captured_exception::captured_exception" ref="b50d9716bcdd281df574ec758a3952f9" args="(const captured_exception &amp;src)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>captured_exception</b> (const <a class="el" href="a00157.html">captured_exception</a> &amp;src)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="179076f146699724f652a9b1e81bd791"></a><!-- doxytag: member="tbb::captured_exception::captured_exception" ref="179076f146699724f652a9b1e81bd791" args="(const char *name_, const char *info)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>captured_exception</b> (const char *name_, const char *info)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e95b8eb99ec9dea22934ac528a08654c"></a><!-- doxytag: member="tbb::captured_exception::operator=" ref="e95b8eb99ec9dea22934ac528a08654c" args="(const captured_exception &amp;src)" -->
+<a class="el" href="a00157.html">captured_exception</a> &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><b>operator=</b> (const <a class="el" href="a00157.html">captured_exception</a> &amp;src)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="el" href="a00157.html">captured_exception</a> *__TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00157.html#837a50b8f6a800bda225c39d1699643f">move</a> ()  throw ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Creates and returns pointer to the deep copy of this exception object.  <a href="#837a50b8f6a800bda225c39d1699643f"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00157.html#93d875d3555502ff6f18513525de204c">destroy</a> ()  throw ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destroys objects created by the <a class="el" href="a00157.html#837a50b8f6a800bda225c39d1699643f">move()</a> method.  <a href="#93d875d3555502ff6f18513525de204c"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00157.html#2dd1be66ab32fa27e0ddef5707fa67ef">throw_self</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Throws this exception object.  <a href="#2dd1be66ab32fa27e0ddef5707fa67ef"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="5af82fd677449c5ca727fa1d7e16f9f5"></a><!-- doxytag: member="tbb::captured_exception::name" ref="5af82fd677449c5ca727fa1d7e16f9f5" args="() const " -->
+const char *__TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00157.html#5af82fd677449c5ca727fa1d7e16f9f5">name</a> () const   throw ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Returns RTTI name of the originally intercepted exception. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="6b5988ef74a1fe2a58998d110b3633e0"></a><!-- doxytag: member="tbb::captured_exception::what" ref="6b5988ef74a1fe2a58998d110b3633e0" args="() const " -->
+const char *__TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00157.html#6b5988ef74a1fe2a58998d110b3633e0">what</a> () const   throw ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Returns the result of originally intercepted exception's <a class="el" href="a00157.html#6b5988ef74a1fe2a58998d110b3633e0">what()</a> method. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="92ed4100413eef26969f8299f6845e18"></a><!-- doxytag: member="tbb::captured_exception::set" ref="92ed4100413eef26969f8299f6845e18" args="(const char *name, const char *info)" -->
+void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><b>set</b> (const char *name, const char *info)  throw ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="d5f56d05102acdba6c2f09ba6db3baeb"></a><!-- doxytag: member="tbb::captured_exception::clear" ref="d5f56d05102acdba6c2f09ba6db3baeb" args="()" -->
+void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><b>clear</b> ()  throw ()</td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+This class is used by TBB to propagate information about unhandled exceptions into the root thread. 
+<p>
+An exception of this type is thrown by TBB in the root thread (the thread that started a parallel algorithm) if an unhandled exception was intercepted during the algorithm's execution in one of the workers. <dl compact><dt><b>See also:</b></dt><dd><a class="el" href="a00211.html">tbb::tbb_exception</a> </dd></dl>
+
+<p>
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="93d875d3555502ff6f18513525de204c"></a><!-- doxytag: member="tbb::captured_exception::destroy" ref="93d875d3555502ff6f18513525de204c" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void __TBB_EXPORTED_METHOD tbb::captured_exception::destroy           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%">  throw ()<code> [virtual]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Destroys objects created by the <a class="el" href="a00157.html#837a50b8f6a800bda225c39d1699643f">move()</a> method. 
+<p>
+Frees memory and calls destructor for this exception object. Can and must be used only on objects created by the move method. 
+<p>
+Implements <a class="el" href="a00211.html#66c94938eca8bf88b76f3eccaaf215d8">tbb::tbb_exception</a>.
+</div>
+</div><p>
+<a class="anchor" name="837a50b8f6a800bda225c39d1699643f"></a><!-- doxytag: member="tbb::captured_exception::move" ref="837a50b8f6a800bda225c39d1699643f" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="a00157.html">captured_exception</a>* __TBB_EXPORTED_METHOD tbb::captured_exception::move           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%">  throw ()<code> [virtual]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Creates and returns pointer to the deep copy of this exception object. 
+<p>
+Move semantics is allowed. 
+<p>
+Implements <a class="el" href="a00211.html#3e3482bf264d4ca4dde046cd9c02c766">tbb::tbb_exception</a>.
+</div>
+</div><p>
+<a class="anchor" name="2dd1be66ab32fa27e0ddef5707fa67ef"></a><!-- doxytag: member="tbb::captured_exception::throw_self" ref="2dd1be66ab32fa27e0ddef5707fa67ef" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void tbb::captured_exception::throw_self           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline, virtual]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Throws this exception object. 
+<p>
+Make sure that if you have several levels of derivation from this interface, you implement or override this method at the most derived level. The implementation is as simple as "throw *this;". Failing to do so results in an exception of a base class type being thrown. 
+<p>
+Implements <a class="el" href="a00211.html#8588e07fa49692f4d734e4f2e4f048f4">tbb::tbb_exception</a>.
+</div>
+</div><p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00464.html">tbb_exception.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
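
A sketch of what catching this type looks like from the root thread follows. The Worker functor and its failure condition are invented for the example, and whether the original exception type or a captured_exception actually arrives depends on whether exact exception propagation was enabled when TBB was built, hence the two catch clauses.

    // Sketch only: an exception thrown inside a worker is re-raised in the
    // thread that started the parallel algorithm.
    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"
    #include "tbb/tbb_exception.h"
    #include <stdexcept>
    #include <cstdio>

    struct Worker {
        void operator()( const tbb::blocked_range<int>& r ) const {
            for( int i = r.begin(); i != r.end(); ++i )
                if( i == 42 )                                   // hypothetical failure condition
                    throw std::runtime_error( "bad element" );
        }
    };

    void run() {
        try {
            tbb::parallel_for( tbb::blocked_range<int>( 0, 1000 ), Worker() );
        } catch( tbb::captured_exception& e ) {
            std::printf( "captured: %s: %s\n", e.name(), e.what() );  // summary of the original exception
        } catch( std::exception& e ) {
            std::printf( "exact exception: %s\n", e.what() );         // exact-propagation build
        }
    }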
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00157.png b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00157.png
new file mode 100644 (file)
index 0000000..2470bea
Binary files /dev/null and b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00157.png differ
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00158.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00158.html
new file mode 100644 (file)
index 0000000..3be6782
--- /dev/null
@@ -0,0 +1,84 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::combinable&lt; T &gt; Class Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00158.html">combinable</a></div>
+<h1>tbb::combinable&lt; T &gt; Class Template Reference<br>
+<small>
+[<a class="el" href="a00281.html">Containers</a>]</small>
+</h1><!-- doxytag: class="tbb::combinable" -->Thread-local storage with optional reduction.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00330.html">combinable.h</a>&gt;</code>
+<p>
+<a href="a00011.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="d03619e90e62555aa5634fcc8adadcc9"></a><!-- doxytag: member="tbb::combinable::combinable" ref="d03619e90e62555aa5634fcc8adadcc9" args="(finit _finit)" -->
+template&lt;typename finit&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>combinable</b> (finit _finit)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="2c87e79ae98588a5780f708773388843"></a><!-- doxytag: member="tbb::combinable::~combinable" ref="2c87e79ae98588a5780f708773388843" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00158.html#2c87e79ae98588a5780f708773388843">~combinable</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">destructor <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="57012803b7bc7416452cb930121ff210"></a><!-- doxytag: member="tbb::combinable::combinable" ref="57012803b7bc7416452cb930121ff210" args="(const combinable &amp;other)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>combinable</b> (const <a class="el" href="a00158.html">combinable</a> &amp;other)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="c9650d59f65b1042afa232ede57dfca9"></a><!-- doxytag: member="tbb::combinable::operator=" ref="c9650d59f65b1042afa232ede57dfca9" args="(const combinable &amp;other)" -->
+<a class="el" href="a00158.html">combinable</a> &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><b>operator=</b> (const <a class="el" href="a00158.html">combinable</a> &amp;other)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f09233cccb9b5e8a58874e43a703cac2"></a><!-- doxytag: member="tbb::combinable::clear" ref="f09233cccb9b5e8a58874e43a703cac2" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><b>clear</b> ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="2d7550204bcb88fa8810b5a39e9cad24"></a><!-- doxytag: member="tbb::combinable::local" ref="2d7550204bcb88fa8810b5a39e9cad24" args="()" -->
+T &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><b>local</b> ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="93a42052881ecccdd8ddff16a1b1cca1"></a><!-- doxytag: member="tbb::combinable::local" ref="93a42052881ecccdd8ddff16a1b1cca1" args="(bool &amp;exists)" -->
+T &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><b>local</b> (bool &amp;exists)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="9be306b4a073066dcac4043dc189c514"></a><!-- doxytag: member="tbb::combinable::combine" ref="9be306b4a073066dcac4043dc189c514" args="(combine_func_t f_combine)" -->
+template&lt;typename combine_func_t&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">T&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>combine</b> (combine_func_t f_combine)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="dcd23283834149c87c345c0e476dfee8"></a><!-- doxytag: member="tbb::combinable::combine_each" ref="dcd23283834149c87c345c0e476dfee8" args="(combine_func_t f_combine)" -->
+template&lt;typename combine_func_t&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>combine_each</b> (combine_func_t f_combine)</td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename T&gt;<br>
+ class tbb::combinable&lt; T &gt;</h3>
+
+Thread-local storage with optional reduction. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00330.html">combinable.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
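
A small sketch of the usual pattern, thread-local accumulation followed by an explicit reduction, is shown below; the summation and the plain functions used as initializer and combiner are example choices, with the finit-style constructor used so every thread-local slot starts at zero.

    // Sketch only: per-thread partial sums merged with combine().
    #include "tbb/combinable.h"
    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"

    long zero() { return 0; }
    long add( long a, long b ) { return a + b; }

    static tbb::combinable<long> partial_sum( zero );   // each thread's slot is initialized to 0

    struct Accumulate {
        void operator()( const tbb::blocked_range<long>& r ) const {
            long& local = partial_sum.local();           // this thread's private accumulator
            for( long i = r.begin(); i != r.end(); ++i )
                local += i;
        }
    };

    long sum_upto( long n ) {
        partial_sum.clear();
        tbb::parallel_for( tbb::blocked_range<long>( 0, n ), Accumulate() );
        return partial_sum.combine( add );               // reduce all thread-local values
    }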
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00159.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00159.html
new file mode 100644 (file)
index 0000000..02db9b4
--- /dev/null
@@ -0,0 +1,306 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::concurrent_bounded_queue&lt; T, A &gt; Class Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00159.html">concurrent_bounded_queue</a></div>
+<h1>tbb::concurrent_bounded_queue&lt; T, A &gt; Class Template Reference<br>
+<small>
+[<a class="el" href="a00281.html">Containers</a>]</small>
+</h1><!-- doxytag: class="tbb::concurrent_bounded_queue" -->A high-performance thread-safe blocking concurrent bounded queue.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00341.html">concurrent_queue.h</a>&gt;</code>
+<p>
+<p>Inheritance diagram for tbb::concurrent_bounded_queue&lt; T, A &gt;:
+<p><center><img src="a00159.png" usemap="#tbb::concurrent_bounded_queue< T, A >_map" border="0" alt=""></center>
+<map name="tbb::concurrent_bounded_queue< T, A >_map">
+<area href="a00165.html" alt="tbb::deprecated::concurrent_queue< T, A >" shape="rect" coords="0,56,257,80">
+</map>
+<a href="a00019.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="98245517a931e5893f6601e66c51fc75"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::value_type" ref="98245517a931e5893f6601e66c51fc75" args="" -->
+typedef T&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00159.html#98245517a931e5893f6601e66c51fc75">value_type</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Element type in the queue. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="2e2726fccf6d975dc1071608cc0bbf90"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::allocator_type" ref="2e2726fccf6d975dc1071608cc0bbf90" args="" -->
+typedef A&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00159.html#2e2726fccf6d975dc1071608cc0bbf90">allocator_type</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Allocator type. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="dcd44ca6a88c0dc7a847a47a10811f0c"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::reference" ref="dcd44ca6a88c0dc7a847a47a10811f0c" args="" -->
+typedef T &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00159.html#dcd44ca6a88c0dc7a847a47a10811f0c">reference</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Reference type. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="796713d0b9ba93a4721cbe13e4474068"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::const_reference" ref="796713d0b9ba93a4721cbe13e4474068" args="" -->
+typedef const T &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00159.html#796713d0b9ba93a4721cbe13e4474068">const_reference</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Const reference type. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">typedef std::ptrdiff_t&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00159.html#a80e4c11dbb324e4b92a24a77bbcde68">size_type</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Integral type for representing size of the queue.  <a href="#a80e4c11dbb324e4b92a24a77bbcde68"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="4b45c91297e69515d83d5eef85ae1f49"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::difference_type" ref="4b45c91297e69515d83d5eef85ae1f49" args="" -->
+typedef std::ptrdiff_t&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00159.html#4b45c91297e69515d83d5eef85ae1f49">difference_type</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Difference type for iterator. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="aeea2ad38b7e7fc2866f063dfed24c52"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::iterator" ref="aeea2ad38b7e7fc2866f063dfed24c52" args="" -->
+typedef internal::concurrent_queue_iterator&lt;<br>
+ <a class="el" href="a00159.html">concurrent_bounded_queue</a>,<br>
+ T &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>iterator</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="0edd1a90e6ffa503bac1c4027116dbff"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::const_iterator" ref="0edd1a90e6ffa503bac1c4027116dbff" args="" -->
+typedef internal::concurrent_queue_iterator&lt;<br>
+ <a class="el" href="a00159.html">concurrent_bounded_queue</a>,<br>
+ const T &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_iterator</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e3525226732963ff0f13e89d8e6dab5b"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::concurrent_bounded_queue" ref="e3525226732963ff0f13e89d8e6dab5b" args="(const allocator_type &amp;a=allocator_type())" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00159.html#e3525226732963ff0f13e89d8e6dab5b">concurrent_bounded_queue</a> (const <a class="el" href="a00159.html#2e2726fccf6d975dc1071608cc0bbf90">allocator_type</a> &amp;a=<a class="el" href="a00159.html#2e2726fccf6d975dc1071608cc0bbf90">allocator_type</a>())</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct empty queue. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="7b7fb414d2eaa8a7f5d68fc4cd63ac50"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::concurrent_bounded_queue" ref="7b7fb414d2eaa8a7f5d68fc4cd63ac50" args="(const concurrent_bounded_queue &amp;src, const allocator_type &amp;a=allocator_type())" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00159.html#7b7fb414d2eaa8a7f5d68fc4cd63ac50">concurrent_bounded_queue</a> (const <a class="el" href="a00159.html">concurrent_bounded_queue</a> &amp;src, const <a class="el" href="a00159.html#2e2726fccf6d975dc1071608cc0bbf90">allocator_type</a> &amp;a=<a class="el" href="a00159.html#2e2726fccf6d975dc1071608cc0bbf90">allocator_type</a>())</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Copy constructor. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="a5e04dcd7db9fd9b583b4e7df832246a"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::concurrent_bounded_queue" ref="a5e04dcd7db9fd9b583b4e7df832246a" args="(InputIterator begin, InputIterator end, const allocator_type &amp;a=allocator_type())" -->
+template&lt;typename InputIterator&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00159.html#a5e04dcd7db9fd9b583b4e7df832246a">concurrent_bounded_queue</a> (InputIterator begin, InputIterator end, const <a class="el" href="a00159.html#2e2726fccf6d975dc1071608cc0bbf90">allocator_type</a> &amp;a=<a class="el" href="a00159.html#2e2726fccf6d975dc1071608cc0bbf90">allocator_type</a>())</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">[begin,end) constructor <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="acaf5b510dc0dfc7780b8c956cf773cf"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::~concurrent_bounded_queue" ref="acaf5b510dc0dfc7780b8c956cf773cf" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00159.html#acaf5b510dc0dfc7780b8c956cf773cf">~concurrent_bounded_queue</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destroy queue. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ceb08c743b11ba88c878e73fff8af20b"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::push" ref="ceb08c743b11ba88c878e73fff8af20b" args="(const T &amp;source)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00159.html#ceb08c743b11ba88c878e73fff8af20b">push</a> (const T &amp;source)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Enqueue an item at tail of queue. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00159.html#41f4c6bd7a82ab070e840bbf81b0b123">pop</a> (T &amp;destination)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Dequeue item from head of queue.  <a href="#41f4c6bd7a82ab070e840bbf81b0b123"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00159.html#2bd6232531279fb3ccbd296bea23066b">try_push</a> (const T &amp;source)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Enqueue an item at tail of queue if queue is not already full.  <a href="#2bd6232531279fb3ccbd296bea23066b"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00159.html#0ca487019bbb00a196442aff78a1e4f7">try_pop</a> (T &amp;destination)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Attempt to dequeue an item from head of queue.  <a href="#0ca487019bbb00a196442aff78a1e4f7"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="el" href="a00159.html#a80e4c11dbb324e4b92a24a77bbcde68">size_type</a>&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00159.html#7dc14d1a579a4cccda9f857585e1768d">size</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Return number of pushes minus number of pops.  <a href="#7dc14d1a579a4cccda9f857585e1768d"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f64924f2ee9225c368a270fc3c394db9"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::empty" ref="f64924f2ee9225c368a270fc3c394db9" args="() const " -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00159.html#f64924f2ee9225c368a270fc3c394db9">empty</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Equivalent to <a class="el" href="a00159.html#7dc14d1a579a4cccda9f857585e1768d">size()</a>&lt;=0. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="b2888b3e4e837d7e03f2c731963a402b"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::capacity" ref="b2888b3e4e837d7e03f2c731963a402b" args="() const " -->
+<a class="el" href="a00159.html#a80e4c11dbb324e4b92a24a77bbcde68">size_type</a>&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00159.html#b2888b3e4e837d7e03f2c731963a402b">capacity</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Maximum number of allowed elements. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00159.html#f3c6c934f85fd02aedbc83a16943193b">set_capacity</a> (<a class="el" href="a00159.html#a80e4c11dbb324e4b92a24a77bbcde68">size_type</a> new_capacity)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Set the capacity.  <a href="#f3c6c934f85fd02aedbc83a16943193b"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="415eb87e53b1c6a266de06ecbc490d16"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::get_allocator" ref="415eb87e53b1c6a266de06ecbc490d16" args="() const " -->
+<a class="el" href="a00159.html#2e2726fccf6d975dc1071608cc0bbf90">allocator_type</a>&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00159.html#415eb87e53b1c6a266de06ecbc490d16">get_allocator</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">return allocator object <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="90b31e2954c6e4596c7900435a5f4bc1"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::clear" ref="90b31e2954c6e4596c7900435a5f4bc1" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00159.html#90b31e2954c6e4596c7900435a5f4bc1">clear</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">clear the queue. not thread-safe. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="5a2637188e104503ac2c0dff633014e8"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::unsafe_begin" ref="5a2637188e104503ac2c0dff633014e8" args="()" -->
+iterator&nbsp;</td><td class="memItemRight" valign="bottom"><b>unsafe_begin</b> ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="c7267334ef11087c76c810abb9d5cbc2"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::unsafe_end" ref="c7267334ef11087c76c810abb9d5cbc2" args="()" -->
+iterator&nbsp;</td><td class="memItemRight" valign="bottom"><b>unsafe_end</b> ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="8be757841d0bbf7744508aaf6a141d08"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::unsafe_begin" ref="8be757841d0bbf7744508aaf6a141d08" args="() const " -->
+const_iterator&nbsp;</td><td class="memItemRight" valign="bottom"><b>unsafe_begin</b> () const </td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="3aac91d288bf227d9d06717b44ef28f7"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::unsafe_end" ref="3aac91d288bf227d9d06717b44ef28f7" args="() const " -->
+const_iterator&nbsp;</td><td class="memItemRight" valign="bottom"><b>unsafe_end</b> () const </td></tr>
+
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="05bffad9626d51abbd4bb82ae2a1cceb"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::internal::concurrent_queue_iterator" ref="05bffad9626d51abbd4bb82ae2a1cceb" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::concurrent_queue_iterator</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><b>destroyer</b></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Class used to ensure exception-safety of method "pop". <br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename T, class A = cache_aligned_allocator&lt;T&gt;&gt;<br>
+ class tbb::concurrent_bounded_queue&lt; T, A &gt;</h3>
+
+A high-performance thread-safe blocking concurrent bounded queue. 
+<p>
+This is the pre-PPL TBB concurrent queue which supports boundedness and blocking semantics. Note that method names agree with the PPL-style concurrent queue. Multiple threads may each push and pop concurrently. Assignment construction is not allowed. 
+<p>
+<hr><h2>Member Typedef Documentation</h2>
+<a class="anchor" name="a80e4c11dbb324e4b92a24a77bbcde68"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::size_type" ref="a80e4c11dbb324e4b92a24a77bbcde68" args="" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename T, class A = cache_aligned_allocator&lt;T&gt;&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">typedef std::ptrdiff_t <a class="el" href="a00159.html">tbb::concurrent_bounded_queue</a>&lt; T, A &gt;::<a class="el" href="a00159.html#a80e4c11dbb324e4b92a24a77bbcde68">size_type</a>          </td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Integral type for representing size of the queue. 
+<p>
+Note that size_type is a signed integral type, because the size can be negative when there are pending pops without corresponding pushes. 
+</div>
+</div><p>
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="41f4c6bd7a82ab070e840bbf81b0b123"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::pop" ref="41f4c6bd7a82ab070e840bbf81b0b123" args="(T &amp;destination)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename T, class A = cache_aligned_allocator&lt;T&gt;&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">void <a class="el" href="a00159.html">tbb::concurrent_bounded_queue</a>&lt; T, A &gt;::pop           </td>
+          <td>(</td>
+          <td class="paramtype">T &amp;&nbsp;</td>
+          <td class="paramname"> <em>destination</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Dequeue item from head of queue. 
+<p>
+Block until an item becomes available, and then dequeue it. 
+</div>
+</div><p>
+<a class="anchor" name="f3c6c934f85fd02aedbc83a16943193b"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::set_capacity" ref="f3c6c934f85fd02aedbc83a16943193b" args="(size_type new_capacity)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename T, class A = cache_aligned_allocator&lt;T&gt;&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">void <a class="el" href="a00159.html">tbb::concurrent_bounded_queue</a>&lt; T, A &gt;::set_capacity           </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="a00159.html#a80e4c11dbb324e4b92a24a77bbcde68">size_type</a>&nbsp;</td>
+          <td class="paramname"> <em>new_capacity</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Set the capacity. 
+<p>
+Setting the capacity to 0 causes subsequent try_push operations to always fail, and subsequent push operations to block forever. 
+</div>
+</div><p>
+<a class="anchor" name="7dc14d1a579a4cccda9f857585e1768d"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::size" ref="7dc14d1a579a4cccda9f857585e1768d" args="() const " -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename T, class A = cache_aligned_allocator&lt;T&gt;&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="a00159.html#a80e4c11dbb324e4b92a24a77bbcde68">size_type</a> <a class="el" href="a00159.html">tbb::concurrent_bounded_queue</a>&lt; T, A &gt;::size           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"> const<code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Return number of pushes minus number of pops. 
+<p>
+Note that the result can be negative if there are pops waiting for the corresponding pushes. The result can also exceed <a class="el" href="a00159.html#b2888b3e4e837d7e03f2c731963a402b">capacity()</a> if there are push operations in flight. 
+</div>
+</div><p>
+<a class="anchor" name="0ca487019bbb00a196442aff78a1e4f7"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::try_pop" ref="0ca487019bbb00a196442aff78a1e4f7" args="(T &amp;destination)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename T, class A = cache_aligned_allocator&lt;T&gt;&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">bool <a class="el" href="a00159.html">tbb::concurrent_bounded_queue</a>&lt; T, A &gt;::try_pop           </td>
+          <td>(</td>
+          <td class="paramtype">T &amp;&nbsp;</td>
+          <td class="paramname"> <em>destination</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Attempt to dequeue an item from head of queue. 
+<p>
+Does not wait for an item to become available. Returns true if successful; false otherwise. 
+</div>
+</div><p>
+<a class="anchor" name="2bd6232531279fb3ccbd296bea23066b"></a><!-- doxytag: member="tbb::concurrent_bounded_queue::try_push" ref="2bd6232531279fb3ccbd296bea23066b" args="(const T &amp;source)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename T, class A = cache_aligned_allocator&lt;T&gt;&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">bool <a class="el" href="a00159.html">tbb::concurrent_bounded_queue</a>&lt; T, A &gt;::try_push           </td>
+          <td>(</td>
+          <td class="paramtype">const T &amp;&nbsp;</td>
+          <td class="paramname"> <em>source</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Enqueue an item at tail of queue if queue is not already full. 
+<p>
+Does not wait for space to become available. Returns true if the item was pushed; false if the queue was already full. 
+</div>
+</div><p>
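+<p>A small non-blocking sketch (illustrative, not part of the generated reference): <code>try_push</code> fails instead of blocking when the capacity is reached, and <code>try_pop</code> fails instead of blocking on an empty queue.</p>
+<pre>
+#include "tbb/concurrent_queue.h"
+#include &lt;cassert&gt;
+
+int main() {
+    tbb::concurrent_bounded_queue&lt;int&gt; q;
+    q.set_capacity(1);
+    bool pushed_first  = q.try_push(42);   // true: the queue had room
+    bool pushed_second = q.try_push(43);   // false: capacity reached, returns immediately
+    int v;
+    bool popped = q.try_pop(v);            // true and v == 42; false would mean the queue was empty
+    assert(pushed_first &amp;&amp; !pushed_second &amp;&amp; popped &amp;&amp; v == 42);
+    return 0;
+}
+</pre>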
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00341.html">concurrent_queue.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00159.png b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00159.png
new file mode 100644 (file)
index 0000000..e9d90a2
Binary files /dev/null and b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00159.png differ
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00160.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00160.html
new file mode 100644 (file)
index 0000000..c0b6154
--- /dev/null
@@ -0,0 +1,646 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt; Class Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<b>interface4</b>::<a class="el" href="a00160.html">concurrent_hash_map</a></div>
+<h1>tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt; Class Template Reference<br>
+<small>
+[<a class="el" href="a00281.html">Containers</a>]</small>
+</h1><!-- doxytag: class="tbb::interface4::concurrent_hash_map" -->Unordered map from Key to T.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00331.html">concurrent_hash_map.h</a>&gt;</code>
+<p>
+<a href="a00013.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="33b3b94e17dab45c97b246bca3625655"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::key_type" ref="33b3b94e17dab45c97b246bca3625655" args="" -->
+typedef Key&nbsp;</td><td class="memItemRight" valign="bottom"><b>key_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a5d75ef053d5e69de88ae7e941a743ec"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::mapped_type" ref="a5d75ef053d5e69de88ae7e941a743ec" args="" -->
+typedef T&nbsp;</td><td class="memItemRight" valign="bottom"><b>mapped_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="01ebd1df398d3823ca6ea8fcb6b068ae"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::value_type" ref="01ebd1df398d3823ca6ea8fcb6b068ae" args="" -->
+typedef std::pair&lt; const Key,<br>
+ T &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>value_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ae6bb63bc9cf97d63e78d208ae0e479d"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::size_type" ref="ae6bb63bc9cf97d63e78d208ae0e479d" args="" -->
+typedef hash_map_base::size_type&nbsp;</td><td class="memItemRight" valign="bottom"><b>size_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="2f5093a93592b0a30fda935711f9f788"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::difference_type" ref="2f5093a93592b0a30fda935711f9f788" args="" -->
+typedef ptrdiff_t&nbsp;</td><td class="memItemRight" valign="bottom"><b>difference_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="1d1ffb83cae339007dc8d5b2e4e3cea8"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::pointer" ref="1d1ffb83cae339007dc8d5b2e4e3cea8" args="" -->
+typedef value_type *&nbsp;</td><td class="memItemRight" valign="bottom"><b>pointer</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ecf3dc1d6d8290a71639967ffa933343"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::const_pointer" ref="ecf3dc1d6d8290a71639967ffa933343" args="" -->
+typedef const value_type *&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_pointer</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="14a3d74308b04a355b0689eb019ee683"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::reference" ref="14a3d74308b04a355b0689eb019ee683" args="" -->
+typedef value_type &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><b>reference</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="1f6fcb68889829bca708d57f7b9e2da4"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::const_reference" ref="1f6fcb68889829bca708d57f7b9e2da4" args="" -->
+typedef const value_type &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_reference</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="118b95ebef79ac195fb99babbf335e8c"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::iterator" ref="118b95ebef79ac195fb99babbf335e8c" args="" -->
+typedef internal::hash_map_iterator&lt;<br>
+ <a class="el" href="a00160.html">concurrent_hash_map</a>, value_type &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>iterator</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="5fec1af5576448740eb5c6119a30c6a9"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::const_iterator" ref="5fec1af5576448740eb5c6119a30c6a9" args="" -->
+typedef internal::hash_map_iterator&lt;<br>
+ <a class="el" href="a00160.html">concurrent_hash_map</a>, const <br>
+value_type &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_iterator</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="aa3d1bdabd5e9a15174624946048a2c8"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::range_type" ref="aa3d1bdabd5e9a15174624946048a2c8" args="" -->
+typedef internal::hash_map_range&lt;<br>
+ iterator &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>range_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="20981e8295ff79f4e3d8b76a16c42851"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::const_range_type" ref="20981e8295ff79f4e3d8b76a16c42851" args="" -->
+typedef internal::hash_map_range&lt;<br>
+ const_iterator &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_range_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f8d6e7e809e96f48fd039a54fc4df630"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::allocator_type" ref="f8d6e7e809e96f48fd039a54fc4df630" args="" -->
+typedef Allocator&nbsp;</td><td class="memItemRight" valign="bottom"><b>allocator_type</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="d827bb5e4f61de1916ab67d51c7c6e60"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::concurrent_hash_map" ref="d827bb5e4f61de1916ab67d51c7c6e60" args="(const allocator_type &amp;a=allocator_type())" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#d827bb5e4f61de1916ab67d51c7c6e60">concurrent_hash_map</a> (const allocator_type &amp;a=allocator_type())</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct empty table. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a4612d5c7233712d455496641e9b31ff"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::concurrent_hash_map" ref="a4612d5c7233712d455496641e9b31ff" args="(size_type n, const allocator_type &amp;a=allocator_type())" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#a4612d5c7233712d455496641e9b31ff">concurrent_hash_map</a> (size_type n, const allocator_type &amp;a=allocator_type())</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct empty table with n preallocated buckets. This number serves also as initial concurrency level. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="df0cd14eaddb17f10929c91519e65be9"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::concurrent_hash_map" ref="df0cd14eaddb17f10929c91519e65be9" args="(const concurrent_hash_map &amp;table, const allocator_type &amp;a=allocator_type())" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#df0cd14eaddb17f10929c91519e65be9">concurrent_hash_map</a> (const <a class="el" href="a00160.html">concurrent_hash_map</a> &amp;table, const allocator_type &amp;a=allocator_type())</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Copy constructor. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="3bfe75fcb350ce39cf610c164f233edc"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::concurrent_hash_map" ref="3bfe75fcb350ce39cf610c164f233edc" args="(I first, I last, const allocator_type &amp;a=allocator_type())" -->
+template&lt;typename I&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00160.html#3bfe75fcb350ce39cf610c164f233edc">concurrent_hash_map</a> (I first, I last, const allocator_type &amp;a=allocator_type())</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construction with copying iteration range and given allocator instance. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="2c0c42a2e1b5282b6739157df9ce2304"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::operator=" ref="2c0c42a2e1b5282b6739157df9ce2304" args="(const concurrent_hash_map &amp;table)" -->
+<a class="el" href="a00160.html">concurrent_hash_map</a> &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#2c0c42a2e1b5282b6739157df9ce2304">operator=</a> (const <a class="el" href="a00160.html">concurrent_hash_map</a> &amp;table)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Assignment. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#13f3f2e8de7564be03882c31559493c9">rehash</a> (size_type n=0)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Rehashes and optionally resizes the whole table.  <a href="#13f3f2e8de7564be03882c31559493c9"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a9f89be8fe28835749529d91081a2511"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::clear" ref="a9f89be8fe28835749529d91081a2511" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#a9f89be8fe28835749529d91081a2511">clear</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Clear table. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a1ac58997d8fbf242b266e3691573481"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::~concurrent_hash_map" ref="a1ac58997d8fbf242b266e3691573481" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#a1ac58997d8fbf242b266e3691573481">~concurrent_hash_map</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Clear table and destroy it. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="1d279026ce93bbef47a5b8a028cd387b"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::range" ref="1d279026ce93bbef47a5b8a028cd387b" args="(size_type grainsize=1)" -->
+range_type&nbsp;</td><td class="memItemRight" valign="bottom"><b>range</b> (size_type grainsize=1)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="728c7aef8f1326f3f9ca6b4a6d1155cf"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::range" ref="728c7aef8f1326f3f9ca6b4a6d1155cf" args="(size_type grainsize=1) const " -->
+const_range_type&nbsp;</td><td class="memItemRight" valign="bottom"><b>range</b> (size_type grainsize=1) const </td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="4a3c58cf1234b74ca796dcf555d32f53"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::begin" ref="4a3c58cf1234b74ca796dcf555d32f53" args="()" -->
+iterator&nbsp;</td><td class="memItemRight" valign="bottom"><b>begin</b> ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="28c690486d8db5783475f5b1a59d21bc"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::end" ref="28c690486d8db5783475f5b1a59d21bc" args="()" -->
+iterator&nbsp;</td><td class="memItemRight" valign="bottom"><b>end</b> ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="aa51c059c92aaf259916be74b928adb2"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::begin" ref="aa51c059c92aaf259916be74b928adb2" args="() const " -->
+const_iterator&nbsp;</td><td class="memItemRight" valign="bottom"><b>begin</b> () const </td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="d2610bf00a3f631719308bdb59876f08"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::end" ref="d2610bf00a3f631719308bdb59876f08" args="() const " -->
+const_iterator&nbsp;</td><td class="memItemRight" valign="bottom"><b>end</b> () const </td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="5faacd9a290b122947a21f72c676a0b9"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::equal_range" ref="5faacd9a290b122947a21f72c676a0b9" args="(const Key &amp;key)" -->
+std::pair&lt; iterator, iterator &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>equal_range</b> (const Key &amp;key)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="4abad08a1788b57a78698f16fab92838"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::equal_range" ref="4abad08a1788b57a78698f16fab92838" args="(const Key &amp;key) const " -->
+std::pair&lt; const_iterator,<br>
+ const_iterator &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>equal_range</b> (const Key &amp;key) const </td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="17fd8c5fe8c6a86075f34aa4e8412ba3"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::size" ref="17fd8c5fe8c6a86075f34aa4e8412ba3" args="() const " -->
+size_type&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#17fd8c5fe8c6a86075f34aa4e8412ba3">size</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Number of items in table. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="6cab7d029a3e73a653ef0faeac4d1586"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::empty" ref="6cab7d029a3e73a653ef0faeac4d1586" args="() const " -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#6cab7d029a3e73a653ef0faeac4d1586">empty</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">True if <a class="el" href="a00160.html#17fd8c5fe8c6a86075f34aa4e8412ba3">size()</a>==0. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="1e45d3cbd1e2ae06f365f1b48e0df0b5"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::max_size" ref="1e45d3cbd1e2ae06f365f1b48e0df0b5" args="() const " -->
+size_type&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#1e45d3cbd1e2ae06f365f1b48e0df0b5">max_size</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Upper bound on size. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="414d15033d36c63aa3a40666dc4d6f5e"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::bucket_count" ref="414d15033d36c63aa3a40666dc4d6f5e" args="() const " -->
+size_type&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#414d15033d36c63aa3a40666dc4d6f5e">bucket_count</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Returns the current number of buckets. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="199208eed6f09e200cda364f906be0fe"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::get_allocator" ref="199208eed6f09e200cda364f906be0fe" args="() const " -->
+allocator_type&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#199208eed6f09e200cda364f906be0fe">get_allocator</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">return allocator object <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="eddb0d2efe0b4f25a85c059e1c3dac15"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::swap" ref="eddb0d2efe0b4f25a85c059e1c3dac15" args="(concurrent_hash_map &amp;table)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#eddb0d2efe0b4f25a85c059e1c3dac15">swap</a> (<a class="el" href="a00160.html">concurrent_hash_map</a> &amp;table)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">swap two instances. Iterators are invalidated <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="74f5ef06a06c5e619f156a1c76c04969"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::count" ref="74f5ef06a06c5e619f156a1c76c04969" args="(const Key &amp;key) const " -->
+size_type&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#74f5ef06a06c5e619f156a1c76c04969">count</a> (const Key &amp;key) const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Return count of items (0 or 1). <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#64338d7f2e35df586af4cb0145cd910f">find</a> (<a class="el" href="a00163.html">const_accessor</a> &amp;result, const Key &amp;key) const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Find item and acquire a read lock on the item.  <a href="#64338d7f2e35df586af4cb0145cd910f"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#bce7bdf46435115a95cca2aa73c5da83">find</a> (<a class="el" href="a00161.html">accessor</a> &amp;result, const Key &amp;key)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Find item and acquire a write lock on the item.  <a href="#bce7bdf46435115a95cca2aa73c5da83"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#58c38b27273de6c670568633c0931854">insert</a> (<a class="el" href="a00163.html">const_accessor</a> &amp;result, const Key &amp;key)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Insert item (if not already present) and acquire a read lock on the item.  <a href="#58c38b27273de6c670568633c0931854"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#ccfecaa3e71d92be61fb3d811dd264eb">insert</a> (<a class="el" href="a00161.html">accessor</a> &amp;result, const Key &amp;key)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Insert item (if not already present) and acquire a write lock on the item.  <a href="#ccfecaa3e71d92be61fb3d811dd264eb"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#d4a2816129e38c53128c6d0c7b6b7370">insert</a> (<a class="el" href="a00163.html">const_accessor</a> &amp;result, const value_type &amp;value)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Insert item by copying if there is no such key present already and acquire a read lock on the item.  <a href="#d4a2816129e38c53128c6d0c7b6b7370"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#a657e61cd2b13164764ca2708875784a">insert</a> (<a class="el" href="a00161.html">accessor</a> &amp;result, const value_type &amp;value)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Insert item by copying if there is no such key present already and acquire a write lock on the item.  <a href="#a657e61cd2b13164764ca2708875784a"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#52bffd1066b3d7b793945bc6fa1a71a1">insert</a> (const value_type &amp;value)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Insert item by copying if there is no such key present already.  <a href="#52bffd1066b3d7b793945bc6fa1a71a1"></a><br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="cfe172677e5987004ef4a03e22fa338a"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::insert" ref="cfe172677e5987004ef4a03e22fa338a" args="(I first, I last)" -->
+template&lt;typename I&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00160.html#cfe172677e5987004ef4a03e22fa338a">insert</a> (I first, I last)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Insert range [first, last). <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#034c3b3ee419edee78e0f2f2b1f0d7ca">erase</a> (const Key &amp;key)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Erase item.  <a href="#034c3b3ee419edee78e0f2f2b1f0d7ca"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#da7e4a50f6bb06191817425ec85fe760">erase</a> (<a class="el" href="a00163.html">const_accessor</a> &amp;item_accessor)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Erase item by <a class="el" href="a00163.html">const_accessor</a>.  <a href="#da7e4a50f6bb06191817425ec85fe760"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#0f500842d0cf791f8fa61662edb1b311">erase</a> (<a class="el" href="a00161.html">accessor</a> &amp;item_accessor)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Erase item by accessor.  <a href="#0f500842d0cf791f8fa61662edb1b311"></a><br></td></tr>
+<tr><td colspan="2"><br><h2>Protected Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="6bea55a4e94be2ab299de06dc266f3a3"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::node_allocator_type" ref="6bea55a4e94be2ab299de06dc266f3a3" args="" -->
+typedef Allocator::template <br>
+rebind&lt; node &gt;::other&nbsp;</td><td class="memItemRight" valign="bottom"><b>node_allocator_type</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Protected Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="53a98a10a2adb33d91a286a487d0cd78"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::delete_node" ref="53a98a10a2adb33d91a286a487d0cd78" args="(node_base *n)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><b>delete_node</b> (node_base *n)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="6d30c4c2698ea07ed6dd0e9eaf774b11"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::search_bucket" ref="6d30c4c2698ea07ed6dd0e9eaf774b11" args="(const key_type &amp;key, bucket *b) const " -->
+node *&nbsp;</td><td class="memItemRight" valign="bottom"><b>search_bucket</b> (const key_type &amp;key, bucket *b) const </td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="571d635fd206d9985cf20a1a659ea476"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::rehash_bucket" ref="571d635fd206d9985cf20a1a659ea476" args="(bucket *b_new, const hashcode_t h)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><b>rehash_bucket</b> (bucket *b_new, const hashcode_t h)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="1f22480a290ddc6c145888d8f985531a"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::lookup" ref="1f22480a290ddc6c145888d8f985531a" args="(bool op_insert, const Key &amp;key, const T *t, const_accessor *result, bool write)" -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#1f22480a290ddc6c145888d8f985531a">lookup</a> (bool op_insert, const Key &amp;key, const T *t, <a class="el" href="a00163.html">const_accessor</a> *result, bool write)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Insert or find item and optionally acquire a lock on the item. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="faad2108bd2be75e52293486af59f11e"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::exclude" ref="faad2108bd2be75e52293486af59f11e" args="(const_accessor &amp;item_accessor, bool readonly)" -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#faad2108bd2be75e52293486af59f11e">exclude</a> (<a class="el" href="a00163.html">const_accessor</a> &amp;item_accessor, bool readonly)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">delete item by accessor <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="976c57edfb7f22b9f91a2e11f141eb4a"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::internal_equal_range" ref="976c57edfb7f22b9f91a2e11f141eb4a" args="(const Key &amp;key, I end) const " -->
+template&lt;typename I&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">std::pair&lt; I, I &gt;&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00160.html#976c57edfb7f22b9f91a2e11f141eb4a">internal_equal_range</a> (const Key &amp;key, I end) const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Returns an iterator for an item defined by the key, or for the next item after it (if upper==true). <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="3c27779fe66b79505390d084310d997e"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::internal_copy" ref="3c27779fe66b79505390d084310d997e" args="(const concurrent_hash_map &amp;source)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#3c27779fe66b79505390d084310d997e">internal_copy</a> (const <a class="el" href="a00160.html">concurrent_hash_map</a> &amp;source)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Copy "source" to *this, where *this must start out empty. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="cf43170691e36146a1bff61e3cf895ce"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::internal_copy" ref="cf43170691e36146a1bff61e3cf895ce" args="(I first, I last)" -->
+template&lt;typename I&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>internal_copy</b> (I first, I last)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">const_pointer&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html#2f76ed101a0ccc8875b846c2f747897e">internal_fast_find</a> (const Key &amp;key) const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Fast find when no concurrent erasure is used. For internal use inside TBB only!  <a href="#2f76ed101a0ccc8875b846c2f747897e"></a><br></td></tr>
+<tr><td colspan="2"><br><h2>Protected Attributes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="252e91d8029f6308db7179557e3b1436"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::my_allocator" ref="252e91d8029f6308db7179557e3b1436" args="" -->
+node_allocator_type&nbsp;</td><td class="memItemRight" valign="bottom"><b>my_allocator</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f738f241c8500ce3dbf0f9028ca8b602"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::my_hash_compare" ref="f738f241c8500ce3dbf0f9028ca8b602" args="" -->
+HashCompare&nbsp;</td><td class="memItemRight" valign="bottom"><b>my_hash_compare</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e24acd2f6849db3377a3942807639758"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::internal::hash_map_iterator" ref="e24acd2f6849db3377a3942807639758" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::hash_map_iterator</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="c94f21746c8902f7e0b5115a8d4da1d2"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::internal::hash_map_range" ref="c94f21746c8902f7e0b5115a8d4da1d2" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::hash_map_range</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="2c0028dfa75a6baa14007355ab1ef7fc"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::const_accessor" ref="2c0028dfa75a6baa14007355ab1ef7fc" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_accessor</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00161.html">accessor</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Allows write access to elements and combines data access, locking, and garbage collection.  <a href="a00161.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00162.html">bucket_accessor</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">bucket accessor is to find, rehash, acquire a lock, and access a bucket  <a href="a00162.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00163.html">const_accessor</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Combines data access, locking, and garbage collection.  <a href="a00163.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">struct &nbsp;</td><td class="memItemRight" valign="bottom"><b>node</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename Key, typename T, typename HashCompare, typename Allocator&gt;<br>
+ class tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</h3>
+
+Unordered map from Key to T. 
+<p>
+<a class="el" href="a00160.html">concurrent_hash_map</a> is associative container with concurrent access.<p>
+<dl compact><dt><b>Compatibility</b></dt><dd>The class meets all Container Requirements from C++ Standard (See ISO/IEC 14882:2003(E), clause 23.1).</dd></dl>
+<dl compact><dt><b>Exception Safety</b></dt><dd><ul>
+<li>Hash function is not permitted to throw an exception. User-defined types Key and T are forbidden from throwing an exception in destructors.</li><li>If exception happens during <a class="el" href="a00160.html#58c38b27273de6c670568633c0931854">insert()</a> operations, it has no effect (unless exception raised by HashCompare::hash() function during grow_segment).</li><li>If exception happens during <a class="el" href="a00160.html#2c0c42a2e1b5282b6739157df9ce2304">operator=()</a> operation, the container can have a part of source items, and methods <a class="el" href="a00160.html#17fd8c5fe8c6a86075f34aa4e8412ba3">size()</a> and <a class="el" href="a00160.html#6cab7d029a3e73a653ef0faeac4d1586">empty()</a> can return wrong results.</li></ul>
+</dd></dl>
+<dl compact><dt><b>Changes since TBB 2.1</b></dt><dd><ul>
+<li>Replaced internal algorithm and data structure. Patent is pending.</li><li>Added a bucket-count argument to the constructor</li></ul>
+</dd></dl>
+<dl compact><dt><b>Changes since TBB 2.0</b></dt><dd><ul>
+<li>Fixed exception-safety</li><li>Added template argument for allocator</li><li>Added allocator argument in constructors</li><li>Added constructor from a range of iterators</li><li>Added several new overloaded <a class="el" href="a00160.html#58c38b27273de6c670568633c0931854">insert()</a> methods</li><li>Added <a class="el" href="a00160.html#199208eed6f09e200cda364f906be0fe">get_allocator()</a></li><li>Added <a class="el" href="a00160.html#eddb0d2efe0b4f25a85c059e1c3dac15">swap()</a></li><li>Added <a class="el" href="a00160.html#74f5ef06a06c5e619f156a1c76c04969">count()</a></li><li>Added overloaded <a class="el" href="a00160.html#0f500842d0cf791f8fa61662edb1b311">erase(accessor &amp;)</a> and <a class="el" href="a00160.html#da7e4a50f6bb06191817425ec85fe760">erase(const_accessor&amp;)</a></li><li>Added equal_range() [const]</li><li>Added [const_]pointer, [const_]reference, and allocator_type types</li><li>Added global functions: operator==(), operator!=(), and <a class="el" href="a00160.html#eddb0d2efe0b4f25a85c059e1c3dac15">swap()</a> </li></ul>
+</dd></dl>
+
+<p>
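+<p>An illustrative usage sketch, not part of the generated reference (the typedef <code>table_t</code> is introduced here only for brevity, and the default HashCompare and allocator are assumed): an <a class="el" href="a00161.html">accessor</a> holds a write lock on the element for as long as it is alive, while a <a class="el" href="a00163.html">const_accessor</a> holds a read lock.</p>
+<pre>
+#include "tbb/concurrent_hash_map.h"
+#include &lt;iostream&gt;
+
+typedef tbb::concurrent_hash_map&lt;int,int&gt; table_t;   // default HashCompare and allocator
+
+int main() {
+    table_t table;
+    {
+        table_t::accessor a;              // write lock on the element while 'a' is alive
+        table.insert(a, 1);               // insert key 1 if not already present
+        a-&gt;second = 100;                  // update the mapped value under the lock
+    }                                     // lock released when 'a' goes out of scope
+    {
+        table_t::const_accessor ca;       // read lock
+        if (table.find(ca, 1))
+            std::cout &lt;&lt; ca-&gt;second &lt;&lt; std::endl;   // prints 100
+    }
+    return 0;
+}
+</pre>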
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="0f500842d0cf791f8fa61662edb1b311"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::erase" ref="0f500842d0cf791f8fa61662edb1b311" args="(accessor &amp;item_accessor)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename Key, typename T, typename HashCompare, typename Allocator&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">bool <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map</a>&lt; Key, T, HashCompare, Allocator &gt;::erase           </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="a00161.html">accessor</a> &amp;&nbsp;</td>
+          <td class="paramname"> <em>item_accessor</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Erase item by accessor. 
+<p>
+Return true if the item was erased by this particular call. 
+</div>
+</div><p>
+<a class="anchor" name="da7e4a50f6bb06191817425ec85fe760"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::erase" ref="da7e4a50f6bb06191817425ec85fe760" args="(const_accessor &amp;item_accessor)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename Key, typename T, typename HashCompare, typename Allocator&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">bool <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map</a>&lt; Key, T, HashCompare, Allocator &gt;::erase           </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="a00163.html">const_accessor</a> &amp;&nbsp;</td>
+          <td class="paramname"> <em>item_accessor</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Erase item by <a class="el" href="a00163.html">const_accessor</a>. 
+<p>
+Return true if the item was erased by this particular call. 
+</div>
+</div><p>
+<a class="anchor" name="034c3b3ee419edee78e0f2f2b1f0d7ca"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::erase" ref="034c3b3ee419edee78e0f2f2b1f0d7ca" args="(const Key &amp;key)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename Key, typename T, typename HashCompare, typename A&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">bool <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map</a>&lt; Key, T, HashCompare, A &gt;::erase           </td>
+          <td>(</td>
+          <td class="paramtype">const Key &amp;&nbsp;</td>
+          <td class="paramname"> <em>key</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Erase item. 
+<p>
+Return true if the item was erased by this particular call. 
+</div>
+</div><p>
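+<p>A brief illustrative sketch, not part of the generated reference (the <code>table_t</code> typedef and the <code>remove_examples</code> function are hypothetical names used only for this example): erasing either through an accessor that already points at the item, or directly by key.</p>
+<pre>
+#include "tbb/concurrent_hash_map.h"
+
+typedef tbb::concurrent_hash_map&lt;int,int&gt; table_t;   // illustrative typedef, as in the sketch above
+
+void remove_examples(table_t &amp;table) {
+    table_t::accessor a;
+    if (table.find(a, 1))
+        table.erase(a);              // erase the element 'a' points to; true if this call erased it
+    bool removed = table.erase(2);   // erase by key; false if key 2 was not present
+    (void)removed;
+}
+</pre>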
+<a class="anchor" name="bce7bdf46435115a95cca2aa73c5da83"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::find" ref="bce7bdf46435115a95cca2aa73c5da83" args="(accessor &amp;result, const Key &amp;key)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename Key, typename T, typename HashCompare, typename Allocator&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">bool <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map</a>&lt; Key, T, HashCompare, Allocator &gt;::find           </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="a00161.html">accessor</a> &amp;&nbsp;</td>
+          <td class="paramname"> <em>result</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const Key &amp;&nbsp;</td>
+          <td class="paramname"> <em>key</em></td><td>&nbsp;</td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td><td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Find item and acquire a write lock on the item. 
+<p>
+Return true if item is found, false otherwise. 
+</div>
+</div><p>
+<a class="anchor" name="64338d7f2e35df586af4cb0145cd910f"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::find" ref="64338d7f2e35df586af4cb0145cd910f" args="(const_accessor &amp;result, const Key &amp;key) const " -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename Key, typename T, typename HashCompare, typename Allocator&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">bool <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map</a>&lt; Key, T, HashCompare, Allocator &gt;::find           </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="a00163.html">const_accessor</a> &amp;&nbsp;</td>
+          <td class="paramname"> <em>result</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const Key &amp;&nbsp;</td>
+          <td class="paramname"> <em>key</em></td><td>&nbsp;</td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td><td width="100%"> const<code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Find item and acquire a read lock on the item. 
+<p>
+Return true if item is found, false otherwise. 
+</div>
+</div><p>
+<a class="anchor" name="52bffd1066b3d7b793945bc6fa1a71a1"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::insert" ref="52bffd1066b3d7b793945bc6fa1a71a1" args="(const value_type &amp;value)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename Key, typename T, typename HashCompare, typename Allocator&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">bool <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map</a>&lt; Key, T, HashCompare, Allocator &gt;::insert           </td>
+          <td>(</td>
+          <td class="paramtype">const value_type &amp;&nbsp;</td>
+          <td class="paramname"> <em>value</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Insert item by copying if there is no such key present already. 
+<p>
+Returns true if item is inserted. 
+</div>
+</div><p>
+<a class="anchor" name="a657e61cd2b13164764ca2708875784a"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::insert" ref="a657e61cd2b13164764ca2708875784a" args="(accessor &amp;result, const value_type &amp;value)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename Key, typename T, typename HashCompare, typename Allocator&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">bool <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map</a>&lt; Key, T, HashCompare, Allocator &gt;::insert           </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="a00161.html">accessor</a> &amp;&nbsp;</td>
+          <td class="paramname"> <em>result</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const value_type &amp;&nbsp;</td>
+          <td class="paramname"> <em>value</em></td><td>&nbsp;</td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td><td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Insert item by copying if there is no such key present already and acquire a write lock on the item. 
+<p>
+Returns true if item is new. 
+</div>
+</div><p>
+<a class="anchor" name="d4a2816129e38c53128c6d0c7b6b7370"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::insert" ref="d4a2816129e38c53128c6d0c7b6b7370" args="(const_accessor &amp;result, const value_type &amp;value)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename Key, typename T, typename HashCompare, typename Allocator&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">bool <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map</a>&lt; Key, T, HashCompare, Allocator &gt;::insert           </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="a00163.html">const_accessor</a> &amp;&nbsp;</td>
+          <td class="paramname"> <em>result</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const value_type &amp;&nbsp;</td>
+          <td class="paramname"> <em>value</em></td><td>&nbsp;</td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td><td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Insert item by copying if there is no such key present already and acquire a read lock on the item. 
+<p>
+Returns true if item is new. 
+</div>
+</div><p>
+<a class="anchor" name="ccfecaa3e71d92be61fb3d811dd264eb"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::insert" ref="ccfecaa3e71d92be61fb3d811dd264eb" args="(accessor &amp;result, const Key &amp;key)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename Key, typename T, typename HashCompare, typename Allocator&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">bool <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map</a>&lt; Key, T, HashCompare, Allocator &gt;::insert           </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="a00161.html">accessor</a> &amp;&nbsp;</td>
+          <td class="paramname"> <em>result</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const Key &amp;&nbsp;</td>
+          <td class="paramname"> <em>key</em></td><td>&nbsp;</td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td><td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Insert item (if not already present) and acquire a write lock on the item. 
+<p>
+Returns true if item is new. 
+</div>
+</div><p>
+<a class="anchor" name="58c38b27273de6c670568633c0931854"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::insert" ref="58c38b27273de6c670568633c0931854" args="(const_accessor &amp;result, const Key &amp;key)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename Key, typename T, typename HashCompare, typename Allocator&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">bool <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map</a>&lt; Key, T, HashCompare, Allocator &gt;::insert           </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="a00163.html">const_accessor</a> &amp;&nbsp;</td>
+          <td class="paramname"> <em>result</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const Key &amp;&nbsp;</td>
+          <td class="paramname"> <em>key</em></td><td>&nbsp;</td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td><td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Insert item (if not already present) and acquire a read lock on the item. 
+<p>
+Returns true if item is new. 
+</div>
+</div><p>
+<a class="anchor" name="2f76ed101a0ccc8875b846c2f747897e"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::internal_fast_find" ref="2f76ed101a0ccc8875b846c2f747897e" args="(const Key &amp;key) const " -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename Key, typename T, typename HashCompare, typename Allocator&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">const_pointer <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map</a>&lt; Key, T, HashCompare, Allocator &gt;::internal_fast_find           </td>
+          <td>(</td>
+          <td class="paramtype">const Key &amp;&nbsp;</td>
+          <td class="paramname"> <em>key</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"> const<code> [inline, protected]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Fast find when no concurrent erasure is used. For internal use inside TBB only! 
+<p>
+Return pointer to item with given key, or NULL if no such item exists. Must not be called concurrently with erasure operations. 
+</div>
+</div><p>
+<a class="anchor" name="13f3f2e8de7564be03882c31559493c9"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::rehash" ref="13f3f2e8de7564be03882c31559493c9" args="(size_type n=0)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename Key, typename T, typename HashCompare, typename A&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">void <a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map</a>&lt; Key, T, HashCompare, A &gt;::rehash           </td>
+          <td>(</td>
+          <td class="paramtype">size_type&nbsp;</td>
+          <td class="paramname"> <em>n</em> = <code>0</code>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Rehashes and optionally resizes the whole table. 
+<p>
+Useful for optimizing performance before or after concurrent operations. Also enables use of the concurrent methods <a class="el" href="a00160.html#64338d7f2e35df586af4cb0145cd910f">find()</a> and <a class="el" href="a00160.html#74f5ef06a06c5e619f156a1c76c04969">count()</a> in a serial context. 
+</div>
+</div><p>
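A sketch of the intended call pattern, with the StringTable typedef again purely illustrative:

    #include "tbb/concurrent_hash_map.h"
    #include <string>

    typedef tbb::concurrent_hash_map<std::string,int> StringTable;

    void after_parallel_phase( StringTable& table ) {
        // ... many threads have just finished inserting into 'table' concurrently ...
        table.rehash();   // default n == 0: rehash only, without requesting a larger size
        // Per the note above, find() and count() may now also be used in a serial context.
    }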
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00331.html">concurrent_hash_map.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00161.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00161.html
new file mode 100644 (file)
index 0000000..ced1ab5
--- /dev/null
@@ -0,0 +1,68 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::accessor Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<b>interface4</b>::<a class="el" href="a00160.html">concurrent_hash_map</a>::<a class="el" href="a00161.html">accessor</a></div>
+<h1>tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::accessor Class Reference</h1><!-- doxytag: class="tbb::interface4::concurrent_hash_map::accessor" --><!-- doxytag: inherits="tbb::interface4::concurrent_hash_map::const_accessor" -->Allows write access to elements and combines data access, locking, and garbage collection.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00331.html">concurrent_hash_map.h</a>&gt;</code>
+<p>
+<p>Inheritance diagram for tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::accessor:
+<p><center><img src="a00161.png" usemap="#tbb::interface4::concurrent_hash_map< Key, T, HashCompare, Allocator >::accessor_map" border="0" alt=""></center>
+<map name="tbb::interface4::concurrent_hash_map< Key, T, HashCompare, Allocator >::accessor_map">
+<area href="a00163.html" alt="tbb::interface4::concurrent_hash_map< Key, T, HashCompare, Allocator >::const_accessor" shape="rect" coords="0,0,530,24">
+</map>
+<a href="a00017.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="49eec74f272bab187d176c0d9d16a7fe"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::accessor::value_type" ref="49eec74f272bab187d176c0d9d16a7fe" args="" -->
+typedef concurrent_hash_map::value_type&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00161.html#49eec74f272bab187d176c0d9d16a7fe">value_type</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Type of value. <br></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e8938f0cd1211e88a1d73527ed3636c4"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::accessor::operator *" ref="e8938f0cd1211e88a1d73527ed3636c4" args="() const " -->
+reference&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00161.html#e8938f0cd1211e88a1d73527ed3636c4">operator *</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Return reference to associated value in hash table. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="fcebc32c020202cc37e60eadef157569"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::accessor::operator-&gt;" ref="fcebc32c020202cc37e60eadef157569" args="() const " -->
+pointer&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00161.html#fcebc32c020202cc37e60eadef157569">operator-&gt;</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Return pointer to associated value in hash table. <br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename Key, typename T, typename HashCompare, typename Allocator&gt;<br>
+ class tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::accessor</h3>
+
+Allows write access to elements and combines data access, locking, and garbage collection. 
+<p>
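A sketch of modifying an existing element through an accessor, assuming the same illustrative StringTable instantiation as above:

    #include "tbb/concurrent_hash_map.h"
    #include <string>

    typedef tbb::concurrent_hash_map<std::string,int> StringTable;

    bool bump_if_present( StringTable& table, const std::string& word ) {
        StringTable::accessor a;      // combines lookup result, write lock, and lifetime
        if( !table.find( a, word ) )
            return false;             // key absent; the accessor stays empty
        a->second += 1;               // operator-> gives writable access to the element
        // (*a).first is the key and (*a).second the value; operator* is equivalent
        return true;
    }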
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00331.html">concurrent_hash_map.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00161.png b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00161.png
new file mode 100644 (file)
index 0000000..a40bd94
Binary files /dev/null and b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00161.png differ
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00162.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00162.html
new file mode 100644 (file)
index 0000000..3a5e24e
--- /dev/null
@@ -0,0 +1,68 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::bucket_accessor Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<b>interface4</b>::<a class="el" href="a00160.html">concurrent_hash_map</a>::<a class="el" href="a00162.html">bucket_accessor</a></div>
+<h1>tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::bucket_accessor Class Reference</h1><!-- doxytag: class="tbb::interface4::concurrent_hash_map::bucket_accessor" -->A bucket accessor is used to find, rehash, lock, and access a bucket.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00331.html">concurrent_hash_map.h</a>&gt;</code>
+<p>
+<a href="a00015.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="5aa0b492fb4260a940ea7bda2ef486e2"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::bucket_accessor::bucket_accessor" ref="5aa0b492fb4260a940ea7bda2ef486e2" args="(concurrent_hash_map *base, const hashcode_t h, bool writer=false)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>bucket_accessor</b> (<a class="el" href="a00160.html">concurrent_hash_map</a> *base, const hashcode_t h, bool writer=false)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="26b4fe0ca87a7ad4852cb787db880119"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::bucket_accessor::acquire" ref="26b4fe0ca87a7ad4852cb787db880119" args="(concurrent_hash_map *base, const hashcode_t h, bool writer=false)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00162.html#26b4fe0ca87a7ad4852cb787db880119">acquire</a> (<a class="el" href="a00160.html">concurrent_hash_map</a> *base, const hashcode_t h, bool writer=false)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">find a bucket by masked hashcode, optionally rehash, and acquire the lock <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="fc194e3a186dc935a5fb513cc9f8e898"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::bucket_accessor::is_writer" ref="fc194e3a186dc935a5fb513cc9f8e898" args="()" -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00162.html#fc194e3a186dc935a5fb513cc9f8e898">is_writer</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">check whether bucket is locked for write <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="57c6110bd20e95c06de5a199de988941"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::bucket_accessor::operator()" ref="57c6110bd20e95c06de5a199de988941" args="()" -->
+bucket *&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00162.html#57c6110bd20e95c06de5a199de988941">operator()</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">get bucket pointer <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="8f7f0dc61f528de29d06e6054b4a9835"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::bucket_accessor::upgrade_to_writer" ref="8f7f0dc61f528de29d06e6054b4a9835" args="()" -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>upgrade_to_writer</b> ()</td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename Key, typename T, typename HashCompare, typename Allocator&gt;<br>
+ class tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::bucket_accessor</h3>
+
+A bucket accessor is used to find, rehash, lock, and access a bucket. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00331.html">concurrent_hash_map.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00163.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00163.html
new file mode 100644 (file)
index 0000000..3da1455
--- /dev/null
@@ -0,0 +1,91 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<b>interface4</b>::<a class="el" href="a00160.html">concurrent_hash_map</a>::<a class="el" href="a00163.html">const_accessor</a></div>
+<h1>tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor Class Reference</h1><!-- doxytag: class="tbb::interface4::concurrent_hash_map::const_accessor" -->Combines data access, locking, and garbage collection.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00331.html">concurrent_hash_map.h</a>&gt;</code>
+<p>
+<p>Inheritance diagram for tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor:
+<p><center><img src="a00163.png" usemap="#tbb::interface4::concurrent_hash_map< Key, T, HashCompare, Allocator >::const_accessor_map" border="0" alt=""></center>
+<map name="tbb::interface4::concurrent_hash_map< Key, T, HashCompare, Allocator >::const_accessor_map">
+<area href="a00161.html" alt="tbb::interface4::concurrent_hash_map< Key, T, HashCompare, Allocator >::accessor" shape="rect" coords="0,56,530,80">
+</map>
+<a href="a00016.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="48647ca0d79c1233b997f5768403c926"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::const_accessor::value_type" ref="48647ca0d79c1233b997f5768403c926" args="" -->
+typedef const concurrent_hash_map::value_type&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00163.html#48647ca0d79c1233b997f5768403c926">value_type</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Type of value. <br></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="5cce3104cb0a52e08d2131370871c614"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::const_accessor::empty" ref="5cce3104cb0a52e08d2131370871c614" args="() const " -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00163.html#5cce3104cb0a52e08d2131370871c614">empty</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">True if result is empty. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="d5ce4f88d8870290238a8ad621e6f270"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::const_accessor::release" ref="d5ce4f88d8870290238a8ad621e6f270" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00163.html#d5ce4f88d8870290238a8ad621e6f270">release</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Set to null. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="30f31106840700a4c3664b9cb1c31ca7"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::const_accessor::operator *" ref="30f31106840700a4c3664b9cb1c31ca7" args="() const " -->
+const_reference&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00163.html#30f31106840700a4c3664b9cb1c31ca7">operator *</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Return reference to associated value in hash table. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="3d03a48ecb8cd9549bd8be64b09c9b0d"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::const_accessor::operator-&gt;" ref="3d03a48ecb8cd9549bd8be64b09c9b0d" args="() const " -->
+const_pointer&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00163.html#3d03a48ecb8cd9549bd8be64b09c9b0d">operator-&gt;</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Return pointer to associated value in hash table. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a9ead65cca68d4c49c7ef64d7899a4c8"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::const_accessor::const_accessor" ref="a9ead65cca68d4c49c7ef64d7899a4c8" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00163.html#a9ead65cca68d4c49c7ef64d7899a4c8">const_accessor</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Create empty result. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="752b0c1ec74b94786403a75e42917d01"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::const_accessor::~const_accessor" ref="752b0c1ec74b94786403a75e42917d01" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00163.html#752b0c1ec74b94786403a75e42917d01">~const_accessor</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destroy result after releasing the underlying reference. <br></td></tr>
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="d062d8ecb243a6ec62fa30bca52a1bcf"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::const_accessor::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;" ref="d062d8ecb243a6ec62fa30bca52a1bcf" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ddd77aff56c12366acc02d0f1588a706"></a><!-- doxytag: member="tbb::interface4::concurrent_hash_map::const_accessor::accessor" ref="ddd77aff56c12366acc02d0f1588a706" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>accessor</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename Key, typename T, typename HashCompare, typename Allocator&gt;<br>
+ class tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</h3>
+
+Combines data access, locking, and garbage collection. 
+<p>
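A read-only lookup sketch using const_accessor, with the StringTable instantiation and the -1 default purely illustrative:

    #include "tbb/concurrent_hash_map.h"
    #include <string>

    typedef tbb::concurrent_hash_map<std::string,int> StringTable;

    int value_or_default( const StringTable& table, const std::string& word ) {
        StringTable::const_accessor ca;   // read lock; other readers are not blocked
        if( table.find( ca, word ) )
            return ca->second;            // value_type is const here, so no writes
        return -1;                        // ca.empty() is true when nothing was found
    }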
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00331.html">concurrent_hash_map.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00163.png b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00163.png
new file mode 100644 (file)
index 0000000..f39e102
Binary files /dev/null and b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00163.png differ
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00164.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00164.html
new file mode 100644 (file)
index 0000000..d0236a0
--- /dev/null
@@ -0,0 +1,169 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::strict_ppl::concurrent_queue&lt; T, A &gt; Class Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00279.html">strict_ppl</a>::<a class="el" href="a00164.html">concurrent_queue</a></div>
+<h1>tbb::strict_ppl::concurrent_queue&lt; T, A &gt; Class Template Reference<br>
+<small>
+[<a class="el" href="a00281.html">Containers</a>]</small>
+</h1><!-- doxytag: class="tbb::strict_ppl::concurrent_queue" -->A high-performance thread-safe non-blocking concurrent queue.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00341.html">concurrent_queue.h</a>&gt;</code>
+<p>
+<a href="a00018.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="682c3978d5cb0620000994f11c44a476"></a><!-- doxytag: member="tbb::strict_ppl::concurrent_queue::value_type" ref="682c3978d5cb0620000994f11c44a476" args="" -->
+typedef T&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00164.html#682c3978d5cb0620000994f11c44a476">value_type</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Element type in the queue. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a8d725c50a9834bb7af5b67c0aff92b8"></a><!-- doxytag: member="tbb::strict_ppl::concurrent_queue::reference" ref="a8d725c50a9834bb7af5b67c0aff92b8" args="" -->
+typedef T &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00164.html#a8d725c50a9834bb7af5b67c0aff92b8">reference</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Reference type. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="4d48e7ff93f81636bca2c74f7da34750"></a><!-- doxytag: member="tbb::strict_ppl::concurrent_queue::const_reference" ref="4d48e7ff93f81636bca2c74f7da34750" args="" -->
+typedef const T &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00164.html#4d48e7ff93f81636bca2c74f7da34750">const_reference</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Const reference type. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="8fc30e93f8342a1960357f71e4fe8a2b"></a><!-- doxytag: member="tbb::strict_ppl::concurrent_queue::size_type" ref="8fc30e93f8342a1960357f71e4fe8a2b" args="" -->
+typedef size_t&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00164.html#8fc30e93f8342a1960357f71e4fe8a2b">size_type</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Integral type for representing size of the queue. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="068576d16c7e4e05d52f9db7a45b5b65"></a><!-- doxytag: member="tbb::strict_ppl::concurrent_queue::difference_type" ref="068576d16c7e4e05d52f9db7a45b5b65" args="" -->
+typedef ptrdiff_t&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00164.html#068576d16c7e4e05d52f9db7a45b5b65">difference_type</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Difference type for iterator. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="5a3956341728eaa558d8827063718cac"></a><!-- doxytag: member="tbb::strict_ppl::concurrent_queue::allocator_type" ref="5a3956341728eaa558d8827063718cac" args="" -->
+typedef A&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00164.html#5a3956341728eaa558d8827063718cac">allocator_type</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Allocator type. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="9736ac047d6da3363895c7e5b121d0c4"></a><!-- doxytag: member="tbb::strict_ppl::concurrent_queue::iterator" ref="9736ac047d6da3363895c7e5b121d0c4" args="" -->
+typedef internal::concurrent_queue_iterator&lt;<br>
+ <a class="el" href="a00164.html">concurrent_queue</a>, T &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>iterator</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="9cd76d19af8c78be1bafc6ca0123ac0d"></a><!-- doxytag: member="tbb::strict_ppl::concurrent_queue::const_iterator" ref="9cd76d19af8c78be1bafc6ca0123ac0d" args="" -->
+typedef internal::concurrent_queue_iterator&lt;<br>
+ <a class="el" href="a00164.html">concurrent_queue</a>, const T &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_iterator</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="7c48a05a94a1f4f98fdfadfbef98ecf6"></a><!-- doxytag: member="tbb::strict_ppl::concurrent_queue::concurrent_queue" ref="7c48a05a94a1f4f98fdfadfbef98ecf6" args="(const allocator_type &amp;a=allocator_type())" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00164.html#7c48a05a94a1f4f98fdfadfbef98ecf6">concurrent_queue</a> (const <a class="el" href="a00164.html#5a3956341728eaa558d8827063718cac">allocator_type</a> &amp;a=<a class="el" href="a00164.html#5a3956341728eaa558d8827063718cac">allocator_type</a>())</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct empty queue. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="25209656c84f2f9b030e2f9162713341"></a><!-- doxytag: member="tbb::strict_ppl::concurrent_queue::concurrent_queue" ref="25209656c84f2f9b030e2f9162713341" args="(InputIterator begin, InputIterator end, const allocator_type &amp;a=allocator_type())" -->
+template&lt;typename InputIterator&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00164.html#25209656c84f2f9b030e2f9162713341">concurrent_queue</a> (InputIterator begin, InputIterator end, const <a class="el" href="a00164.html#5a3956341728eaa558d8827063718cac">allocator_type</a> &amp;a=<a class="el" href="a00164.html#5a3956341728eaa558d8827063718cac">allocator_type</a>())</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">[begin,end) constructor <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="8a6b98ea11a867db8ac868f0113ca429"></a><!-- doxytag: member="tbb::strict_ppl::concurrent_queue::concurrent_queue" ref="8a6b98ea11a867db8ac868f0113ca429" args="(const concurrent_queue &amp;src, const allocator_type &amp;a=allocator_type())" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00164.html#8a6b98ea11a867db8ac868f0113ca429">concurrent_queue</a> (const <a class="el" href="a00164.html">concurrent_queue</a> &amp;src, const <a class="el" href="a00164.html#5a3956341728eaa558d8827063718cac">allocator_type</a> &amp;a=<a class="el" href="a00164.html#5a3956341728eaa558d8827063718cac">allocator_type</a>())</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Copy constructor. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="830b33753d6b149c366344e29b2edd8c"></a><!-- doxytag: member="tbb::strict_ppl::concurrent_queue::~concurrent_queue" ref="830b33753d6b149c366344e29b2edd8c" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00164.html#830b33753d6b149c366344e29b2edd8c">~concurrent_queue</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destroy queue. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="73c47563ffcc4c2f6452f25a04ebe2e2"></a><!-- doxytag: member="tbb::strict_ppl::concurrent_queue::push" ref="73c47563ffcc4c2f6452f25a04ebe2e2" args="(const T &amp;source)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00164.html#73c47563ffcc4c2f6452f25a04ebe2e2">push</a> (const T &amp;source)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Enqueue an item at tail of queue. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00164.html#ae31ca0db34ef96ef1e74aa0d28c95f8">try_pop</a> (T &amp;result)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Attempt to dequeue an item from head of queue.  <a href="#ae31ca0db34ef96ef1e74aa0d28c95f8"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="eaa35a5274606779802e9a669a706260"></a><!-- doxytag: member="tbb::strict_ppl::concurrent_queue::unsafe_size" ref="eaa35a5274606779802e9a669a706260" args="() const " -->
+<a class="el" href="a00164.html#8fc30e93f8342a1960357f71e4fe8a2b">size_type</a>&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00164.html#eaa35a5274606779802e9a669a706260">unsafe_size</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Return the number of items in the queue; thread unsafe. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f3f6fce0cfa2d581d6f3b47e0613ad64"></a><!-- doxytag: member="tbb::strict_ppl::concurrent_queue::empty" ref="f3f6fce0cfa2d581d6f3b47e0613ad64" args="() const " -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00164.html#f3f6fce0cfa2d581d6f3b47e0613ad64">empty</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Equivalent to size()==0. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="c32e8e84c0524155133b4aae32d2a827"></a><!-- doxytag: member="tbb::strict_ppl::concurrent_queue::clear" ref="c32e8e84c0524155133b4aae32d2a827" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00164.html#c32e8e84c0524155133b4aae32d2a827">clear</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Clear the queue. Not thread-safe. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f034f70caef445fe8abc9113ec926a8d"></a><!-- doxytag: member="tbb::strict_ppl::concurrent_queue::get_allocator" ref="f034f70caef445fe8abc9113ec926a8d" args="() const " -->
+<a class="el" href="a00164.html#5a3956341728eaa558d8827063718cac">allocator_type</a>&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00164.html#f034f70caef445fe8abc9113ec926a8d">get_allocator</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Return allocator object. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="6297f48808dd7c14e6c2fe81559ca190"></a><!-- doxytag: member="tbb::strict_ppl::concurrent_queue::unsafe_begin" ref="6297f48808dd7c14e6c2fe81559ca190" args="()" -->
+iterator&nbsp;</td><td class="memItemRight" valign="bottom"><b>unsafe_begin</b> ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="1569f70e70521fe71944a5b0138c6ef5"></a><!-- doxytag: member="tbb::strict_ppl::concurrent_queue::unsafe_end" ref="1569f70e70521fe71944a5b0138c6ef5" args="()" -->
+iterator&nbsp;</td><td class="memItemRight" valign="bottom"><b>unsafe_end</b> ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e3e6746f7b6ecfbc16f781665ac51112"></a><!-- doxytag: member="tbb::strict_ppl::concurrent_queue::unsafe_begin" ref="e3e6746f7b6ecfbc16f781665ac51112" args="() const " -->
+const_iterator&nbsp;</td><td class="memItemRight" valign="bottom"><b>unsafe_begin</b> () const </td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="83bf90849c08f44d25dd7a3b207a8956"></a><!-- doxytag: member="tbb::strict_ppl::concurrent_queue::unsafe_end" ref="83bf90849c08f44d25dd7a3b207a8956" args="() const " -->
+const_iterator&nbsp;</td><td class="memItemRight" valign="bottom"><b>unsafe_end</b> () const </td></tr>
+
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="05bffad9626d51abbd4bb82ae2a1cceb"></a><!-- doxytag: member="tbb::strict_ppl::concurrent_queue::internal::concurrent_queue_iterator" ref="05bffad9626d51abbd4bb82ae2a1cceb" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::concurrent_queue_iterator</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename T, typename A = cache_aligned_allocator&lt;T&gt;&gt;<br>
+ class tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</h3>
+
+A high-performance thread-safe non-blocking concurrent queue. 
+<p>
+Multiple threads may each push and pop concurrently. Assignment is not allowed. 
+<p>
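A construction-and-push sketch (the seed values and function name are illustrative); tbb::concurrent_queue names this strict_ppl class unless TBB_DEPRECATED is defined:

    #include "tbb/concurrent_queue.h"
    #include <vector>

    void producer_sketch() {
        std::vector<int> seed( 4, 42 );

        // [begin,end) constructor
        tbb::concurrent_queue<int> queue( seed.begin(), seed.end() );

        queue.push( 7 );   // push() may be called from many threads concurrently
        // unsafe_size() and clear() are, as documented above, not thread-safe.
    }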
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="ae31ca0db34ef96ef1e74aa0d28c95f8"></a><!-- doxytag: member="tbb::strict_ppl::concurrent_queue::try_pop" ref="ae31ca0db34ef96ef1e74aa0d28c95f8" args="(T &amp;result)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename T, typename A = cache_aligned_allocator&lt;T&gt;&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">bool <a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue</a>&lt; T, A &gt;::try_pop           </td>
+          <td>(</td>
+          <td class="paramtype">T &amp;&nbsp;</td>
+          <td class="paramname"> <em>result</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Attempt to dequeue an item from head of queue. 
+<p>
+Does not wait for an item to become available. Returns true if successful; false otherwise. 
+</div>
+</div><p>
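A typical non-blocking drain loop built on try_pop (the function name is illustrative):

    #include "tbb/concurrent_queue.h"

    int drain_and_sum( tbb::concurrent_queue<int>& queue ) {
        int sum = 0;
        int item;
        while( queue.try_pop( item ) )   // returns false immediately when the queue is empty
            sum += item;                 // never blocks waiting for a producer
        return sum;
    }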
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00341.html">concurrent_queue.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00165.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00165.html
new file mode 100644 (file)
index 0000000..8e7a4ea
--- /dev/null
@@ -0,0 +1,154 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::deprecated::concurrent_queue&lt; T, A &gt; Class Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<b>deprecated</b>::<a class="el" href="a00165.html">concurrent_queue</a></div>
+<h1>tbb::deprecated::concurrent_queue&lt; T, A &gt; Class Template Reference<br>
+<small>
+[<a class="el" href="a00281.html">Containers</a>]</small>
+</h1><!-- doxytag: class="tbb::deprecated::concurrent_queue" --><!-- doxytag: inherits="tbb::concurrent_bounded_queue" -->A high-performance thread-safe blocking concurrent bounded queue.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00341.html">concurrent_queue.h</a>&gt;</code>
+<p>
+<p>Inheritance diagram for tbb::deprecated::concurrent_queue&lt; T, A &gt;:
+<p><center><img src="a00165.png" usemap="#tbb::deprecated::concurrent_queue< T, A >_map" border="0" alt=""></center>
+<map name="tbb::deprecated::concurrent_queue< T, A >_map">
+<area href="a00159.html" alt="tbb::concurrent_bounded_queue< T, A >" shape="rect" coords="0,0,257,24">
+</map>
+<a href="a00021.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="8861a9cdf232a20b5f2569754a281871"></a><!-- doxytag: member="tbb::deprecated::concurrent_queue::iterator" ref="8861a9cdf232a20b5f2569754a281871" args="" -->
+typedef <a class="el" href="a00159.html">concurrent_bounded_queue</a>&lt;<br>
+ T, A &gt;::iterator&nbsp;</td><td class="memItemRight" valign="bottom"><b>iterator</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="7117842ce3ed7c5147d3c886922e64d0"></a><!-- doxytag: member="tbb::deprecated::concurrent_queue::const_iterator" ref="7117842ce3ed7c5147d3c886922e64d0" args="" -->
+typedef <a class="el" href="a00159.html">concurrent_bounded_queue</a>&lt;<br>
+ T, A &gt;::const_iterator&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_iterator</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="aaf19bd7337b72f3131ece60f7315ef7"></a><!-- doxytag: member="tbb::deprecated::concurrent_queue::concurrent_queue" ref="aaf19bd7337b72f3131ece60f7315ef7" args="(const A &amp;a=A())" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00165.html#aaf19bd7337b72f3131ece60f7315ef7">concurrent_queue</a> (const A &amp;a=A())</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct empty queue. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="fc092b9082f233482f3513fc3bb670f7"></a><!-- doxytag: member="tbb::deprecated::concurrent_queue::concurrent_queue" ref="fc092b9082f233482f3513fc3bb670f7" args="(const concurrent_queue &amp;src, const A &amp;a=A())" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00165.html#fc092b9082f233482f3513fc3bb670f7">concurrent_queue</a> (const <a class="el" href="a00165.html">concurrent_queue</a> &amp;src, const A &amp;a=A())</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Copy constructor. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="9102b897776bd2d9e908e6604ff16b5f"></a><!-- doxytag: member="tbb::deprecated::concurrent_queue::concurrent_queue" ref="9102b897776bd2d9e908e6604ff16b5f" args="(InputIterator b, InputIterator e, const A &amp;a=A())" -->
+template&lt;typename InputIterator&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00165.html#9102b897776bd2d9e908e6604ff16b5f">concurrent_queue</a> (InputIterator b, InputIterator e, const A &amp;a=A())</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">[begin,end) constructor <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00165.html#7c45561bafe71107d09b2bc1b8f4e681">push_if_not_full</a> (const T &amp;source)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Enqueue an item at tail of queue if queue is not already full.  <a href="#7c45561bafe71107d09b2bc1b8f4e681"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00165.html#48da3536245318af6cb5fd58bac78039">pop_if_present</a> (T &amp;destination)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Attempt to dequeue an item from head of queue.  <a href="#48da3536245318af6cb5fd58bac78039"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ed7202cd273ae36463e6ac57e9472670"></a><!-- doxytag: member="tbb::deprecated::concurrent_queue::begin" ref="ed7202cd273ae36463e6ac57e9472670" args="()" -->
+iterator&nbsp;</td><td class="memItemRight" valign="bottom"><b>begin</b> ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="35fbb0d7e135545eb1daec2b4ae894cd"></a><!-- doxytag: member="tbb::deprecated::concurrent_queue::end" ref="35fbb0d7e135545eb1daec2b4ae894cd" args="()" -->
+iterator&nbsp;</td><td class="memItemRight" valign="bottom"><b>end</b> ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="62f31fe653e1158e64ddb0e163d0335c"></a><!-- doxytag: member="tbb::deprecated::concurrent_queue::begin" ref="62f31fe653e1158e64ddb0e163d0335c" args="() const " -->
+const_iterator&nbsp;</td><td class="memItemRight" valign="bottom"><b>begin</b> () const </td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="3d42ddc4a6fe42350750a930302863d1"></a><!-- doxytag: member="tbb::deprecated::concurrent_queue::end" ref="3d42ddc4a6fe42350750a930302863d1" args="() const " -->
+const_iterator&nbsp;</td><td class="memItemRight" valign="bottom"><b>end</b> () const </td></tr>
+
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="05bffad9626d51abbd4bb82ae2a1cceb"></a><!-- doxytag: member="tbb::deprecated::concurrent_queue::internal::concurrent_queue_iterator" ref="05bffad9626d51abbd4bb82ae2a1cceb" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::concurrent_queue_iterator</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename T, class A = cache_aligned_allocator&lt;T&gt;&gt;<br>
+ class tbb::deprecated::concurrent_queue&lt; T, A &gt;</h3>
+
+A high-performance thread-safe blocking concurrent bounded queue. 
+<p>
+This is the pre-PPL TBB concurrent queue, which supports boundedness and blocking semantics. Note that method names agree with the PPL-style concurrent queue. Multiple threads may each push and pop concurrently. Assignment is not allowed. 
+<p>
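The bounded and blocking behaviour comes from the concurrent_bounded_queue&lt;T,A&gt; base shown in the inheritance diagram; a sketch against that base directly (the capacity value is an arbitrary example):

    #include "tbb/concurrent_queue.h"

    void bounded_blocking_sketch( tbb::concurrent_bounded_queue<int>& queue ) {
        queue.set_capacity( 16 );   // bound the queue; 16 is just an example value
        queue.push( 1 );            // blocks while the queue is at capacity
        int item;
        queue.pop( item );          // blocks until an item becomes available
    }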
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="48da3536245318af6cb5fd58bac78039"></a><!-- doxytag: member="tbb::deprecated::concurrent_queue::pop_if_present" ref="48da3536245318af6cb5fd58bac78039" args="(T &amp;destination)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename T, class A = cache_aligned_allocator&lt;T&gt;&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">bool <a class="el" href="a00165.html">tbb::deprecated::concurrent_queue</a>&lt; T, A &gt;::pop_if_present           </td>
+          <td>(</td>
+          <td class="paramtype">T &amp;&nbsp;</td>
+          <td class="paramname"> <em>destination</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Attempt to dequeue an item from head of queue. 
+<p>
+Does not wait for an item to become available. Returns true if successful; false otherwise. <dl compact><dt><b><a class="el" href="deprecated.html#_deprecated000001">Deprecated:</a></b></dt><dd>Use <a class="el" href="a00159.html#0ca487019bbb00a196442aff78a1e4f7">try_pop()</a> </dd></dl>
+
+</div>
+</div><p>
+<a class="anchor" name="7c45561bafe71107d09b2bc1b8f4e681"></a><!-- doxytag: member="tbb::deprecated::concurrent_queue::push_if_not_full" ref="7c45561bafe71107d09b2bc1b8f4e681" args="(const T &amp;source)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename T, class A = cache_aligned_allocator&lt;T&gt;&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">bool <a class="el" href="a00165.html">tbb::deprecated::concurrent_queue</a>&lt; T, A &gt;::push_if_not_full           </td>
+          <td>(</td>
+          <td class="paramtype">const T &amp;&nbsp;</td>
+          <td class="paramname"> <em>source</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Enqueue an item at tail of queue if queue is not already full. 
+<p>
+Does not wait for the queue to have room. Returns true if the item was pushed; false if the queue was already full. 
+</div>
+</div><p>
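A sketch of the two non-blocking members documented above; it assumes the build defines TBB_DEPRECATED, so that tbb::concurrent_queue refers to this deprecated class:

    #include "tbb/concurrent_queue.h"
    // Assumes TBB_DEPRECATED is defined, so tbb::concurrent_queue is this deprecated class.

    void legacy_sketch( tbb::concurrent_queue<int>& queue ) {
        if( !queue.push_if_not_full( 5 ) ) {
            // the queue was full; retry, drop the item, or fall back to the blocking push()
        }
        int item;
        if( queue.pop_if_present( item ) ) {
            // got an item without blocking; try_pop() is the preferred name (see above)
        }
    }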
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00341.html">concurrent_queue.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00165.png b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00165.png
new file mode 100644 (file)
index 0000000..6aac5a0
Binary files /dev/null and b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00165.png differ
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00166.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00166.html
new file mode 100644 (file)
index 0000000..15f706a
--- /dev/null
@@ -0,0 +1,596 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::concurrent_vector&lt; T, A &gt; Class Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00166.html">concurrent_vector</a></div>
+<h1>tbb::concurrent_vector&lt; T, A &gt; Class Template Reference<br>
+<small>
+[<a class="el" href="a00281.html">Containers</a>]</small>
+</h1><!-- doxytag: class="tbb::concurrent_vector" -->Concurrent vector container.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00347.html">concurrent_vector.h</a>&gt;</code>
+<p>
+<a href="a00026.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="bc85684238d4bb5e422cb8ea0bdc438f"></a><!-- doxytag: member="tbb::concurrent_vector::size_type" ref="bc85684238d4bb5e422cb8ea0bdc438f" args="" -->
+typedef internal::concurrent_vector_base_v3::size_type&nbsp;</td><td class="memItemRight" valign="bottom"><b>size_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ac7083635ab59834e9afc15e6c168df5"></a><!-- doxytag: member="tbb::concurrent_vector::allocator_type" ref="ac7083635ab59834e9afc15e6c168df5" args="" -->
+typedef internal::allocator_base&lt;<br>
+ T, A &gt;::allocator_type&nbsp;</td><td class="memItemRight" valign="bottom"><b>allocator_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="4543d7d45b2e461d3f8ef416974ec1f1"></a><!-- doxytag: member="tbb::concurrent_vector::value_type" ref="4543d7d45b2e461d3f8ef416974ec1f1" args="" -->
+typedef T&nbsp;</td><td class="memItemRight" valign="bottom"><b>value_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e655e67dd14ed6d0cbfe7981d5fa350e"></a><!-- doxytag: member="tbb::concurrent_vector::difference_type" ref="e655e67dd14ed6d0cbfe7981d5fa350e" args="" -->
+typedef ptrdiff_t&nbsp;</td><td class="memItemRight" valign="bottom"><b>difference_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="8c29033785d76e240ea96ae40610275d"></a><!-- doxytag: member="tbb::concurrent_vector::reference" ref="8c29033785d76e240ea96ae40610275d" args="" -->
+typedef T &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><b>reference</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="b0b2a6241ab3fa9ab4f4074893fecd47"></a><!-- doxytag: member="tbb::concurrent_vector::const_reference" ref="b0b2a6241ab3fa9ab4f4074893fecd47" args="" -->
+typedef const T &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_reference</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="74319c908a32d5dcdc6a080f9c2b8803"></a><!-- doxytag: member="tbb::concurrent_vector::pointer" ref="74319c908a32d5dcdc6a080f9c2b8803" args="" -->
+typedef T *&nbsp;</td><td class="memItemRight" valign="bottom"><b>pointer</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="dd80729019220cdb80872ea6b905ffb1"></a><!-- doxytag: member="tbb::concurrent_vector::const_pointer" ref="dd80729019220cdb80872ea6b905ffb1" args="" -->
+typedef const T *&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_pointer</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a049674e7f386de3ea09ff2ed6d6bbbe"></a><!-- doxytag: member="tbb::concurrent_vector::iterator" ref="a049674e7f386de3ea09ff2ed6d6bbbe" args="" -->
+typedef internal::vector_iterator&lt;<br>
+ <a class="el" href="a00166.html">concurrent_vector</a>, T &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>iterator</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="b51a1baa7da67e283806c0d4f2fe9c69"></a><!-- doxytag: member="tbb::concurrent_vector::const_iterator" ref="b51a1baa7da67e283806c0d4f2fe9c69" args="" -->
+typedef internal::vector_iterator&lt;<br>
+ <a class="el" href="a00166.html">concurrent_vector</a>, const <br>
+T &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_iterator</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="2c7e4d252bef7af22286041c39c62abe"></a><!-- doxytag: member="tbb::concurrent_vector::reverse_iterator" ref="2c7e4d252bef7af22286041c39c62abe" args="" -->
+typedef std::reverse_iterator&lt;<br>
+ iterator &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>reverse_iterator</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="c59c3f9412b666a31a9052b789fb58bc"></a><!-- doxytag: member="tbb::concurrent_vector::const_reverse_iterator" ref="c59c3f9412b666a31a9052b789fb58bc" args="" -->
+typedef std::reverse_iterator&lt;<br>
+ const_iterator &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_reverse_iterator</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="0c3901b092e9e8185bdc3cae5913e625"></a><!-- doxytag: member="tbb::concurrent_vector::reverse_iterator" ref="0c3901b092e9e8185bdc3cae5913e625" args="" -->
+typedef std::reverse_iterator&lt;<br>
+ iterator, T, T &amp;, T * &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>reverse_iterator</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="426145d7bd5913bc08e701c14b221046"></a><!-- doxytag: member="tbb::concurrent_vector::const_reverse_iterator" ref="426145d7bd5913bc08e701c14b221046" args="" -->
+typedef std::reverse_iterator&lt;<br>
+ const_iterator, T, const <br>
+T &amp;, const T * &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_reverse_iterator</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="61fd90288dba6f8808d6dafe9a249ec5"></a><!-- doxytag: member="tbb::concurrent_vector::range_type" ref="61fd90288dba6f8808d6dafe9a249ec5" args="" -->
+typedef generic_range_type&lt;<br>
+ iterator &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>range_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="d6ddf0312a820a98efd9a5f394cb9dbd"></a><!-- doxytag: member="tbb::concurrent_vector::const_range_type" ref="d6ddf0312a820a98efd9a5f394cb9dbd" args="" -->
+typedef generic_range_type&lt;<br>
+ const_iterator &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_range_type</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="2c8ca9cabfcd30ad5943324c853664b5"></a><!-- doxytag: member="tbb::concurrent_vector::concurrent_vector" ref="2c8ca9cabfcd30ad5943324c853664b5" args="(const allocator_type &amp;a=allocator_type())" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#2c8ca9cabfcd30ad5943324c853664b5">concurrent_vector</a> (const allocator_type &amp;a=allocator_type())</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct empty vector. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="dd8a200b99a8088435a37934b58fe335"></a><!-- doxytag: member="tbb::concurrent_vector::concurrent_vector" ref="dd8a200b99a8088435a37934b58fe335" args="(const concurrent_vector &amp;vector, const allocator_type &amp;a=allocator_type())" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#dd8a200b99a8088435a37934b58fe335">concurrent_vector</a> (const <a class="el" href="a00166.html">concurrent_vector</a> &amp;vector, const allocator_type &amp;a=allocator_type())</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Copy constructor. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="64432f13f7b29bfe4acfb5568f34f3a8"></a><!-- doxytag: member="tbb::concurrent_vector::concurrent_vector" ref="64432f13f7b29bfe4acfb5568f34f3a8" args="(const concurrent_vector&lt; T, M &gt; &amp;vector, const allocator_type &amp;a=allocator_type())" -->
+template&lt;class M&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00166.html#64432f13f7b29bfe4acfb5568f34f3a8">concurrent_vector</a> (const <a class="el" href="a00166.html">concurrent_vector</a>&lt; T, M &gt; &amp;vector, const allocator_type &amp;a=allocator_type())</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Copy constructor for a vector with a different allocator type. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="2a2e261dfe1cab3f73f7b1a94137cfca"></a><!-- doxytag: member="tbb::concurrent_vector::concurrent_vector" ref="2a2e261dfe1cab3f73f7b1a94137cfca" args="(size_type n)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#2a2e261dfe1cab3f73f7b1a94137cfca">concurrent_vector</a> (size_type n)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construction with initial size specified by argument n. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="3883a8a908b44e249a57f454de3f55d8"></a><!-- doxytag: member="tbb::concurrent_vector::concurrent_vector" ref="3883a8a908b44e249a57f454de3f55d8" args="(size_type n, const_reference t, const allocator_type &amp;a=allocator_type())" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#3883a8a908b44e249a57f454de3f55d8">concurrent_vector</a> (size_type n, const_reference t, const allocator_type &amp;a=allocator_type())</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construction with initial size n, each element initialized as a copy of t, using the given allocator instance. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="4450de83c5862ea4bcd9443fd7e67419"></a><!-- doxytag: member="tbb::concurrent_vector::concurrent_vector" ref="4450de83c5862ea4bcd9443fd7e67419" args="(I first, I last, const allocator_type &amp;a=allocator_type())" -->
+template&lt;class I&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00166.html#4450de83c5862ea4bcd9443fd7e67419">concurrent_vector</a> (I first, I last, const allocator_type &amp;a=allocator_type())</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construction by copying the iteration range [first, last), with the given allocator instance. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="691f0f3cda3e489c37a657016e375eaf"></a><!-- doxytag: member="tbb::concurrent_vector::operator=" ref="691f0f3cda3e489c37a657016e375eaf" args="(const concurrent_vector &amp;vector)" -->
+<a class="el" href="a00166.html">concurrent_vector</a> &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#691f0f3cda3e489c37a657016e375eaf">operator=</a> (const <a class="el" href="a00166.html">concurrent_vector</a> &amp;vector)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Assignment. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="19f4ab88a01b0fd056af3bba463e7bd6"></a><!-- doxytag: member="tbb::concurrent_vector::operator=" ref="19f4ab88a01b0fd056af3bba463e7bd6" args="(const concurrent_vector&lt; T, M &gt; &amp;vector)" -->
+template&lt;class M&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top"><a class="el" href="a00166.html">concurrent_vector</a> &amp;&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00166.html#19f4ab88a01b0fd056af3bba463e7bd6">operator=</a> (const <a class="el" href="a00166.html">concurrent_vector</a>&lt; T, M &gt; &amp;vector)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Assignment for vector with different allocator type. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">size_type&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#30484e3959892fd5392fa93c873c31f0">grow_by</a> (size_type delta)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Grow by "delta" elements.  <a href="#30484e3959892fd5392fa93c873c31f0"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">iterator&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#c8177b1865270ea68aa1ab9148e5e35e">grow_by</a> (size_type delta)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">size_type&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#38274ab3f772ecba600c7daca7690102">grow_by</a> (size_type delta, const_reference t)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Grow by "delta" elements using the copy constructor.  <a href="#38274ab3f772ecba600c7daca7690102"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">iterator&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#473a59a4c9308b93411b898b3110d26c">grow_by</a> (size_type delta, const_reference t)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#47fe588214dd5fa06ab6e8ab78d83874">grow_to_at_least</a> (size_type n)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Append minimal sequence of elements such that <a class="el" href="a00166.html#715fe313c4a9c22731cc404dd80c9ec9">size()</a>&gt;=n.  <a href="#47fe588214dd5fa06ab6e8ab78d83874"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">iterator&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#a7e3b67c8ccab16d0aecc80899ae799d">grow_to_at_least</a> (size_type n)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">size_type&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#e94e038f915c0268fdf2d3d7f87d81b8">push_back</a> (const_reference item)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Push item.  <a href="#e94e038f915c0268fdf2d3d7f87d81b8"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">reference&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#4c52f2950bb1832886bd4458eb09d7eb">operator[]</a> (size_type index)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Get reference to element at given index.  <a href="#4c52f2950bb1832886bd4458eb09d7eb"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="c6fade5c732cc95274d1d8277ea619d1"></a><!-- doxytag: member="tbb::concurrent_vector::operator[]" ref="c6fade5c732cc95274d1d8277ea619d1" args="(size_type index) const " -->
+const_reference&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#c6fade5c732cc95274d1d8277ea619d1">operator[]</a> (size_type index) const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Get const reference to element at given index. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="0c073ca43e787c7cbf7b0e26d2221748"></a><!-- doxytag: member="tbb::concurrent_vector::at" ref="0c073ca43e787c7cbf7b0e26d2221748" args="(size_type index)" -->
+reference&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#0c073ca43e787c7cbf7b0e26d2221748">at</a> (size_type index)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Get reference to element at given index. Throws exceptions on errors. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="23e14a38af748edff96a7adc3a0f1c58"></a><!-- doxytag: member="tbb::concurrent_vector::at" ref="23e14a38af748edff96a7adc3a0f1c58" args="(size_type index) const " -->
+const_reference&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#23e14a38af748edff96a7adc3a0f1c58">at</a> (size_type index) const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Get const reference to element at given index. Throws exceptions on errors. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a4c6ffff3bf08b92939aa2fc516edfba"></a><!-- doxytag: member="tbb::concurrent_vector::range" ref="a4c6ffff3bf08b92939aa2fc516edfba" args="(size_t grainsize=1)" -->
+range_type&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#a4c6ffff3bf08b92939aa2fc516edfba">range</a> (size_t grainsize=1)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Get range for iterating with parallel algorithms. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="3d09ccfb581b879ae64203741035e193"></a><!-- doxytag: member="tbb::concurrent_vector::range" ref="3d09ccfb581b879ae64203741035e193" args="(size_t grainsize=1) const " -->
+const_range_type&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#3d09ccfb581b879ae64203741035e193">range</a> (size_t grainsize=1) const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Get const range for iterating with parallel algorithms. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="715fe313c4a9c22731cc404dd80c9ec9"></a><!-- doxytag: member="tbb::concurrent_vector::size" ref="715fe313c4a9c22731cc404dd80c9ec9" args="() const " -->
+size_type&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#715fe313c4a9c22731cc404dd80c9ec9">size</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Return size of vector. It may include elements under construction. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="c6426cb93cf20d3af40f3c90f1f0481a"></a><!-- doxytag: member="tbb::concurrent_vector::empty" ref="c6426cb93cf20d3af40f3c90f1f0481a" args="() const " -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#c6426cb93cf20d3af40f3c90f1f0481a">empty</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Return true if the vector is empty; elements still under construction count toward the size. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="3ed6b9ae7217af5103d974045b6f5cd5"></a><!-- doxytag: member="tbb::concurrent_vector::capacity" ref="3ed6b9ae7217af5103d974045b6f5cd5" args="() const " -->
+size_type&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#3ed6b9ae7217af5103d974045b6f5cd5">capacity</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Maximum size to which array can grow without allocating more memory. Concurrent allocations are not included in the value. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#5a0ce05026994b010018f72cfdeb72c1">reserve</a> (size_type n)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Allocate enough space to grow to size n without having to allocate more memory later.  <a href="#5a0ce05026994b010018f72cfdeb72c1"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="8dfb0cb0eef96d440b4dcf801807a718"></a><!-- doxytag: member="tbb::concurrent_vector::resize" ref="8dfb0cb0eef96d440b4dcf801807a718" args="(size_type n)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#8dfb0cb0eef96d440b4dcf801807a718">resize</a> (size_type n)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Resize the vector. Not thread-safe. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="98ce6b2c6d2622f0c030b46dfac3880c"></a><!-- doxytag: member="tbb::concurrent_vector::resize" ref="98ce6b2c6d2622f0c030b46dfac3880c" args="(size_type n, const_reference t)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#98ce6b2c6d2622f0c030b46dfac3880c">resize</a> (size_type n, const_reference t)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Resize the vector, copy t for new elements. Not thread-safe. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="1693d1da41b1a8235871be9c6633be35"></a><!-- doxytag: member="tbb::concurrent_vector::compact" ref="1693d1da41b1a8235871be9c6633be35" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#1693d1da41b1a8235871be9c6633be35">compact</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">An alias for <a class="el" href="a00166.html#03c6f4cf66532bf4cc907ee738a9a186">shrink_to_fit()</a>. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="03c6f4cf66532bf4cc907ee738a9a186"></a><!-- doxytag: member="tbb::concurrent_vector::shrink_to_fit" ref="03c6f4cf66532bf4cc907ee738a9a186" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#03c6f4cf66532bf4cc907ee738a9a186">shrink_to_fit</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Optimize memory usage and fragmentation. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="2c248a017f0576df3e7cd99627836fd6"></a><!-- doxytag: member="tbb::concurrent_vector::max_size" ref="2c248a017f0576df3e7cd99627836fd6" args="() const " -->
+size_type&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#2c248a017f0576df3e7cd99627836fd6">max_size</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Upper bound on argument to reserve. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="730b23a251ecb6d37f692fb22f38e029"></a><!-- doxytag: member="tbb::concurrent_vector::begin" ref="730b23a251ecb6d37f692fb22f38e029" args="()" -->
+iterator&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#730b23a251ecb6d37f692fb22f38e029">begin</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">start iterator <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="c0b51160e5a764982ec97a455f94f2c6"></a><!-- doxytag: member="tbb::concurrent_vector::end" ref="c0b51160e5a764982ec97a455f94f2c6" args="()" -->
+iterator&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#c0b51160e5a764982ec97a455f94f2c6">end</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">end iterator <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="78a06182276ff758788d4c0623ae0d71"></a><!-- doxytag: member="tbb::concurrent_vector::begin" ref="78a06182276ff758788d4c0623ae0d71" args="() const " -->
+const_iterator&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#78a06182276ff758788d4c0623ae0d71">begin</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">start const iterator <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="1e6aa764ce5a1cbd24526f68bc0a2f6b"></a><!-- doxytag: member="tbb::concurrent_vector::end" ref="1e6aa764ce5a1cbd24526f68bc0a2f6b" args="() const " -->
+const_iterator&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#1e6aa764ce5a1cbd24526f68bc0a2f6b">end</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">end const iterator <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f88fcf1c920693c39bd9709db33c199f"></a><!-- doxytag: member="tbb::concurrent_vector::cbegin" ref="f88fcf1c920693c39bd9709db33c199f" args="() const " -->
+const_iterator&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#f88fcf1c920693c39bd9709db33c199f">cbegin</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">start const iterator <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="0c15a5d0f1cf75d687dabba07da1d46b"></a><!-- doxytag: member="tbb::concurrent_vector::cend" ref="0c15a5d0f1cf75d687dabba07da1d46b" args="() const " -->
+const_iterator&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#0c15a5d0f1cf75d687dabba07da1d46b">cend</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">end const iterator <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="5e220926d09236d98f04fe0721e5f9a1"></a><!-- doxytag: member="tbb::concurrent_vector::rbegin" ref="5e220926d09236d98f04fe0721e5f9a1" args="()" -->
+reverse_iterator&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#5e220926d09236d98f04fe0721e5f9a1">rbegin</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">reverse start iterator <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="290119a4eb43cd6a9e98fa17016ba3c2"></a><!-- doxytag: member="tbb::concurrent_vector::rend" ref="290119a4eb43cd6a9e98fa17016ba3c2" args="()" -->
+reverse_iterator&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#290119a4eb43cd6a9e98fa17016ba3c2">rend</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">reverse end iterator <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="9f9c103e18d5f212703805354074ad44"></a><!-- doxytag: member="tbb::concurrent_vector::rbegin" ref="9f9c103e18d5f212703805354074ad44" args="() const " -->
+const_reverse_iterator&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#9f9c103e18d5f212703805354074ad44">rbegin</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">reverse start const iterator <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="d438b9b32ea3a8ffb703015b6dce055b"></a><!-- doxytag: member="tbb::concurrent_vector::rend" ref="d438b9b32ea3a8ffb703015b6dce055b" args="() const " -->
+const_reverse_iterator&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#d438b9b32ea3a8ffb703015b6dce055b">rend</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">reverse end const iterator <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="db78a1d28c9c966050e8a2926d834a33"></a><!-- doxytag: member="tbb::concurrent_vector::crbegin" ref="db78a1d28c9c966050e8a2926d834a33" args="() const " -->
+const_reverse_iterator&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#db78a1d28c9c966050e8a2926d834a33">crbegin</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">reverse start const iterator <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="fff9cece89438587997ebedf93c5e962"></a><!-- doxytag: member="tbb::concurrent_vector::crend" ref="fff9cece89438587997ebedf93c5e962" args="() const " -->
+const_reverse_iterator&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#fff9cece89438587997ebedf93c5e962">crend</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">reverse end const iterator <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="15181759c0bfa2ddce5d10c7550e0002"></a><!-- doxytag: member="tbb::concurrent_vector::front" ref="15181759c0bfa2ddce5d10c7550e0002" args="()" -->
+reference&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#15181759c0bfa2ddce5d10c7550e0002">front</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">the first item <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="502615a858eb9fa0390ee59169065e90"></a><!-- doxytag: member="tbb::concurrent_vector::front" ref="502615a858eb9fa0390ee59169065e90" args="() const " -->
+const_reference&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#502615a858eb9fa0390ee59169065e90">front</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">the first item const <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="41ce48d6015a1a2812d41cf620ec3476"></a><!-- doxytag: member="tbb::concurrent_vector::back" ref="41ce48d6015a1a2812d41cf620ec3476" args="()" -->
+reference&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#41ce48d6015a1a2812d41cf620ec3476">back</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">the last item <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="bd518e204107d07fd08d0ec5bdfd383d"></a><!-- doxytag: member="tbb::concurrent_vector::back" ref="bd518e204107d07fd08d0ec5bdfd383d" args="() const " -->
+const_reference&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#bd518e204107d07fd08d0ec5bdfd383d">back</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">the last item const <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="2fdba8e90de6a4d2300222236d46758e"></a><!-- doxytag: member="tbb::concurrent_vector::get_allocator" ref="2fdba8e90de6a4d2300222236d46758e" args="() const " -->
+allocator_type&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#2fdba8e90de6a4d2300222236d46758e">get_allocator</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">return allocator object <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="423e5aa15e0e3309ad86d026fd85f6f6"></a><!-- doxytag: member="tbb::concurrent_vector::assign" ref="423e5aa15e0e3309ad86d026fd85f6f6" args="(size_type n, const_reference t)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#423e5aa15e0e3309ad86d026fd85f6f6">assign</a> (size_type n, const_reference t)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">assign n items, each a copy of t <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="93a06b3112cb804f42f40efb5e7387b4"></a><!-- doxytag: member="tbb::concurrent_vector::assign" ref="93a06b3112cb804f42f40efb5e7387b4" args="(I first, I last)" -->
+template&lt;class I&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00166.html#93a06b3112cb804f42f40efb5e7387b4">assign</a> (I first, I last)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">assign range [first, last) <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="96c9c4bd968ed3edb8dd276854d2dae0"></a><!-- doxytag: member="tbb::concurrent_vector::swap" ref="96c9c4bd968ed3edb8dd276854d2dae0" args="(concurrent_vector &amp;vector)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#96c9c4bd968ed3edb8dd276854d2dae0">swap</a> (<a class="el" href="a00166.html">concurrent_vector</a> &amp;vector)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">swap two instances <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#26f937a359a66b6aae904c3cd9a3c444">clear</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Clear container while keeping memory allocated.  <a href="#26f937a359a66b6aae904c3cd9a3c444"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="da2444b28bb840d38f60d0030333a5fc"></a><!-- doxytag: member="tbb::concurrent_vector::~concurrent_vector" ref="da2444b28bb840d38f60d0030333a5fc" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html#da2444b28bb840d38f60d0030333a5fc">~concurrent_vector</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Clear and destroy vector. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="bb5ae659871478f1f5c68039e1273e12"></a><!-- doxytag: member="tbb::concurrent_vector::internal_vector_base" ref="bb5ae659871478f1f5c68039e1273e12" args="() const " -->
+const internal::concurrent_vector_base_v3 &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal_vector_base</b> () const </td></tr>
+
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="30086a58bff79a91103723be20916b96"></a><!-- doxytag: member="tbb::concurrent_vector::internal::vector_iterator" ref="30086a58bff79a91103723be20916b96" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::vector_iterator</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><b>generic_range_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><b>internal_loop_guide</b></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Exception-aware helper class for filling a segment with operations of the user's element type that may throw. <br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename T, class A&gt;<br>
+ class tbb::concurrent_vector&lt; T, A &gt;</h3>
+
+Concurrent vector container. 
+<p>
+<a class="el" href="a00166.html">concurrent_vector</a> is a container having the following main properties:<ul>
+<li>It provides random indexed access to its elements. The index of the first element is 0.</li><li>It can grow concurrently and safely (different threads can append new elements at the same time).</li><li>Adding new elements does not invalidate existing iterators and does not change the indices of existing items.</li></ul>
+<p>
+<dl compact><dt><b>Compatibility</b></dt><dd>The class meets all Container Requirements and Reversible Container Requirements of the C++ Standard (see ISO/IEC 14882:2003(E), clause 23.1). However, it does not meet the Sequence Requirements because it lacks insert() and erase() methods.</dd></dl>
+<dl compact><dt><b>Exception Safety</b></dt><dd>Methods that allocate memory and/or construct new elements can throw an exception if the allocator fails to allocate memory or the element's default constructor throws. An element type T stored in a concurrent vector must meet the following requirements:<ul>
+<li>The destructor of T must not throw an exception.</li><li>The default constructor of T must not throw an exception, OR its non-virtual destructor must work safely when the object's memory is zero-initialized.</li></ul>
+Otherwise, the program's behavior is undefined. </dd></dl>
+<dl compact><dt><b></b></dt><dd>If an exception occurs inside a growth or assignment operation, the vector instance becomes invalid unless stated otherwise in the method documentation. An invalid state means:<ul>
+<li>There is no guarantee that all items were initialized by a constructor. The remaining items are zero-filled, including the item where the exception occurred.</li><li>An invalid vector instance cannot be repaired; it can no longer grow.</li><li>The size and capacity reported by the vector are incorrect, calculated as if the failed operation had succeeded.</li><li>Attempting to access unallocated elements via operator[] or iterators results in an access violation or segmentation fault, while the <a class="el" href="a00166.html#0c073ca43e787c7cbf7b0e26d2221748">at()</a> method throws a C++ exception.</li></ul>
+If a concurrent grow operation completes successfully, all elements it added to the vector remain valid and accessible even if a subsequent grow operation fails.</dd></dl>
+<dl compact><dt><b>Fragmentation</b></dt><dd>Unlike an STL vector, a <a class="el" href="a00166.html">concurrent_vector</a> does not move existing elements when it needs to allocate more memory. The container is divided into a series of contiguous arrays of elements. The first reservation, growth, or assignment operation determines the size of the first array. Using a small number of elements as the initial size incurs fragmentation that may increase element access time. The internal layout can be optimized by the <a class="el" href="a00166.html#1693d1da41b1a8235871be9c6633be35">compact()</a> method, which merges several smaller arrays into one contiguous array.</dd></dl>
+<dl compact><dt><b>Changes since TBB 2.1</b></dt><dd><ul>
+<li>Fixed guarantees of the <a class="el" href="a00166.html#715fe313c4a9c22731cc404dd80c9ec9">concurrent_vector::size()</a> and <a class="el" href="a00166.html#47fe588214dd5fa06ab6e8ab78d83874">grow_to_at_least()</a> methods to ensure that elements are allocated.</li><li>The methods <a class="el" href="a00166.html#c0b51160e5a764982ec97a455f94f2c6">end()</a>/rbegin()/back() are partly thread-safe, since they use <a class="el" href="a00166.html#715fe313c4a9c22731cc404dd80c9ec9">size()</a> to determine the end of the vector</li><li>Added <a class="el" href="a00166.html#8dfb0cb0eef96d440b4dcf801807a718">resize()</a> methods (not thread-safe)</li><li>Added cbegin/cend/crbegin/crend methods</li><li>Changed the return type of the grow* and push_back methods to iterator</li></ul>
+</dd></dl>
+<dl compact><dt><b>Changes since TBB 2.0</b></dt><dd><ul>
+<li>Implemented exception-safety guarantees</li><li>Added a template argument for the allocator</li><li>Added an allocator argument to the constructors</li><li>Faster index calculation</li><li>The first growth call specifies the number of segments to be merged in the first allocation.</li><li>Fixed memory blow-up for large numbers of small vector instances</li><li>Added <a class="el" href="a00166.html#38274ab3f772ecba600c7daca7690102">grow_by(size_type n, const_reference t)</a>, which grows the vector using the copy constructor to initialize new items.</li><li>Added STL-like constructors.</li><li>Added operators ==, &lt; and derivatives</li><li>Added the <a class="el" href="a00166.html#0c073ca43e787c7cbf7b0e26d2221748">at()</a> method, safe to use after an exception has been thrown inside the vector</li><li>Added the <a class="el" href="a00166.html#2fdba8e90de6a4d2300222236d46758e">get_allocator()</a> method.</li><li>Added <a class="el" href="a00166.html#423e5aa15e0e3309ad86d026fd85f6f6">assign()</a> methods</li><li>Added the <a class="el" href="a00166.html#1693d1da41b1a8235871be9c6633be35">compact()</a> method to defragment the first segments</li><li>Added the <a class="el" href="a00166.html#96c9c4bd968ed3edb8dd276854d2dae0">swap()</a> method</li><li><a class="el" href="a00166.html#a4c6ffff3bf08b92939aa2fc516edfba">range()</a> defaults to grainsize = 1, supporting automatic-grainsize algorithms. </li></ul>
+</dd></dl>
+
+<p>
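+<p>As an informal illustration (not part of the generated reference; the helper name fill_and_touch is made up and a compiler with lambda support is assumed), the sketch below exercises the two central properties: safe concurrent growth via push_back() and parallel traversal via range():</p>
+<pre>
+#include "tbb/concurrent_vector.h"
+#include "tbb/parallel_for.h"
+#include "tbb/blocked_range.h"
+
+void fill_and_touch() {
+    tbb::concurrent_vector&lt;int&gt; v;
+
+    // Concurrent growth: many threads may append at once; existing
+    // iterators and element indices remain valid.
+    tbb::parallel_for( tbb::blocked_range&lt;int&gt;( 0, 1000 ),
+        [&amp;v]( const tbb::blocked_range&lt;int&gt; &amp;r ) {
+            for( int i = r.begin(); i != r.end(); ++i )
+                v.push_back( i );
+        } );
+
+    // Parallel traversal of the populated vector through range().
+    tbb::parallel_for( v.range(),
+        []( const tbb::concurrent_vector&lt;int&gt;::range_type &amp;r ) {
+            for( tbb::concurrent_vector&lt;int&gt;::iterator it = r.begin(); it != r.end(); ++it )
+                *it += 1;
+        } );
+}
+</pre>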
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="26f937a359a66b6aae904c3cd9a3c444"></a><!-- doxytag: member="tbb::concurrent_vector::clear" ref="26f937a359a66b6aae904c3cd9a3c444" args="()" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename T, class A&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">void <a class="el" href="a00166.html">tbb::concurrent_vector</a>&lt; T, A &gt;::clear           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Clear container while keeping memory allocated. 
+<p>
+To free the memory, use this method in conjunction with <a class="el" href="a00166.html#1693d1da41b1a8235871be9c6633be35">compact()</a>. Not thread-safe. 
+</div>
+</div><p>
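+<p>A minimal sketch of the intended idiom (the helper name release_memory is illustrative; a single-threaded phase is assumed, since neither call is thread-safe):</p>
+<pre>
+#include "tbb/concurrent_vector.h"
+
+void release_memory( tbb::concurrent_vector&lt;double&gt; &amp;v ) {
+    v.clear();           // destroys the elements but keeps the segments allocated
+    v.shrink_to_fit();   // same operation as compact(); returns memory to the allocator
+}
+</pre>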
+<a class="anchor" name="473a59a4c9308b93411b898b3110d26c"></a><!-- doxytag: member="tbb::concurrent_vector::grow_by" ref="473a59a4c9308b93411b898b3110d26c" args="(size_type delta, const_reference t)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename T, class A&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">iterator <a class="el" href="a00166.html">tbb::concurrent_vector</a>&lt; T, A &gt;::grow_by           </td>
+          <td>(</td>
+          <td class="paramtype">size_type&nbsp;</td>
+          <td class="paramname"> <em>delta</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const_reference&nbsp;</td>
+          <td class="paramname"> <em>t</em></td><td>&nbsp;</td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td><td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Returns iterator pointing to the first new element. 
+</div>
+</div><p>
+<a class="anchor" name="38274ab3f772ecba600c7daca7690102"></a><!-- doxytag: member="tbb::concurrent_vector::grow_by" ref="38274ab3f772ecba600c7daca7690102" args="(size_type delta, const_reference t)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename T, class A&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">size_type <a class="el" href="a00166.html">tbb::concurrent_vector</a>&lt; T, A &gt;::grow_by           </td>
+          <td>(</td>
+          <td class="paramtype">size_type&nbsp;</td>
+          <td class="paramname"> <em>delta</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const_reference&nbsp;</td>
+          <td class="paramname"> <em>t</em></td><td>&nbsp;</td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td><td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Grow by "delta" elements using the copy constructor. 
+<p>
+Returns old size. 
+</div>
+</div><p>
+<a class="anchor" name="c8177b1865270ea68aa1ab9148e5e35e"></a><!-- doxytag: member="tbb::concurrent_vector::grow_by" ref="c8177b1865270ea68aa1ab9148e5e35e" args="(size_type delta)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename T, class A&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">iterator <a class="el" href="a00166.html">tbb::concurrent_vector</a>&lt; T, A &gt;::grow_by           </td>
+          <td>(</td>
+          <td class="paramtype">size_type&nbsp;</td>
+          <td class="paramname"> <em>delta</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Returns iterator pointing to the first new element. 
+</div>
+</div><p>
+<a class="anchor" name="30484e3959892fd5392fa93c873c31f0"></a><!-- doxytag: member="tbb::concurrent_vector::grow_by" ref="30484e3959892fd5392fa93c873c31f0" args="(size_type delta)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename T, class A&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">size_type <a class="el" href="a00166.html">tbb::concurrent_vector</a>&lt; T, A &gt;::grow_by           </td>
+          <td>(</td>
+          <td class="paramtype">size_type&nbsp;</td>
+          <td class="paramname"> <em>delta</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Grow by "delta" elements. 
+<p>
+Returns old size. 
+</div>
+</div><p>
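+<p>For illustration (the helper append_block is made up; the sketch assumes the iterator-returning form of grow_by(), since depending on configuration the deprecated overloads return the old size instead):</p>
+<pre>
+#include "tbb/concurrent_vector.h"
+
+void append_block( tbb::concurrent_vector&lt;int&gt; &amp;v, int k ) {
+    // Atomically append k default-constructed elements, then fill them in.
+    tbb::concurrent_vector&lt;int&gt;::iterator it = v.grow_by( k );
+    for( int j = 0; j &lt; k; ++j, ++it )
+        *it = j;
+}
+</pre>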
+<a class="anchor" name="a7e3b67c8ccab16d0aecc80899ae799d"></a><!-- doxytag: member="tbb::concurrent_vector::grow_to_at_least" ref="a7e3b67c8ccab16d0aecc80899ae799d" args="(size_type n)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename T, class A&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">iterator <a class="el" href="a00166.html">tbb::concurrent_vector</a>&lt; T, A &gt;::grow_to_at_least           </td>
+          <td>(</td>
+          <td class="paramtype">size_type&nbsp;</td>
+          <td class="paramname"> <em>n</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+The new elements are default-constructed. Blocks until all elements in the range [0..n) are allocated. May return while other elements are still being constructed by other threads. Returns an iterator that points to the beginning of the appended sequence; if no elements were appended, returns an iterator pointing to the nth element. 
+</div>
+</div><p>
+<a class="anchor" name="47fe588214dd5fa06ab6e8ab78d83874"></a><!-- doxytag: member="tbb::concurrent_vector::grow_to_at_least" ref="47fe588214dd5fa06ab6e8ab78d83874" args="(size_type n)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename T, class A&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">void <a class="el" href="a00166.html">tbb::concurrent_vector</a>&lt; T, A &gt;::grow_to_at_least           </td>
+          <td>(</td>
+          <td class="paramtype">size_type&nbsp;</td>
+          <td class="paramname"> <em>n</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Append minimal sequence of elements such that <a class="el" href="a00166.html#715fe313c4a9c22731cc404dd80c9ec9">size()</a>&gt;=n. 
+<p>
+The new elements are default-constructed. Blocks until all elements in the range [0..n) are allocated. May return while other elements are still being constructed by other threads. 
+</div>
+</div><p>
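+<p>An illustrative sketch of the pattern this enables (ensure_slot is a made-up helper; note the caveat above that elements appended concurrently by other threads may still be under construction when the call returns):</p>
+<pre>
+#include "tbb/concurrent_vector.h"
+
+void ensure_slot( tbb::concurrent_vector&lt;int&gt; &amp;v,
+                  tbb::concurrent_vector&lt;int&gt;::size_type n, int value ) {
+    v.grow_to_at_least( n );   // afterwards size() &gt;= n and slots [0..n) are allocated
+    v[n-1] = value;            // safe only if no other thread touches this slot
+}
+</pre>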
+<a class="anchor" name="4c52f2950bb1832886bd4458eb09d7eb"></a><!-- doxytag: member="tbb::concurrent_vector::operator[]" ref="4c52f2950bb1832886bd4458eb09d7eb" args="(size_type index)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename T, class A&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">reference <a class="el" href="a00166.html">tbb::concurrent_vector</a>&lt; T, A &gt;::operator[]           </td>
+          <td>(</td>
+          <td class="paramtype">size_type&nbsp;</td>
+          <td class="paramname"> <em>index</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Get reference to element at given index. 
+<p>
+This method is thread-safe for concurrent reads, and also while growing the vector, as long as the calling thread has checked that index&lt;<a class="el" href="a00166.html#715fe313c4a9c22731cc404dd80c9ec9">size()</a>. 
+</div>
+</div><p>
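+<p>A sketch of the guarded access pattern described above (read_if_present is an illustrative helper, not part of the library):</p>
+<pre>
+#include "tbb/concurrent_vector.h"
+
+// Returns v[i] if the element is already available, otherwise a fallback value.
+int read_if_present( const tbb::concurrent_vector&lt;int&gt; &amp;v,
+                     tbb::concurrent_vector&lt;int&gt;::size_type i, int fallback ) {
+    return i &lt; v.size() ? v[i] : fallback;
+}
+</pre>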
+<a class="anchor" name="e94e038f915c0268fdf2d3d7f87d81b8"></a><!-- doxytag: member="tbb::concurrent_vector::push_back" ref="e94e038f915c0268fdf2d3d7f87d81b8" args="(const_reference item) iterator push_back(const _reference item)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename T, class A&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">size_type <a class="el" href="a00166.html">tbb::concurrent_vector</a>&lt; T, A &gt;::push_back           </td>
+          <td>(</td>
+          <td class="paramtype">const_reference&nbsp;</td>
+          <td class="paramname"> <em>item</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Push item. 
+<p>
+Returns iterator pointing to the new element. 
+</div>
+</div><p>
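+<p>For illustration (record is a made-up helper; the return value is ignored here because, depending on configuration, it is either an iterator to the new element or the old size):</p>
+<pre>
+#include "tbb/concurrent_vector.h"
+#include &lt;string&gt;
+
+// Safe to call from many threads at once; existing iterators stay valid.
+void record( tbb::concurrent_vector&lt;std::string&gt; &amp;log, const std::string &amp;msg ) {
+    log.push_back( msg );
+}
+</pre>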
+<a class="anchor" name="5a0ce05026994b010018f72cfdeb72c1"></a><!-- doxytag: member="tbb::concurrent_vector::reserve" ref="5a0ce05026994b010018f72cfdeb72c1" args="(size_type n)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename T, class A&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">void <a class="el" href="a00166.html">tbb::concurrent_vector</a>&lt; T, A &gt;::reserve           </td>
+          <td>(</td>
+          <td class="paramtype">size_type&nbsp;</td>
+          <td class="paramname"> <em>n</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Allocate enough space to grow to size n without having to allocate more memory later. 
+<p>
+Like most of the methods provided for STL compatibility, this method is *not* thread-safe. The capacity afterwards may be larger than the requested reservation. 
+</div>
+</div><p>
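+<p>An illustrative sketch (prepare is a made-up helper); as noted, this is a set-up step performed before concurrent use:</p>
+<pre>
+#include "tbb/concurrent_vector.h"
+
+void prepare( tbb::concurrent_vector&lt;float&gt; &amp;v ) {
+    v.reserve( 1024*1024 );   // capacity() afterwards may exceed the request
+    // subsequent concurrent grow_by()/push_back() calls reuse this space
+}
+</pre>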
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00347.html">concurrent_vector.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00167.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00167.html
new file mode 100644 (file)
index 0000000..ab91699
--- /dev/null
@@ -0,0 +1,54 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::empty_task Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00167.html">empty_task</a></div>
+<h1>tbb::empty_task Class Reference<br>
+<small>
+[<a class="el" href="a00285.html">Task Scheduling</a>]</small>
+</h1><!-- doxytag: class="tbb::empty_task" --><!-- doxytag: inherits="tbb::task" -->A task that does nothing. Useful for synchronization.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00438.html">task.h</a>&gt;</code>
+<p>
+<p>Inheritance diagram for tbb::empty_task:
+<p><center><img src="a00167.png" usemap="#tbb::empty_task_map" border="0" alt=""></center>
+<map name="tbb::empty_task_map">
+<area href="a00204.html" alt="tbb::task" shape="rect" coords="0,0,97,24">
+</map>
+<a href="a00076.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+A task that does nothing. Useful for synchronization. 
+<p>
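+<p>As an informal illustration of the synchronization use mentioned above (ChildTask and run_and_wait are made-up names; only the tbb::task and tbb::empty_task calls come from the library):</p>
+<pre>
+#include "tbb/task.h"
+
+// A hypothetical task whose completion we want to wait for.
+class ChildTask : public tbb::task {
+public:
+    tbb::task* execute() {   // the real work would go here
+        return 0;
+    }
+};
+
+void run_and_wait() {
+    // empty_task serves as a dummy root whose only purpose is to be waited on.
+    tbb::empty_task &amp;root = *new( tbb::task::allocate_root() ) tbb::empty_task;
+    root.set_ref_count( 2 );                // one child + one for the wait itself
+    tbb::task &amp;child = *new( root.allocate_child() ) ChildTask;
+    root.spawn_and_wait_for_all( child );   // blocks until the child has finished
+    root.destroy( root );                   // the root task itself never executes
+}
+</pre>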
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00438.html">task.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00167.png b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00167.png
new file mode 100644 (file)
index 0000000..2fa6419
Binary files /dev/null and b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00167.png differ
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00168.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00168.html
new file mode 100644 (file)
index 0000000..82292f4
--- /dev/null
@@ -0,0 +1,197 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt; Class Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<b>interface6</b>::<a class="el" href="a00168.html">enumerable_thread_specific</a></div>
+<h1>tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt; Class Template Reference<br>
+<small>
+[<a class="el" href="a00281.html">Containers</a>]</small>
+</h1><!-- doxytag: class="tbb::interface6::enumerable_thread_specific" -->The <a class="el" href="a00168.html">enumerable_thread_specific</a> container.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00359.html">enumerable_thread_specific.h</a>&gt;</code>
+<p>
+<a href="a00031.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="3c03eb40955b933b01987222722ac4bd"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::allocator_type" ref="3c03eb40955b933b01987222722ac4bd" args="" -->
+typedef Allocator&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00168.html#3c03eb40955b933b01987222722ac4bd">allocator_type</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Basic types. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e93f3b75e82eaa845cc1a0514eaba31a"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::value_type" ref="e93f3b75e82eaa845cc1a0514eaba31a" args="" -->
+typedef T&nbsp;</td><td class="memItemRight" valign="bottom"><b>value_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="0fd259a6b20641c768079f41d3213904"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::reference" ref="0fd259a6b20641c768079f41d3213904" args="" -->
+typedef T &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><b>reference</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="2c1d47e24439524fea72d9c10df25d54"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::const_reference" ref="2c1d47e24439524fea72d9c10df25d54" args="" -->
+typedef const T &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_reference</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="27cf20b87f4381a69203aed11e5a34dc"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::pointer" ref="27cf20b87f4381a69203aed11e5a34dc" args="" -->
+typedef T *&nbsp;</td><td class="memItemRight" valign="bottom"><b>pointer</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="0e132e2a40dd6ce5472f8e13a1e80947"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::const_pointer" ref="0e132e2a40dd6ce5472f8e13a1e80947" args="" -->
+typedef const T *&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_pointer</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="c67c208bec3e8b77190d545bd1f150e3"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::size_type" ref="c67c208bec3e8b77190d545bd1f150e3" args="" -->
+typedef internal_collection_type::size_type&nbsp;</td><td class="memItemRight" valign="bottom"><b>size_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="eda9be9a80bed094d4657948b5b7831f"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::difference_type" ref="eda9be9a80bed094d4657948b5b7831f" args="" -->
+typedef internal_collection_type::difference_type&nbsp;</td><td class="memItemRight" valign="bottom"><b>difference_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="bd6f8e0af3c5e5166a4223f88b03e0a5"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::iterator" ref="bd6f8e0af3c5e5166a4223f88b03e0a5" args="" -->
+typedef internal::enumerable_thread_specific_iterator&lt;<br>
+ <a class="el" href="a00166.html">internal_collection_type</a>,<br>
+ value_type &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>iterator</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="8ff2ff1a117048f5d57aedebb12d57e1"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::const_iterator" ref="8ff2ff1a117048f5d57aedebb12d57e1" args="" -->
+typedef internal::enumerable_thread_specific_iterator&lt;<br>
+ <a class="el" href="a00166.html">internal_collection_type</a>,<br>
+ const value_type &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_iterator</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="d4c23a666adbf88bd3280873de9d7f39"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::range_type" ref="d4c23a666adbf88bd3280873de9d7f39" args="" -->
+typedef generic_range_type&lt;<br>
+ iterator &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>range_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="0ce41c6603d8f547f11df05d1ccb2184"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::const_range_type" ref="0ce41c6603d8f547f11df05d1ccb2184" args="" -->
+typedef generic_range_type&lt;<br>
+ const_iterator &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_range_type</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="0cfa3a5c8f2be3bbf313d93b1fa8cdb3"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::enumerable_thread_specific" ref="0cfa3a5c8f2be3bbf313d93b1fa8cdb3" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00168.html#0cfa3a5c8f2be3bbf313d93b1fa8cdb3">enumerable_thread_specific</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Default constructor. Each local instance of T is default constructed. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="8d4b456ff9d7b289c73254eccc11db45"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::enumerable_thread_specific" ref="8d4b456ff9d7b289c73254eccc11db45" args="(Finit finit)" -->
+template&lt;typename Finit&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00168.html#8d4b456ff9d7b289c73254eccc11db45">enumerable_thread_specific</a> (Finit finit)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Constructor with initializer functor. Each local instance of T is constructed by T(finit()). <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="7bce6829981c9efe3f59cae2355e383e"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::enumerable_thread_specific" ref="7bce6829981c9efe3f59cae2355e383e" args="(const T &amp;exemplar)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00168.html#7bce6829981c9efe3f59cae2355e383e">enumerable_thread_specific</a> (const T &amp;exemplar)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Constructor with exemplar. Each local instance of T is copy-constructed from the exemplar. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="5a7907d9e3e5b18e7a7b55211ef3213f"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::~enumerable_thread_specific" ref="5a7907d9e3e5b18e7a7b55211ef3213f" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00168.html#5a7907d9e3e5b18e7a7b55211ef3213f">~enumerable_thread_specific</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destructor. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="c8d5265ccbd5e4485996b3f3baaa5ba1"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::local" ref="c8d5265ccbd5e4485996b3f3baaa5ba1" args="()" -->
+reference&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00168.html#c8d5265ccbd5e4485996b3f3baaa5ba1">local</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">returns reference to local, discarding exists <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="7dc79058d2832f7447de8e691c3455ea"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::local" ref="7dc79058d2832f7447de8e691c3455ea" args="(bool &amp;exists)" -->
+reference&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00168.html#7dc79058d2832f7447de8e691c3455ea">local</a> (bool &amp;exists)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Returns reference to calling thread's local copy, creating one if necessary. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="33fd6593da1ed14340f10f67d5a69130"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::size" ref="33fd6593da1ed14340f10f67d5a69130" args="() const " -->
+size_type&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00168.html#33fd6593da1ed14340f10f67d5a69130">size</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Get the number of local copies. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="72595886d0ac8fd0543f90038570510d"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::empty" ref="72595886d0ac8fd0543f90038570510d" args="() const " -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00168.html#72595886d0ac8fd0543f90038570510d">empty</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">true if there have been no local copies created <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="01d7baf8e913ab2819e97917a2ac795f"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::begin" ref="01d7baf8e913ab2819e97917a2ac795f" args="()" -->
+iterator&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00168.html#01d7baf8e913ab2819e97917a2ac795f">begin</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">begin iterator <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="2456ff88252fc921b01cd8907628a4ee"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::end" ref="2456ff88252fc921b01cd8907628a4ee" args="()" -->
+iterator&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00168.html#2456ff88252fc921b01cd8907628a4ee">end</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">end iterator <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="84afb3906a39e399cde1c950d6351300"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::begin" ref="84afb3906a39e399cde1c950d6351300" args="() const " -->
+const_iterator&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00168.html#84afb3906a39e399cde1c950d6351300">begin</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">begin const iterator <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="cb448bb4977ce366ceb7344085cc7050"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::end" ref="cb448bb4977ce366ceb7344085cc7050" args="() const " -->
+const_iterator&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00168.html#cb448bb4977ce366ceb7344085cc7050">end</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">end const iterator <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="b22c54990b2362cd93c1a8f73de140bc"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::range" ref="b22c54990b2362cd93c1a8f73de140bc" args="(size_t grainsize=1)" -->
+range_type&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00168.html#b22c54990b2362cd93c1a8f73de140bc">range</a> (size_t grainsize=1)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Get range for parallel algorithms. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="3b068000cf4dbf9b40f8bb7e3fc53e0b"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::range" ref="3b068000cf4dbf9b40f8bb7e3fc53e0b" args="(size_t grainsize=1) const " -->
+const_range_type&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00168.html#3b068000cf4dbf9b40f8bb7e3fc53e0b">range</a> (size_t grainsize=1) const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Get const range for parallel algorithms. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a8764176d4b6014c5d65f1051851abc8"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::clear" ref="a8764176d4b6014c5d65f1051851abc8" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00168.html#a8764176d4b6014c5d65f1051851abc8">clear</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destroys local copies. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="43bbf3a313c5431559df2946f76221e0"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::enumerable_thread_specific" ref="43bbf3a313c5431559df2946f76221e0" args="(const enumerable_thread_specific&lt; U, Alloc, Cachetype &gt; &amp;other)" -->
+template&lt;typename U, typename Alloc, ets_key_usage_type Cachetype&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>enumerable_thread_specific</b> (const <a class="el" href="a00168.html">enumerable_thread_specific</a>&lt; U, Alloc, Cachetype &gt; &amp;other)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="25a9c336cfd5a171c29cbdd0feb84249"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::enumerable_thread_specific" ref="25a9c336cfd5a171c29cbdd0feb84249" args="(const enumerable_thread_specific &amp;other)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>enumerable_thread_specific</b> (const <a class="el" href="a00168.html">enumerable_thread_specific</a> &amp;other)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="1f8f467eb4c44c58614d0f9dce687477"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::operator=" ref="1f8f467eb4c44c58614d0f9dce687477" args="(const enumerable_thread_specific &amp;other)" -->
+<a class="el" href="a00168.html">enumerable_thread_specific</a> &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><b>operator=</b> (const <a class="el" href="a00168.html">enumerable_thread_specific</a> &amp;other)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="0e7ebbfd8e04eb22fe59b581206e7888"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::operator=" ref="0e7ebbfd8e04eb22fe59b581206e7888" args="(const enumerable_thread_specific&lt; U, Alloc, Cachetype &gt; &amp;other)" -->
+template&lt;typename U, typename Alloc, ets_key_usage_type Cachetype&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top"><a class="el" href="a00168.html">enumerable_thread_specific</a> &amp;&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>operator=</b> (const <a class="el" href="a00168.html">enumerable_thread_specific</a>&lt; U, Alloc, Cachetype &gt; &amp;other)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="1eac7eff6e21b7fc299b67acb36eb3c7"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::combine" ref="1eac7eff6e21b7fc299b67acb36eb3c7" args="(combine_func_t f_combine)" -->
+template&lt;typename combine_func_t&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">T&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>combine</b> (combine_func_t f_combine)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="f671d7bd4c1d3a750b4be4c32a489c55"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::combine_each" ref="f671d7bd4c1d3a750b4be4c32a489c55" args="(combine_func_t f_combine)" -->
+template&lt;typename combine_func_t&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>combine_each</b> (combine_func_t f_combine)</td></tr>
+
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="45364b2f249ceb935cff05c29fcfcf5c"></a><!-- doxytag: member="tbb::interface6::enumerable_thread_specific::enumerable_thread_specific" ref="45364b2f249ceb935cff05c29fcfcf5c" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>enumerable_thread_specific</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><b>generic_range_type</b></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A generic range, used to create range objects from the iterators. <br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename T, typename Allocator = cache_aligned_allocator&lt;T&gt;, ets_key_usage_type ETS_key_type = ets_no_key&gt;<br>
+ class tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</h3>
+
+The <a class="el" href="a00168.html">enumerable_thread_specific</a> container. 
+<p>
+<a class="el" href="a00168.html">enumerable_thread_specific</a> has the following properties:<ul>
+<li>thread-local copies are lazily created, with default, exemplar or function initialization.</li><li>thread-local copies do not move (during lifetime, and excepting <a class="el" href="a00168.html#a8764176d4b6014c5d65f1051851abc8">clear()</a>) so the address of a copy is invariant.</li><li>the contained objects need not have operator=() defined if combine is not used.</li><li><a class="el" href="a00168.html">enumerable_thread_specific</a> containers may be copy-constructed or assigned.</li><li>thread-local copies can be managed by hash-table, or can be accessed via TLS storage for speed.</li><li>outside of parallel contexts, the contents of all thread-local copies are accessible by iterator or using combine or combine_each methods</li></ul>
+<p>
+<dl compact><dt><b>Segmented iterator</b></dt><dd>When the thread-local objects are containers with input_iterators defined, a segmented iterator may be used to iterate over all the elements of all thread-local copies.</dd></dl>
+<dl compact><dt><b>combine and combine_each</b></dt><dd><ul>
+<li>Both methods are defined for <a class="el" href="a00168.html">enumerable_thread_specific</a>.</li><li>combine() requires that the type T have operator=() defined.</li><li>Neither method modifies the contents of the object (though there is no guarantee that the applied methods do not modify the object).</li><li>Both are evaluated in a serial context (the methods are assumed to be non-benign). </li></ul>
+</dd></dl>
+
+<p>
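+<p>As a rough orientation only (this paragraph and the fragment below are not part of the generated reference, and a C++0x lambda-capable compiler is assumed), a per-thread partial sum reduced with combine() after a parallel loop could look as follows:</p>
+<div class="fragment"><pre class="fragment">
+#include "tbb/enumerable_thread_specific.h"
+#include "tbb/parallel_for.h"
+#include "tbb/blocked_range.h"
+#include &lt;functional&gt;
+
+int parallel_sum(const int* a, size_t n) {
+    // Exemplar constructor: each lazily created thread-local copy starts as a copy of 0.
+    tbb::enumerable_thread_specific&lt;int&gt; partial(0);
+    tbb::parallel_for(tbb::blocked_range&lt;size_t&gt;(0, n),
+        [&amp;](const tbb::blocked_range&lt;size_t&gt;&amp; r) {
+            int&amp; local = partial.local();   // calling thread's copy, created on first use
+            for (size_t i = r.begin(); i != r.end(); ++i)
+                local += a[i];
+        });
+    // Outside the parallel region, reduce all thread-local copies serially.
+    return partial.combine(std::plus&lt;int&gt;());
+}
+</pre></div>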
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00359.html">enumerable_thread_specific.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00169.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00169.html
new file mode 100644 (file)
index 0000000..9e04581
--- /dev/null
@@ -0,0 +1,156 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::filter Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00169.html">filter</a></div>
+<h1>tbb::filter Class Reference<br>
+<small>
+[<a class="el" href="a00280.html">Algorithms</a>]</small>
+</h1><!-- doxytag: class="tbb::filter" -->A stage in a pipeline.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00419.html">pipeline.h</a>&gt;</code>
+<p>
+<p>Inheritance diagram for tbb::filter:
+<p><center><img src="a00169.png" usemap="#tbb::filter_map" border="0" alt=""></center>
+<map name="tbb::filter_map">
+<area href="a00214.html" alt="tbb::thread_bound_filter" shape="rect" coords="0,56,145,80">
+</map>
+<a href="a00049.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="fcfec27656a69ff2072802ac001e936f"></a><!-- doxytag: member="tbb::filter::is_serial" ref="fcfec27656a69ff2072802ac001e936f" args="() const " -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00169.html#fcfec27656a69ff2072802ac001e936f">is_serial</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">True if filter is serial. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="cd53206c4795ef2df5df26b795caf692"></a><!-- doxytag: member="tbb::filter::is_ordered" ref="cd53206c4795ef2df5df26b795caf692" args="() const " -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00169.html#cd53206c4795ef2df5df26b795caf692">is_ordered</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">True if filter must receive stream in order. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="15c29cae5d237e6d63dbfe5c94af89d5"></a><!-- doxytag: member="tbb::filter::is_bound" ref="15c29cae5d237e6d63dbfe5c94af89d5" args="() const " -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00169.html#15c29cae5d237e6d63dbfe5c94af89d5">is_bound</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">True if filter is thread-bound. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">virtual void *&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00169.html#fa1b3dc1f4f47563ccab7f4d92f5b543">operator()</a> (void *item)=0</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Operate on an item from the input stream, and return item for output stream.  <a href="#fa1b3dc1f4f47563ccab7f4d92f5b543"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">virtual __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00169.html#66d159f362293e3964ba3da8bc1d2604">~filter</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destroy filter.  <a href="#66d159f362293e3964ba3da8bc1d2604"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">virtual void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00169.html#56275eb889c77c4807967133e21401bd">finalize</a> (void *)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destroys item if pipeline was cancelled.  <a href="#56275eb889c77c4807967133e21401bd"></a><br></td></tr>
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="8228ce0751009045e8158d2e642715a6"></a><!-- doxytag: member="tbb::filter::internal::stage_task" ref="8228ce0751009045e8158d2e642715a6" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::stage_task</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="05d67150ca324698ba852553e223d3eb"></a><!-- doxytag: member="tbb::filter::internal::pipeline_root_task" ref="05d67150ca324698ba852553e223d3eb" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::pipeline_root_task</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="b211ee58bada099e24280d478017cbff"></a><!-- doxytag: member="tbb::filter::pipeline" ref="b211ee58bada099e24280d478017cbff" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>pipeline</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e43b69a482df8e053cb199af69eb5139"></a><!-- doxytag: member="tbb::filter::thread_bound_filter" ref="e43b69a482df8e053cb199af69eb5139" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>thread_bound_filter</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+A stage in a pipeline. 
+<p>
+<hr><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" name="66d159f362293e3964ba3da8bc1d2604"></a><!-- doxytag: member="tbb::filter::~filter" ref="66d159f362293e3964ba3da8bc1d2604" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">virtual __TBB_EXPORTED_METHOD tbb::filter::~filter           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [virtual]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Destroy filter. 
+<p>
+If the filter was added to a pipeline, the pipeline must be destroyed first. 
+</div>
+</div><p>
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="56275eb889c77c4807967133e21401bd"></a><!-- doxytag: member="tbb::filter::finalize" ref="56275eb889c77c4807967133e21401bd" args="(void *)" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">virtual void tbb::filter::finalize           </td>
+          <td>(</td>
+          <td class="paramtype">void *&nbsp;</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline, virtual]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Destroys item if pipeline was cancelled. 
+<p>
+Required to prevent memory leaks. Note it can be called concurrently even for serial filters. 
+</div>
+</div><p>
+<a class="anchor" name="fa1b3dc1f4f47563ccab7f4d92f5b543"></a><!-- doxytag: member="tbb::filter::operator()" ref="fa1b3dc1f4f47563ccab7f4d92f5b543" args="(void *item)=0" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">virtual void* tbb::filter::operator()           </td>
+          <td>(</td>
+          <td class="paramtype">void *&nbsp;</td>
+          <td class="paramname"> <em>item</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [pure virtual]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Operate on an item from the input stream, and return item for output stream. 
+<p>
+Returns NULL if filter is a sink. 
+</div>
+</div><p>
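+<p>For illustration only (the class below is hypothetical, not part of the library), a user-defined stage derives from tbb::filter, selects a mode in the protected constructor, and overrides operator(); a sink stage would return NULL instead of forwarding the item:</p>
+<div class="fragment"><pre class="fragment">
+#include "tbb/pipeline.h"
+
+// Hypothetical middle stage that squares the integers flowing through the pipeline.
+class SquareFilter : public tbb::filter {
+public:
+    SquareFilter() : tbb::filter(tbb::filter::parallel) {}   // may process several items concurrently
+    /*override*/ void* operator()(void* item) {
+        int* p = static_cast&lt;int*&gt;(item);
+        *p = *p * *p;
+        return p;   // forwarded to the next stage; return NULL from the last (sink) stage
+    }
+};
+</pre></div>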
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00419.html">pipeline.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00169.png b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00169.png
new file mode 100644 (file)
index 0000000..e668b17
Binary files /dev/null and b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00169.png differ
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00170.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00170.html
new file mode 100644 (file)
index 0000000..5e3dd8e
--- /dev/null
@@ -0,0 +1,76 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::interface5::filter_t&lt; T, U &gt; Class Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<b>interface5</b>::<a class="el" href="a00170.html">filter_t</a></div>
+<h1>tbb::interface5::filter_t&lt; T, U &gt; Class Template Reference</h1><!-- doxytag: class="tbb::interface5::filter_t" -->Class representing a chain of type-safe pipeline filters.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00419.html">pipeline.h</a>&gt;</code>
+<p>
+<a href="a00053.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e03acdfacd859b954ee45c41fad09992"></a><!-- doxytag: member="tbb::interface5::filter_t::filter_t" ref="e03acdfacd859b954ee45c41fad09992" args="(const filter_t&lt; T, U &gt; &amp;rhs)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>filter_t</b> (const <a class="el" href="a00170.html">filter_t</a>&lt; T, U &gt; &amp;rhs)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="08960002b559318d985ab1c90344ed06"></a><!-- doxytag: member="tbb::interface5::filter_t::filter_t" ref="08960002b559318d985ab1c90344ed06" args="(tbb::filter::mode mode, const Body &amp;body)" -->
+template&lt;typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>filter_t</b> (tbb::filter::mode mode, const Body &amp;body)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a022e50a3f46111e385d8859699d5b33"></a><!-- doxytag: member="tbb::interface5::filter_t::operator=" ref="a022e50a3f46111e385d8859699d5b33" args="(const filter_t&lt; T, U &gt; &amp;rhs)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><b>operator=</b> (const <a class="el" href="a00170.html">filter_t</a>&lt; T, U &gt; &amp;rhs)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="9785b1fbc87683bcfe0383b117b41a69"></a><!-- doxytag: member="tbb::interface5::filter_t::clear" ref="9785b1fbc87683bcfe0383b117b41a69" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><b>clear</b> ()</td></tr>
+
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="fff802cb5656e3788a89f3314aed2efe"></a><!-- doxytag: member="tbb::interface5::filter_t::internal::pipeline_proxy" ref="fff802cb5656e3788a89f3314aed2efe" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::pipeline_proxy</b></td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="85c2892eff1fddcd06e28911e75838bd"></a><!-- doxytag: member="tbb::interface5::filter_t::make_filter" ref="85c2892eff1fddcd06e28911e75838bd" args="(tbb::filter::mode, const Body &amp;)" -->
+template&lt;typename T_, typename U_, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top"><a class="el" href="a00170.html">filter_t</a>&lt; T_, U_ &gt;&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00170.html#85c2892eff1fddcd06e28911e75838bd">make_filter</a> (tbb::filter::mode, const Body &amp;)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Create a filter to participate in parallel_pipeline. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="6b9b1f5ce7f2121f093b8cd1d84707ad"></a><!-- doxytag: member="tbb::interface5::filter_t::operator &amp;" ref="6b9b1f5ce7f2121f093b8cd1d84707ad" args="(const filter_t&lt; T_, V_ &gt; &amp;, const filter_t&lt; V_, U_ &gt; &amp;)" -->
+template&lt;typename T_, typename V_, typename U_&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top"><a class="el" href="a00170.html">filter_t</a>&lt; T_, U_ &gt;&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>operator &amp;</b> (const <a class="el" href="a00170.html">filter_t</a>&lt; T_, V_ &gt; &amp;, const <a class="el" href="a00170.html">filter_t</a>&lt; V_, U_ &gt; &amp;)</td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename T, typename U&gt;<br>
+ class tbb::interface5::filter_t&lt; T, U &gt;</h3>
+
+Class representing a chain of type-safe pipeline filters. 
+<p>
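+<p>As a sketch (the stage bodies and token count below are illustrative, and a C++0x lambda-capable compiler is assumed), filter_t chains are typically built with make_filter, concatenated with operator&amp;, and executed by parallel_pipeline:</p>
+<div class="fragment"><pre class="fragment">
+#include "tbb/pipeline.h"
+#include &lt;cmath&gt;
+#include &lt;cstdio&gt;
+
+void print_square_roots(double* data, size_t n) {
+    size_t i = 0;
+    tbb::parallel_pipeline(16,   // at most 16 items in flight
+        tbb::make_filter&lt;void,double*&gt;(tbb::filter::serial_in_order,
+            [&amp;](tbb::flow_control&amp; fc) -&gt; double* {
+                if (i == n) { fc.stop(); return NULL; }   // end of input
+                return &amp;data[i++];
+            }) &amp;
+        tbb::make_filter&lt;double*,double&gt;(tbb::filter::parallel,
+            [](double* p) { return std::sqrt(*p); }) &amp;
+        tbb::make_filter&lt;double,void&gt;(tbb::filter::serial_in_order,
+            [](double x) { std::printf("%g\n", x); }));
+}
+</pre></div>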
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00419.html">pipeline.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00171.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00171.html
new file mode 100644 (file)
index 0000000..cbff529
--- /dev/null
@@ -0,0 +1,53 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::final_scan_tag Struct Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00171.html">final_scan_tag</a></div>
+<h1>tbb::final_scan_tag Struct Reference<br>
+<small>
+[<a class="el" href="a00280.html">Algorithms</a>]</small>
+</h1><!-- doxytag: class="tbb::final_scan_tag" -->Used to indicate that the final scan is being performed.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00401.html">parallel_scan.h</a>&gt;</code>
+<p>
+<a href="a00042.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Static Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="06888e4a548788fcb2c15542d428d8c9"></a><!-- doxytag: member="tbb::final_scan_tag::is_final_scan" ref="06888e4a548788fcb2c15542d428d8c9" args="()" -->
+static bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_final_scan</b> ()</td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Used to indicate that the final scan is being performed. 
+<p>
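+<p>For orientation (the class and variable names below are illustrative, not taken from the library), this tag and tbb::pre_scan_tag are the two types with which a parallel_scan body's operator() is instantiated; output is typically written only when Tag::is_final_scan() is true:</p>
+<div class="fragment"><pre class="fragment">
+#include "tbb/parallel_scan.h"
+#include "tbb/blocked_range.h"
+
+// Running (prefix) sum: y[i] = x[0] + ... + x[i].
+class RunningSum {
+    const int* x;
+    int* y;
+public:
+    int sum;
+    RunningSum(const int x_[], int y_[]) : x(x_), y(y_), sum(0) {}
+    RunningSum(RunningSum&amp; other, tbb::split) : x(other.x), y(other.y), sum(0) {}
+    template&lt;typename Tag&gt;
+    void operator()(const tbb::blocked_range&lt;int&gt;&amp; r, Tag) {
+        int tmp = sum;
+        for (int i = r.begin(); i != r.end(); ++i) {
+            tmp += x[i];
+            if (Tag::is_final_scan())   // true only during the second, final pass
+                y[i] = tmp;
+        }
+        sum = tmp;
+    }
+    void reverse_join(RunningSum&amp; left) { sum = left.sum + sum; }
+    void assign(RunningSum&amp; other) { sum = other.sum; }
+};
+
+// Usage: RunningSum body(x, y); tbb::parallel_scan(tbb::blocked_range&lt;int&gt;(0, n), body);
+</pre></div>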
+<hr>The documentation for this struct was generated from the following file:<ul>
+<li><a class="el" href="a00401.html">parallel_scan.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00172.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00172.html
new file mode 100644 (file)
index 0000000..dc09ac6
--- /dev/null
@@ -0,0 +1,54 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::interface5::flow_control Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<b>interface5</b>::<a class="el" href="a00172.html">flow_control</a></div>
+<h1>tbb::interface5::flow_control Class Reference</h1><!-- doxytag: class="tbb::interface5::flow_control" -->input_filter control to signal end-of-input for parallel_pipeline  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00419.html">pipeline.h</a>&gt;</code>
+<p>
+<a href="a00052.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f907b38fcfbb48d2e949147fa3379037"></a><!-- doxytag: member="tbb::interface5::flow_control::stop" ref="f907b38fcfbb48d2e949147fa3379037" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><b>stop</b> ()</td></tr>
+
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="15bb80e25fbcc4213e0053d558df6462"></a><!-- doxytag: member="tbb::interface5::flow_control::internal::concrete_filter" ref="15bb80e25fbcc4213e0053d558df6462" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::concrete_filter</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+input_filter control to signal end-of-input for parallel_pipeline 
+<p>
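+<p>A minimal sketch of this class's role (lambda-capable compiler assumed; the helper function below is hypothetical): the body of the first parallel_pipeline stage receives a flow_control and calls stop() once the input is exhausted; whatever is returned after stop() is discarded:</p>
+<div class="fragment"><pre class="fragment">
+#include "tbb/pipeline.h"
+#include &lt;iostream&gt;
+
+// Build the input stage of a pipeline: reads integers from std::cin until extraction fails.
+tbb::filter_t&lt;void,int&gt; make_input_stage() {
+    return tbb::make_filter&lt;void,int&gt;(tbb::filter::serial_in_order,
+        [](tbb::flow_control&amp; fc) -&gt; int {
+            int v;
+            if (std::cin &gt;&gt; v)
+                return v;       // one item per invocation
+            fc.stop();          // end-of-input: the pipeline drains and finishes
+            return 0;           // this value is ignored after stop()
+        });
+}
+</pre></div>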
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00419.html">pipeline.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00173.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00173.html
new file mode 100644 (file)
index 0000000..7bb04c0
--- /dev/null
@@ -0,0 +1,50 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::improper_lock Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00173.html">improper_lock</a></div>
+<h1>tbb::improper_lock Class Reference</h1><!-- doxytag: class="tbb::improper_lock" -->Exception for PPL locks.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00464.html">tbb_exception.h</a>&gt;</code>
+<p>
+<a href="a00095.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="74213c2d20407e1fa16b70ca55ef27c6"></a><!-- doxytag: member="tbb::improper_lock::what" ref="74213c2d20407e1fa16b70ca55ef27c6" args="() const " -->
+const char *&nbsp;</td><td class="memItemRight" valign="bottom"><b>what</b> () const   throw ()</td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Exception for PPL locks. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00464.html">tbb_exception.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00174.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00174.html
new file mode 100644 (file)
index 0000000..b516f40
--- /dev/null
@@ -0,0 +1,50 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::invalid_multiple_scheduling Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00174.html">invalid_multiple_scheduling</a></div>
+<h1>tbb::invalid_multiple_scheduling Class Reference</h1><!-- doxytag: class="tbb::invalid_multiple_scheduling" -->Exception for repeated scheduling of the same task_handle.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00464.html">tbb_exception.h</a>&gt;</code>
+<p>
+<a href="a00097.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="57082ccac880db17144db76c0f94701e"></a><!-- doxytag: member="tbb::invalid_multiple_scheduling::what" ref="57082ccac880db17144db76c0f94701e" args="() const " -->
+const char *&nbsp;</td><td class="memItemRight" valign="bottom"><b>what</b> () const   throw ()</td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Exception for repeated scheduling of the same task_handle. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00464.html">tbb_exception.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00175.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00175.html
new file mode 100644 (file)
index 0000000..6167cb8
--- /dev/null
@@ -0,0 +1,50 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::missing_wait Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00175.html">missing_wait</a></div>
+<h1>tbb::missing_wait Class Reference</h1><!-- doxytag: class="tbb::missing_wait" -->Exception for missing wait on structured_task_group.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00464.html">tbb_exception.h</a>&gt;</code>
+<p>
+<a href="a00096.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="fe3f8527f9013266dc845a4ff22dd2cf"></a><!-- doxytag: member="tbb::missing_wait::what" ref="fe3f8527f9013266dc845a4ff22dd2cf" args="() const " -->
+const char *&nbsp;</td><td class="memItemRight" valign="bottom"><b>what</b> () const   throw ()</td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Exception for missing wait on structured_task_group. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00464.html">tbb_exception.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00176.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00176.html
new file mode 100644 (file)
index 0000000..cf751a3
--- /dev/null
@@ -0,0 +1,171 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::movable_exception&lt; ExceptionData &gt; Class Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00176.html">movable_exception</a></div>
+<h1>tbb::movable_exception&lt; ExceptionData &gt; Class Template Reference</h1><!-- doxytag: class="tbb::movable_exception" --><!-- doxytag: inherits="tbb::tbb_exception" -->Template that can be used to implement an exception that transfers arbitrary ExceptionData to the root thread.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00464.html">tbb_exception.h</a>&gt;</code>
+<p>
+<p>Inheritance diagram for tbb::movable_exception&lt; ExceptionData &gt;:
+<p><center><img src="a00176.png" usemap="#tbb::movable_exception< ExceptionData >_map" border="0" alt=""></center>
+<map name="tbb::movable_exception< ExceptionData >_map">
+<area href="a00211.html" alt="tbb::tbb_exception" shape="rect" coords="0,0,248,24">
+</map>
+<a href="a00100.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="43dfdc5298df3aa8a7d7d146604f84ef"></a><!-- doxytag: member="tbb::movable_exception::movable_exception" ref="43dfdc5298df3aa8a7d7d146604f84ef" args="(const ExceptionData &amp;data_)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>movable_exception</b> (const ExceptionData &amp;data_)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="48f97fbf1e5d66c5977891f4375ee4d9"></a><!-- doxytag: member="tbb::movable_exception::movable_exception" ref="48f97fbf1e5d66c5977891f4375ee4d9" args="(const movable_exception &amp;src)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>movable_exception</b> (const <a class="el" href="a00176.html">movable_exception</a> &amp;src)  throw ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="dfafefe27f2afaa7c7894ca9c71cc568"></a><!-- doxytag: member="tbb::movable_exception::operator=" ref="dfafefe27f2afaa7c7894ca9c71cc568" args="(const movable_exception &amp;src)" -->
+const <a class="el" href="a00176.html">movable_exception</a> &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><b>operator=</b> (const <a class="el" href="a00176.html">movable_exception</a> &amp;src)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="47114cbc20c8e13cac3108749a3e085d"></a><!-- doxytag: member="tbb::movable_exception::data" ref="47114cbc20c8e13cac3108749a3e085d" args="()" -->
+ExceptionData &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><b>data</b> ()  throw ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="7d93c000d862d2558efd9a34817adbce"></a><!-- doxytag: member="tbb::movable_exception::data" ref="7d93c000d862d2558efd9a34817adbce" args="() const " -->
+const ExceptionData &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><b>data</b> () const   throw ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="bc5f5c4739b17ac5211ac58226c2f5a5"></a><!-- doxytag: member="tbb::movable_exception::name" ref="bc5f5c4739b17ac5211ac58226c2f5a5" args="() const " -->
+const char *&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00176.html#bc5f5c4739b17ac5211ac58226c2f5a5">name</a> () const   throw ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Returns RTTI name of the originally intercepted exception. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="b33a89bccf0c63106f1270c7bfaaf54f"></a><!-- doxytag: member="tbb::movable_exception::what" ref="b33a89bccf0c63106f1270c7bfaaf54f" args="() const " -->
+const char *&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00176.html#b33a89bccf0c63106f1270c7bfaaf54f">what</a> () const   throw ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Returns the result of originally intercepted exception's <a class="el" href="a00176.html#b33a89bccf0c63106f1270c7bfaaf54f">what()</a> method. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="el" href="a00176.html">movable_exception</a> *&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00176.html#1aea0ad179d6f0481fe7f3495f66adf9">move</a> ()  throw ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Creates and returns pointer to the deep copy of this exception object.  <a href="#1aea0ad179d6f0481fe7f3495f66adf9"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00176.html#7a46873119d9f85a7b0009c13e41a258">destroy</a> ()  throw ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destroys objects created by the <a class="el" href="a00176.html#1aea0ad179d6f0481fe7f3495f66adf9">move()</a> method.  <a href="#7a46873119d9f85a7b0009c13e41a258"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00176.html#17cffba35811c92b7e65d63506b69602">throw_self</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Throws this exception object.  <a href="#17cffba35811c92b7e65d63506b69602"></a><br></td></tr>
+<tr><td colspan="2"><br><h2>Protected Attributes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a8c0ae2089ae784b28907cf748b89416"></a><!-- doxytag: member="tbb::movable_exception::my_exception_data" ref="a8c0ae2089ae784b28907cf748b89416" args="" -->
+ExceptionData&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00176.html#a8c0ae2089ae784b28907cf748b89416">my_exception_data</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">User data. <br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename ExceptionData&gt;<br>
+ class tbb::movable_exception&lt; ExceptionData &gt;</h3>
+
+Template that can be used to implement an exception that transfers arbitrary ExceptionData to the root thread. 
+<p>
+Code using TBB can instantiate this template with an arbitrary ExceptionData type and throw this exception object. Such exceptions are intercepted by the TBB scheduler and delivered to the root thread. <dl compact><dt><b>See also:</b></dt><dd><a class="el" href="a00211.html">tbb::tbb_exception</a> </dd></dl>
+
+<p>
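+<p>For orientation (the data type, function, and parallel loop below are hypothetical, and a lambda-capable compiler is assumed), a worker thread can throw a movable_exception carrying plain data; the root thread then catches the copy that the scheduler moved across threads:</p>
+<div class="fragment"><pre class="fragment">
+#include "tbb/tbb_exception.h"
+#include "tbb/parallel_for.h"
+#include "tbb/blocked_range.h"
+#include &lt;cstdio&gt;
+
+struct BadInput { size_t index; };   // plain data shipped back to the root thread
+
+void check_all(const int* a, size_t n) {
+    try {
+        tbb::parallel_for(tbb::blocked_range&lt;size_t&gt;(0, n),
+            [=](const tbb::blocked_range&lt;size_t&gt;&amp; r) {
+                for (size_t i = r.begin(); i != r.end(); ++i)
+                    if (a[i] &lt; 0) {
+                        BadInput info = { i };
+                        throw tbb::movable_exception&lt;BadInput&gt;(info);
+                    }
+            });
+    } catch (tbb::movable_exception&lt;BadInput&gt;&amp; e) {
+        // e.data() is the deep copy delivered to the root thread.
+        std::fprintf(stderr, "negative value at index %lu\n",
+                     (unsigned long)e.data().index);
+    }
+}
+</pre></div>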
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="7a46873119d9f85a7b0009c13e41a258"></a><!-- doxytag: member="tbb::movable_exception::destroy" ref="7a46873119d9f85a7b0009c13e41a258" args="()" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename ExceptionData&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">void <a class="el" href="a00176.html">tbb::movable_exception</a>&lt; ExceptionData &gt;::destroy           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%">  throw ()<code> [inline, virtual]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Destroys objects created by the <a class="el" href="a00176.html#1aea0ad179d6f0481fe7f3495f66adf9">move()</a> method. 
+<p>
+Frees memory and calls destructor for this exception object. Can and must be used only on objects created by the move method. 
+<p>
+Implements <a class="el" href="a00211.html#66c94938eca8bf88b76f3eccaaf215d8">tbb::tbb_exception</a>.
+</div>
+</div><p>
+<a class="anchor" name="1aea0ad179d6f0481fe7f3495f66adf9"></a><!-- doxytag: member="tbb::movable_exception::move" ref="1aea0ad179d6f0481fe7f3495f66adf9" args="()" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename ExceptionData&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="a00176.html">movable_exception</a>* <a class="el" href="a00176.html">tbb::movable_exception</a>&lt; ExceptionData &gt;::move           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%">  throw ()<code> [inline, virtual]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Creates and returns pointer to the deep copy of this exception object. 
+<p>
+Move semantics is allowed. 
+<p>
+Implements <a class="el" href="a00211.html#3e3482bf264d4ca4dde046cd9c02c766">tbb::tbb_exception</a>.
+</div>
+</div><p>
+<a class="anchor" name="17cffba35811c92b7e65d63506b69602"></a><!-- doxytag: member="tbb::movable_exception::throw_self" ref="17cffba35811c92b7e65d63506b69602" args="()" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename ExceptionData&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">void <a class="el" href="a00176.html">tbb::movable_exception</a>&lt; ExceptionData &gt;::throw_self           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline, virtual]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Throws this exception object. 
+<p>
+Make sure that if you have several levels of derivation from this interface, you implement or override this method at the most derived level. The implementation is as simple as "throw *this;". Failure to do so will result in an exception of a base class type being thrown. 
+<p>
+Implements <a class="el" href="a00211.html#8588e07fa49692f4d734e4f2e4f048f4">tbb::tbb_exception</a>.
+</div>
+</div><p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00464.html">tbb_exception.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00176.png b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00176.png
new file mode 100644 (file)
index 0000000..a85d170
Binary files /dev/null and b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00176.png differ
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00177.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00177.html
new file mode 100644 (file)
index 0000000..f32d238
--- /dev/null
@@ -0,0 +1,136 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::mutex Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00177.html">mutex</a></div>
+<h1>tbb::mutex Class Reference<br>
+<small>
+[<a class="el" href="a00283.html">Synchronization</a>]</small>
+</h1><!-- doxytag: class="tbb::mutex" -->Wrapper around the platform's native mutual-exclusion lock.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00372.html">mutex.h</a>&gt;</code>
+<p>
+<a href="a00034.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="9f1ec84d5815263ceae853f06ddb4cac"></a><!-- doxytag: member="tbb::mutex::native_handle_type" ref="9f1ec84d5815263ceae853f06ddb4cac" args="" -->
+typedef LPCRITICAL_SECTION&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00177.html#9f1ec84d5815263ceae853f06ddb4cac">native_handle_type</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Type of the underlying native handle (LPCRITICAL_SECTION on Windows, pthread_mutex_t* elsewhere). <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a1f4d4b88a03eb3e95c76569b91a0792"></a><!-- doxytag: member="tbb::mutex::native_handle_type" ref="a1f4d4b88a03eb3e95c76569b91a0792" args="" -->
+typedef pthread_mutex_t *&nbsp;</td><td class="memItemRight" valign="bottom"><b>native_handle_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="c907aa5ae5274da759c4188c8ab196034e5963fe9e27d7c4d0a2d4ffa24a50a7"></a><!-- doxytag: member="tbb::mutex::INITIALIZED" ref="c907aa5ae5274da759c4188c8ab196034e5963fe9e27d7c4d0a2d4ffa24a50a7" args="" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>INITIALIZED</b> = 0x1234</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="c907aa5ae5274da759c4188c8ab19603646f4766864612448d6f9b21ff61abcd"></a><!-- doxytag: member="tbb::mutex::DESTROYED" ref="c907aa5ae5274da759c4188c8ab19603646f4766864612448d6f9b21ff61abcd" args="" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>DESTROYED</b> = 0x789A</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="c907aa5ae5274da759c4188c8ab196031de5796ebbbaa1e30d19e5d54f0f6555"></a><!-- doxytag: member="tbb::mutex::HELD" ref="c907aa5ae5274da759c4188c8ab196031de5796ebbbaa1e30d19e5d54f0f6555" args="" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>HELD</b> = 0x56CD</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">enum &nbsp;</td><td class="memItemRight" valign="bottom"><b>state_t</b> { <b>INITIALIZED</b> = 0x1234, 
+<b>DESTROYED</b> = 0x789A, 
+<b>HELD</b> = 0x56CD
+ }</td></tr>
+
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="05313cb77d4f85213103d4dab74ed454"></a><!-- doxytag: member="tbb::mutex::mutex" ref="05313cb77d4f85213103d4dab74ed454" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00177.html#05313cb77d4f85213103d4dab74ed454">mutex</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct unacquired mutex. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="4470e61c24c129a0299ca6c17240adbb"></a><!-- doxytag: member="tbb::mutex::lock" ref="4470e61c24c129a0299ca6c17240adbb" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00177.html#4470e61c24c129a0299ca6c17240adbb">lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Acquire lock. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00177.html#4331652c79dea1c1131bd59ab161b234">try_lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Try acquiring lock (non-blocking).  <a href="#4331652c79dea1c1131bd59ab161b234"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="5fc9ef443ae75d966695546be399cc6b"></a><!-- doxytag: member="tbb::mutex::unlock" ref="5fc9ef443ae75d966695546be399cc6b" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00177.html#5fc9ef443ae75d966695546be399cc6b">unlock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Release lock. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="caf34349c0fbe7b44f1a2ca0a3150dd0"></a><!-- doxytag: member="tbb::mutex::native_handle" ref="caf34349c0fbe7b44f1a2ca0a3150dd0" args="()" -->
+<a class="el" href="a00177.html#9f1ec84d5815263ceae853f06ddb4cac">native_handle_type</a>&nbsp;</td><td class="memItemRight" valign="bottom"><b>native_handle</b> ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="795649a185b0d6af6dc81c5f378616dd"></a><!-- doxytag: member="tbb::mutex::set_state" ref="795649a185b0d6af6dc81c5f378616dd" args="(state_t to)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00177.html#795649a185b0d6af6dc81c5f378616dd">set_state</a> (state_t to)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Set the internal state. <br></td></tr>
+<tr><td colspan="2"><br><h2>Static Public Attributes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="17b78176054e2bac1ff35b9535d1ea0c"></a><!-- doxytag: member="tbb::mutex::is_rw_mutex" ref="17b78176054e2bac1ff35b9535d1ea0c" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_rw_mutex</b> = false</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="d4558e5a7c5b8d8e158ec7a1079669bd"></a><!-- doxytag: member="tbb::mutex::is_recursive_mutex" ref="d4558e5a7c5b8d8e158ec7a1079669bd" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_recursive_mutex</b> = false</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f023a29c61451d02359095cbff0f3d45"></a><!-- doxytag: member="tbb::mutex::is_fair_mutex" ref="f023a29c61451d02359095cbff0f3d45" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_fair_mutex</b> = false</td></tr>
+
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a36f15b770a1bb0b56e33d7fa75ebd1a"></a><!-- doxytag: member="tbb::mutex::scoped_lock" ref="a36f15b770a1bb0b56e33d7fa75ebd1a" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>scoped_lock</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00178.html">scoped_lock</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">The scoped locking pattern.  <a href="a00178.html#_details">More...</a><br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Wrapper around the platform's native mutex. 
+<p>
+It wraps the operating system's mutual-exclusion primitive (a CRITICAL_SECTION on Windows, a pthread_mutex_t elsewhere). 
+<p>
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="4331652c79dea1c1131bd59ab161b234"></a><!-- doxytag: member="tbb::mutex::try_lock" ref="4331652c79dea1c1131bd59ab161b234" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">bool tbb::mutex::try_lock           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Try acquiring lock (non-blocking). 
+<p>
+Returns true if the lock was acquired; false otherwise. 
+</div>
+</div><p>
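+<p>
+A minimal usage sketch of the class (not taken from the TBB headers); the variable names are illustrative:
+<div class="fragment"><pre class="fragment">
+#include "tbb/mutex.h"
+
+tbb::mutex counter_mutex;   // protects counter (illustrative names)
+long counter = 0;
+
+void increment() {
+    counter_mutex.lock();            // blocks until the mutex is acquired
+    ++counter;
+    counter_mutex.unlock();
+}
+
+void increment_if_free() {
+    if( counter_mutex.try_lock() ) { // non-blocking attempt
+        ++counter;
+        counter_mutex.unlock();
+    }
+}
+</pre></div>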
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00372.html">mutex.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00178.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00178.html
new file mode 100644 (file)
index 0000000..52cd3b6
--- /dev/null
@@ -0,0 +1,77 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::mutex::scoped_lock Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00177.html">mutex</a>::<a class="el" href="a00178.html">scoped_lock</a></div>
+<h1>tbb::mutex::scoped_lock Class Reference</h1><!-- doxytag: class="tbb::mutex::scoped_lock" -->The scoped locking pattern.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00372.html">mutex.h</a>&gt;</code>
+<p>
+<a href="a00035.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="1d403ae51b484df5d86d85ae38f11e6e"></a><!-- doxytag: member="tbb::mutex::scoped_lock::scoped_lock" ref="1d403ae51b484df5d86d85ae38f11e6e" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00178.html#1d403ae51b484df5d86d85ae38f11e6e">scoped_lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct lock that has not acquired a mutex. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="605a6b9af0f8cdabdf81825e0de99600"></a><!-- doxytag: member="tbb::mutex::scoped_lock::scoped_lock" ref="605a6b9af0f8cdabdf81825e0de99600" args="(mutex &amp;mutex)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00178.html#605a6b9af0f8cdabdf81825e0de99600">scoped_lock</a> (<a class="el" href="a00177.html">mutex</a> &amp;<a class="el" href="a00177.html">mutex</a>)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Acquire lock on given mutex. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="0ebbbecaf4311e9df7362cb76ceaa368"></a><!-- doxytag: member="tbb::mutex::scoped_lock::~scoped_lock" ref="0ebbbecaf4311e9df7362cb76ceaa368" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00178.html#0ebbbecaf4311e9df7362cb76ceaa368">~scoped_lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Release lock (if lock is held). <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="862e022841cdc522e4296a5533b22efd"></a><!-- doxytag: member="tbb::mutex::scoped_lock::acquire" ref="862e022841cdc522e4296a5533b22efd" args="(mutex &amp;mutex)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00178.html#862e022841cdc522e4296a5533b22efd">acquire</a> (<a class="el" href="a00177.html">mutex</a> &amp;<a class="el" href="a00177.html">mutex</a>)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Acquire lock on given mutex. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="591e0c49b82bcedffcbe0923f1b915ec"></a><!-- doxytag: member="tbb::mutex::scoped_lock::try_acquire" ref="591e0c49b82bcedffcbe0923f1b915ec" args="(mutex &amp;mutex)" -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00178.html#591e0c49b82bcedffcbe0923f1b915ec">try_acquire</a> (<a class="el" href="a00177.html">mutex</a> &amp;<a class="el" href="a00177.html">mutex</a>)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Try to acquire a lock on the given mutex. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="0d51d18cd99df3b2e93bf07378d0992c"></a><!-- doxytag: member="tbb::mutex::scoped_lock::release" ref="0d51d18cd99df3b2e93bf07378d0992c" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00178.html#0d51d18cd99df3b2e93bf07378d0992c">release</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Release lock. <br></td></tr>
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="93062a14ad2e50e2b7b485b913170779"></a><!-- doxytag: member="tbb::mutex::scoped_lock::mutex" ref="93062a14ad2e50e2b7b485b913170779" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>mutex</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+The scoped locking pattern. 
+<p>
+It helps to avoid the common problem of forgetting to release the lock. It also provides the "node" needed by queuing locks. 
+<p>
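+<p>
+A minimal sketch of the pattern with illustrative names; the mutex is released automatically when the <a class="el" href="a00178.html">scoped_lock</a> goes out of scope, even if an exception is thrown:
+<div class="fragment"><pre class="fragment">
+#include "tbb/mutex.h"
+
+tbb::mutex m;
+long value = 0;   // shared state guarded by m (illustrative)
+
+void update() {
+    tbb::mutex::scoped_lock lock(m);   // acquires m
+    ++value;
+}                                      // ~scoped_lock releases m
+</pre></div>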
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00372.html">mutex.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00179.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00179.html
new file mode 100644 (file)
index 0000000..4d8da0f
--- /dev/null
@@ -0,0 +1,65 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::null_mutex Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00179.html">null_mutex</a></div>
+<h1>tbb::null_mutex Class Reference<br>
+<small>
+[<a class="el" href="a00283.html">Synchronization</a>]</small>
+</h1><!-- doxytag: class="tbb::null_mutex" -->A mutex which does nothing.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00374.html">null_mutex.h</a>&gt;</code>
+<p>
+<a href="a00036.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Static Public Attributes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a960bed8b19a4d111ca047de206f5f77"></a><!-- doxytag: member="tbb::null_mutex::is_rw_mutex" ref="a960bed8b19a4d111ca047de206f5f77" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_rw_mutex</b> = false</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a0fefed74b341d43ccf0e2fc30d31b75"></a><!-- doxytag: member="tbb::null_mutex::is_recursive_mutex" ref="a0fefed74b341d43ccf0e2fc30d31b75" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_recursive_mutex</b> = true</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="707cf399055d68177b1d64c2e112603a"></a><!-- doxytag: member="tbb::null_mutex::is_fair_mutex" ref="707cf399055d68177b1d64c2e112603a" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_fair_mutex</b> = true</td></tr>
+
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00180.html">scoped_lock</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Represents acquisition of a mutex.  <a href="a00180.html#_details">More...</a><br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+A mutex which does nothing. 
+<p>
+A <a class="el" href="a00179.html">null_mutex</a> performs no operation and simulates success. 
+<p>
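+<p>
+A sketch of the typical use: generic code takes its locking policy as a template parameter, so substituting <a class="el" href="a00179.html">null_mutex</a> compiles the synchronization away. The <code>counter</code> template is illustrative only:
+<div class="fragment"><pre class="fragment">
+#include "tbb/null_mutex.h"
+
+template&lt;typename Mutex&gt;
+struct counter {
+    Mutex mutex;
+    long value;
+    long increment() {
+        typename Mutex::scoped_lock lock(mutex);   // no-op for tbb::null_mutex
+        return ++value;
+    }
+};
+
+counter&lt;tbb::null_mutex&gt; unsynchronized_counter;   // locking compiles away
+</pre></div>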
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00374.html">null_mutex.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00180.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00180.html
new file mode 100644 (file)
index 0000000..7cee2ca
--- /dev/null
@@ -0,0 +1,59 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::null_mutex::scoped_lock Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00179.html">null_mutex</a>::<a class="el" href="a00180.html">scoped_lock</a></div>
+<h1>tbb::null_mutex::scoped_lock Class Reference</h1><!-- doxytag: class="tbb::null_mutex::scoped_lock" -->Represents acquisition of a mutex.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00374.html">null_mutex.h</a>&gt;</code>
+<p>
+<a href="a00037.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="765e64065919fdd9665e78b5ec4d3c6f"></a><!-- doxytag: member="tbb::null_mutex::scoped_lock::scoped_lock" ref="765e64065919fdd9665e78b5ec4d3c6f" args="(null_mutex &amp;)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>scoped_lock</b> (<a class="el" href="a00179.html">null_mutex</a> &amp;)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f6889f00ca3946906e89e856988bb890"></a><!-- doxytag: member="tbb::null_mutex::scoped_lock::acquire" ref="f6889f00ca3946906e89e856988bb890" args="(null_mutex &amp;)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><b>acquire</b> (<a class="el" href="a00179.html">null_mutex</a> &amp;)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="90eb562a9de52b33362a6fe237b3df42"></a><!-- doxytag: member="tbb::null_mutex::scoped_lock::try_acquire" ref="90eb562a9de52b33362a6fe237b3df42" args="(null_mutex &amp;)" -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>try_acquire</b> (<a class="el" href="a00179.html">null_mutex</a> &amp;)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="b29fdf07b4c295850a6e433001c2c6a7"></a><!-- doxytag: member="tbb::null_mutex::scoped_lock::release" ref="b29fdf07b4c295850a6e433001c2c6a7" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><b>release</b> ()</td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Represents acquisition of a mutex. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00374.html">null_mutex.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00181.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00181.html
new file mode 100644 (file)
index 0000000..507a570
--- /dev/null
@@ -0,0 +1,65 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::null_rw_mutex Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00181.html">null_rw_mutex</a></div>
+<h1>tbb::null_rw_mutex Class Reference<br>
+<small>
+[<a class="el" href="a00283.html">Synchronization</a>]</small>
+</h1><!-- doxytag: class="tbb::null_rw_mutex" -->A reader-writer mutex which does nothing.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00375.html">null_rw_mutex.h</a>&gt;</code>
+<p>
+<a href="a00038.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Static Public Attributes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="2f3235cbf63812f72e5755a4c820f523"></a><!-- doxytag: member="tbb::null_rw_mutex::is_rw_mutex" ref="2f3235cbf63812f72e5755a4c820f523" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_rw_mutex</b> = true</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e33ac3b047901e69e7997a16e221a330"></a><!-- doxytag: member="tbb::null_rw_mutex::is_recursive_mutex" ref="e33ac3b047901e69e7997a16e221a330" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_recursive_mutex</b> = true</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e5248faf1412f9283185a0b5d72d7456"></a><!-- doxytag: member="tbb::null_rw_mutex::is_fair_mutex" ref="e5248faf1412f9283185a0b5d72d7456" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_fair_mutex</b> = true</td></tr>
+
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00182.html">scoped_lock</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Represents acquisition of a mutex.  <a href="a00182.html#_details">More...</a><br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+A reader-writer mutex which does nothing. 
+<p>
+A <a class="el" href="a00181.html">null_rw_mutex</a> is a reader-writer mutex that does nothing and simulates successful operation. 
+<p>
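+<p>
+A sketch of substituting <a class="el" href="a00181.html">null_rw_mutex</a> for a real reader-writer mutex in generic code; the function and template names are illustrative only:
+<div class="fragment"><pre class="fragment">
+#include "tbb/null_rw_mutex.h"
+#include "tbb/spin_rw_mutex.h"
+
+template&lt;typename RWMutex&gt;
+long read_value( RWMutex&amp; m, const long&amp; value ) {
+    typename RWMutex::scoped_lock lock( m, /*write=*/false );   // reader lock
+    return value;
+}
+
+// With tbb::null_rw_mutex the lock is a no-op;
+// with tbb::spin_rw_mutex it performs real reader-writer locking.
+</pre></div>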
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00375.html">null_rw_mutex.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00182.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00182.html
new file mode 100644 (file)
index 0000000..925520e
--- /dev/null
@@ -0,0 +1,65 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::null_rw_mutex::scoped_lock Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00181.html">null_rw_mutex</a>::<a class="el" href="a00182.html">scoped_lock</a></div>
+<h1>tbb::null_rw_mutex::scoped_lock Class Reference</h1><!-- doxytag: class="tbb::null_rw_mutex::scoped_lock" -->Represents acquisition of a mutex.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00375.html">null_rw_mutex.h</a>&gt;</code>
+<p>
+<a href="a00039.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e11281a13a7b6243c6c9ab243c5ad5a8"></a><!-- doxytag: member="tbb::null_rw_mutex::scoped_lock::scoped_lock" ref="e11281a13a7b6243c6c9ab243c5ad5a8" args="(null_rw_mutex &amp;, bool=true)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>scoped_lock</b> (<a class="el" href="a00181.html">null_rw_mutex</a> &amp;, bool=true)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="72c4c302fdfc20187a650348e432b0a7"></a><!-- doxytag: member="tbb::null_rw_mutex::scoped_lock::acquire" ref="72c4c302fdfc20187a650348e432b0a7" args="(null_rw_mutex &amp;, bool=true)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><b>acquire</b> (<a class="el" href="a00181.html">null_rw_mutex</a> &amp;, bool=true)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="fa1e2a5592ee2672470ea44d98f1c498"></a><!-- doxytag: member="tbb::null_rw_mutex::scoped_lock::upgrade_to_writer" ref="fa1e2a5592ee2672470ea44d98f1c498" args="()" -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>upgrade_to_writer</b> ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="efcb7948649e1652d59aaff9c8ea40f1"></a><!-- doxytag: member="tbb::null_rw_mutex::scoped_lock::downgrade_to_reader" ref="efcb7948649e1652d59aaff9c8ea40f1" args="()" -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>downgrade_to_reader</b> ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="56686c4dfe4a32a1d9bd8e7e729130e6"></a><!-- doxytag: member="tbb::null_rw_mutex::scoped_lock::try_acquire" ref="56686c4dfe4a32a1d9bd8e7e729130e6" args="(null_rw_mutex &amp;, bool=true)" -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>try_acquire</b> (<a class="el" href="a00181.html">null_rw_mutex</a> &amp;, bool=true)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="6146c803a4ad2f14263fdc019a72b5a9"></a><!-- doxytag: member="tbb::null_rw_mutex::scoped_lock::release" ref="6146c803a4ad2f14263fdc019a72b5a9" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><b>release</b> ()</td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Represents acquisition of a mutex. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00375.html">null_rw_mutex.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00183.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00183.html
new file mode 100644 (file)
index 0000000..724f28b
--- /dev/null
@@ -0,0 +1,65 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::parallel_do_feeder&lt; Item &gt; Class Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00183.html">parallel_do_feeder</a></div>
+<h1>tbb::parallel_do_feeder&lt; Item &gt; Class Template Reference</h1><!-- doxytag: class="tbb::parallel_do_feeder" -->Class the user-supplied algorithm body uses to add new tasks.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00376.html">parallel_do.h</a>&gt;</code>
+<p>
+<a href="a00040.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="40baaf0f6856f4491dd0adf896c93516"></a><!-- doxytag: member="tbb::parallel_do_feeder::add" ref="40baaf0f6856f4491dd0adf896c93516" args="(const Item &amp;item)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00183.html#40baaf0f6856f4491dd0adf896c93516">add</a> (const Item &amp;item)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Add a work item to a running parallel_do. <br></td></tr>
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="b381179b22f5f5e8511470c73d64b37f"></a><!-- doxytag: member="tbb::parallel_do_feeder::internal::parallel_do_feeder_impl" ref="b381179b22f5f5e8511470c73d64b37f" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::parallel_do_feeder_impl</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename Item&gt;<br>
+ class tbb::parallel_do_feeder&lt; Item &gt;</h3>
+
+Class the user-supplied algorithm body uses to add new tasks. 
+<p>
+<dl compact><dt><b>Parameters:</b></dt><dd>
+  <table border="0" cellspacing="2" cellpadding="0">
+    <tr><td valign="top"></td><td valign="top"><em>Item</em>&nbsp;</td><td>Work item type </td></tr>
+  </table>
+</dl>
+
+<p>
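+<p>
+A minimal sketch of a body that feeds extra work into a running parallel_do; the item type and the halving rule are illustrative only:
+<div class="fragment"><pre class="fragment">
+#include "tbb/parallel_do.h"
+#include &lt;list&gt;
+
+struct body {
+    void operator()( int item, tbb::parallel_do_feeder&lt;int&gt;&amp; feeder ) const {
+        if( item &gt; 1 )
+            feeder.add( item / 2 );   // new work item joins the same parallel_do
+    }
+};
+
+void process( const std::list&lt;int&gt;&amp; items ) {
+    tbb::parallel_do( items.begin(), items.end(), body() );
+}
+</pre></div>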
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00376.html">parallel_do.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00184.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00184.html
new file mode 100644 (file)
index 0000000..ac1286f
--- /dev/null
@@ -0,0 +1,140 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::parallel_while&lt; Body &gt; Class Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00184.html">parallel_while</a></div>
+<h1>tbb::parallel_while&lt; Body &gt; Class Template Reference<br>
+<small>
+[<a class="el" href="a00280.html">Algorithms</a>]</small>
+</h1><!-- doxytag: class="tbb::parallel_while" -->Parallel iteration over a stream, with optional addition of more work.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00410.html">parallel_while.h</a>&gt;</code>
+<p>
+<a href="a00043.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="fa297e53d3af2a101e712bc200233e9c"></a><!-- doxytag: member="tbb::parallel_while::value_type" ref="fa297e53d3af2a101e712bc200233e9c" args="" -->
+typedef Body::argument_type&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00184.html#fa297e53d3af2a101e712bc200233e9c">value_type</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Type of items. <br></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="36e26ba3880c7bcf804a97ba0cbe133f"></a><!-- doxytag: member="tbb::parallel_while::parallel_while" ref="36e26ba3880c7bcf804a97ba0cbe133f" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00184.html#36e26ba3880c7bcf804a97ba0cbe133f">parallel_while</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct empty non-running parallel while. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="6fcfc973cc56b79c6d0fbb8a31be7e84"></a><!-- doxytag: member="tbb::parallel_while::~parallel_while" ref="6fcfc973cc56b79c6d0fbb8a31be7e84" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00184.html#6fcfc973cc56b79c6d0fbb8a31be7e84">~parallel_while</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destructor cleans up data members before returning. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2">template&lt;typename Stream&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00184.html#b32a0a6e5e09ebb7fad3e6652c19afe5">run</a> (Stream &amp;stream, const Body &amp;body)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Apply the body to each item in the stream.  <a href="#b32a0a6e5e09ebb7fad3e6652c19afe5"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00184.html#e131c560057a58229992b61eb8dba4c6">add</a> (const <a class="el" href="a00184.html#fa297e53d3af2a101e712bc200233e9c">value_type</a> &amp;item)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Add a work item while running.  <a href="#e131c560057a58229992b61eb8dba4c6"></a><br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename Body&gt;<br>
+ class tbb::parallel_while&lt; Body &gt;</h3>
+
+Parallel iteration over a stream, with optional addition of more work. 
+<p>
+The Body b must define: <br>
+ "b(v)" <br>
+ "Body::argument_type" <br>
+ where v is of type Body::argument_type 
+<p>
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="e131c560057a58229992b61eb8dba4c6"></a><!-- doxytag: member="tbb::parallel_while::add" ref="e131c560057a58229992b61eb8dba4c6" args="(const value_type &amp;item)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename Body&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">void <a class="el" href="a00184.html">tbb::parallel_while</a>&lt; Body &gt;::add           </td>
+          <td>(</td>
+          <td class="paramtype">const <a class="el" href="a00184.html#fa297e53d3af2a101e712bc200233e9c">value_type</a> &amp;&nbsp;</td>
+          <td class="paramname"> <em>item</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Add a work item while running. 
+<p>
+Should be executed only from within the body's operator(), or by a thread spawned therefrom. 
+</div>
+</div><p>
+<a class="anchor" name="b32a0a6e5e09ebb7fad3e6652c19afe5"></a><!-- doxytag: member="tbb::parallel_while::run" ref="b32a0a6e5e09ebb7fad3e6652c19afe5" args="(Stream &amp;stream, const Body &amp;body)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename Body&gt; </div>
+<div class="memtemplate">
+template&lt;typename Stream&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">void <a class="el" href="a00184.html">tbb::parallel_while</a>&lt; Body &gt;::run           </td>
+          <td>(</td>
+          <td class="paramtype">Stream &amp;&nbsp;</td>
+          <td class="paramname"> <em>stream</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const Body &amp;&nbsp;</td>
+          <td class="paramname"> <em>body</em></td><td>&nbsp;</td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td><td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Apply the body to each item in the stream. 
+<p>
+A Stream s has the requirements: <br>
+ "S::value_type" <br>
+ "s.pop_if_present(value)" is convertible to bool 
+</div>
+</div><p>
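+<p>
+A minimal sketch of a stream and body that satisfy the requirements above; all names are illustrative only:
+<div class="fragment"><pre class="fragment">
+#include "tbb/parallel_while.h"
+#include "tbb/mutex.h"
+#include &lt;list&gt;
+
+class item_stream {
+    std::list&lt;int&gt; items;
+    tbb::mutex m;
+public:
+    typedef int value_type;
+    item_stream( const std::list&lt;int&gt;&amp; src ) : items(src) {}
+    bool pop_if_present( int&amp; item ) {
+        tbb::mutex::scoped_lock lock(m);   // serialize access to the list
+        if( items.empty() ) return false;
+        item = items.front();
+        items.pop_front();
+        return true;
+    }
+};
+
+struct body {
+    typedef int argument_type;
+    void operator()( int item ) const { (void)item; /* process item */ }
+};
+
+void run_all( const std::list&lt;int&gt;&amp; src ) {
+    item_stream stream( src );
+    tbb::parallel_while&lt;body&gt; w;
+    w.run( stream, body() );
+}
+</pre></div>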
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00410.html">parallel_while.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00185.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00185.html
new file mode 100644 (file)
index 0000000..1f02951
--- /dev/null
@@ -0,0 +1,111 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::pipeline Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00185.html">pipeline</a></div>
+<h1>tbb::pipeline Class Reference<br>
+<small>
+[<a class="el" href="a00280.html">Algorithms</a>]</small>
+</h1><!-- doxytag: class="tbb::pipeline" -->A processing pipeline that applies filters to items.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00419.html">pipeline.h</a>&gt;</code>
+<p>
+<a href="a00051.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="596dc3beba27099c4c8581cb419e1a59"></a><!-- doxytag: member="tbb::pipeline::pipeline" ref="596dc3beba27099c4c8581cb419e1a59" args="()" -->
+__TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00185.html#596dc3beba27099c4c8581cb419e1a59">pipeline</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct empty pipeline. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">virtual __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00185.html#49513c6c24f9d5bbbb27edca5efe01c9">~pipeline</a> ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="38fb5c9c8395dd6f89a4ae2011a83e0d"></a><!-- doxytag: member="tbb::pipeline::add_filter" ref="38fb5c9c8395dd6f89a4ae2011a83e0d" args="(filter &amp;filter_)" -->
+void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00185.html#38fb5c9c8395dd6f89a4ae2011a83e0d">add_filter</a> (<a class="el" href="a00169.html">filter</a> &amp;filter_)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Add filter to end of pipeline. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f627616049b3fe36801f37ee40403ef8"></a><!-- doxytag: member="tbb::pipeline::run" ref="f627616049b3fe36801f37ee40403ef8" args="(size_t max_number_of_live_tokens)" -->
+void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00185.html#f627616049b3fe36801f37ee40403ef8">run</a> (size_t max_number_of_live_tokens)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Run the pipeline to completion. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="93d7fec8cd607b803dd2d79fb46bd260"></a><!-- doxytag: member="tbb::pipeline::run" ref="93d7fec8cd607b803dd2d79fb46bd260" args="(size_t max_number_of_live_tokens, tbb::task_group_context &amp;context)" -->
+void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00185.html#93d7fec8cd607b803dd2d79fb46bd260">run</a> (size_t max_number_of_live_tokens, <a class="el" href="a00206.html">tbb::task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Run the pipeline to completion with user-supplied context. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="2c84aef5b834b555ee220b176e25931e"></a><!-- doxytag: member="tbb::pipeline::clear" ref="2c84aef5b834b555ee220b176e25931e" args="()" -->
+void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00185.html#2c84aef5b834b555ee220b176e25931e">clear</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Remove all filters from the pipeline. <br></td></tr>
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="8228ce0751009045e8158d2e642715a6"></a><!-- doxytag: member="tbb::pipeline::internal::stage_task" ref="8228ce0751009045e8158d2e642715a6" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::stage_task</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="05d67150ca324698ba852553e223d3eb"></a><!-- doxytag: member="tbb::pipeline::internal::pipeline_root_task" ref="05d67150ca324698ba852553e223d3eb" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::pipeline_root_task</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="4c991e50853b0cac7da039550344d3ef"></a><!-- doxytag: member="tbb::pipeline::filter" ref="4c991e50853b0cac7da039550344d3ef" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>filter</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e43b69a482df8e053cb199af69eb5139"></a><!-- doxytag: member="tbb::pipeline::thread_bound_filter" ref="e43b69a482df8e053cb199af69eb5139" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>thread_bound_filter</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="9d033d41ff53a0ae6ef824aceee7ecbc"></a><!-- doxytag: member="tbb::pipeline::internal::pipeline_cleaner" ref="9d033d41ff53a0ae6ef824aceee7ecbc" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::pipeline_cleaner</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="589421589ec2788c2c39c0bc6272a09a"></a><!-- doxytag: member="tbb::pipeline::tbb::interface5::internal::pipeline_proxy" ref="589421589ec2788c2c39c0bc6272a09a" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>tbb::interface5::internal::pipeline_proxy</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+A processing pipeline that applies filters to items. 
+<p>
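+<p>
+A minimal two-stage sketch: a serial input filter hands out pointers into a buffer and a parallel filter transforms them in place. The buffer and all names are illustrative only:
+<div class="fragment"><pre class="fragment">
+#include "tbb/pipeline.h"
+#include &lt;cstddef&gt;
+
+static int data[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
+
+class input_filter : public tbb::filter {
+    std::size_t next;
+public:
+    input_filter() : tbb::filter( tbb::filter::serial_in_order ), next(0) {}
+    /*override*/ void* operator()( void* ) {
+        return next &lt; 8 ? &amp;data[next++] : NULL;   // NULL ends the pipeline
+    }
+};
+
+class double_filter : public tbb::filter {
+public:
+    double_filter() : tbb::filter( tbb::filter::parallel ) {}
+    /*override*/ void* operator()( void* item ) {
+        *static_cast&lt;int*&gt;(item) *= 2;
+        return NULL;                              // last stage forwards nothing
+    }
+};
+
+void run_example() {
+    input_filter in;
+    double_filter dbl;
+    tbb::pipeline p;
+    p.add_filter( in );
+    p.add_filter( dbl );
+    p.run( 4 );    // at most 4 items in flight
+    p.clear();
+}
+</pre></div>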
+<hr><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" name="49513c6c24f9d5bbbb27edca5efe01c9"></a><!-- doxytag: member="tbb::pipeline::~pipeline" ref="49513c6c24f9d5bbbb27edca5efe01c9" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">virtual __TBB_EXPORTED_METHOD tbb::pipeline::~pipeline           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [virtual]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Though the current implementation declares the destructor virtual, do not rely on this detail: the virtual specifier is deprecated and may disappear in future versions of TBB. 
+</div>
+</div><p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00419.html">pipeline.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00186.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00186.html
new file mode 100644 (file)
index 0000000..8fe2db3
--- /dev/null
@@ -0,0 +1,53 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::pre_scan_tag Struct Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00186.html">pre_scan_tag</a></div>
+<h1>tbb::pre_scan_tag Struct Reference<br>
+<small>
+[<a class="el" href="a00280.html">Algorithms</a>]</small>
+</h1><!-- doxytag: class="tbb::pre_scan_tag" -->Used to indicate that the initial scan is being performed.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00401.html">parallel_scan.h</a>&gt;</code>
+<p>
+<a href="a00041.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Static Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="d45d2cf548e51140ad6faafbea8ca6b5"></a><!-- doxytag: member="tbb::pre_scan_tag::is_final_scan" ref="d45d2cf548e51140ad6faafbea8ca6b5" args="()" -->
+static bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_final_scan</b> ()</td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Used to indicate that the initial scan is being performed. 
+<p>
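+<p>
+A sketch of how the tag is used: a parallel_scan body's templated operator() runs for both the pre-scan and the final scan, and queries <code>Tag::is_final_scan()</code> to decide whether to write results. The running-sum body below is illustrative only:
+<div class="fragment"><pre class="fragment">
+#include "tbb/parallel_scan.h"
+#include "tbb/blocked_range.h"
+
+class sum_body {
+    const int* in;
+    int* out;
+    int sum;
+public:
+    sum_body( const int* in_, int* out_ ) : in(in_), out(out_), sum(0) {}
+    sum_body( sum_body&amp; other, tbb::split ) : in(other.in), out(other.out), sum(0) {}
+    template&lt;typename Tag&gt;
+    void operator()( const tbb::blocked_range&lt;int&gt;&amp; r, Tag ) {
+        for( int i = r.begin(); i &lt; r.end(); ++i ) {
+            sum += in[i];
+            if( Tag::is_final_scan() )   // pre-scan only accumulates
+                out[i] = sum;
+        }
+    }
+    void reverse_join( sum_body&amp; left ) { sum = left.sum + sum; }
+    void assign( sum_body&amp; other ) { sum = other.sum; }
+};
+
+void prefix_sum( const int* in, int* out, int n ) {
+    sum_body body( in, out );
+    tbb::parallel_scan( tbb::blocked_range&lt;int&gt;(0, n), body );
+}
+</pre></div>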
+<hr>The documentation for this struct was generated from the following file:<ul>
+<li><a class="el" href="a00401.html">parallel_scan.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00187.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00187.html
new file mode 100644 (file)
index 0000000..805c66e
--- /dev/null
@@ -0,0 +1,75 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::queuing_mutex Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00187.html">queuing_mutex</a></div>
+<h1>tbb::queuing_mutex Class Reference<br>
+<small>
+[<a class="el" href="a00283.html">Synchronization</a>]</small>
+</h1><!-- doxytag: class="tbb::queuing_mutex" -->Queuing lock with local-only spinning.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00431.html">queuing_mutex.h</a>&gt;</code>
+<p>
+<a href="a00054.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="b389ad9c4db7293e4bdb5b8cda69ec04"></a><!-- doxytag: member="tbb::queuing_mutex::queuing_mutex" ref="b389ad9c4db7293e4bdb5b8cda69ec04" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00187.html#b389ad9c4db7293e4bdb5b8cda69ec04">queuing_mutex</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct unacquired mutex. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="96c1fe92760dcd1c5a7ed52c6599a72f"></a><!-- doxytag: member="tbb::queuing_mutex::internal_construct" ref="96c1fe92760dcd1c5a7ed52c6599a72f" args="()" -->
+void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal_construct</b> ()</td></tr>
+
+<tr><td colspan="2"><br><h2>Static Public Attributes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="420932f70ff3b85f7280ff11a133938d"></a><!-- doxytag: member="tbb::queuing_mutex::is_rw_mutex" ref="420932f70ff3b85f7280ff11a133938d" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_rw_mutex</b> = false</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="dc0c749f3a4e6ea75418677cb8f9205f"></a><!-- doxytag: member="tbb::queuing_mutex::is_recursive_mutex" ref="dc0c749f3a4e6ea75418677cb8f9205f" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_recursive_mutex</b> = false</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="55d5339e4ca04b759f90c0c1ef966539"></a><!-- doxytag: member="tbb::queuing_mutex::is_fair_mutex" ref="55d5339e4ca04b759f90c0c1ef966539" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_fair_mutex</b> = true</td></tr>
+
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a36f15b770a1bb0b56e33d7fa75ebd1a"></a><!-- doxytag: member="tbb::queuing_mutex::scoped_lock" ref="a36f15b770a1bb0b56e33d7fa75ebd1a" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>scoped_lock</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00188.html">scoped_lock</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">The scoped locking pattern.  <a href="a00188.html#_details">More...</a><br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Queuing lock with local-only spinning. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00431.html">queuing_mutex.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
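
For orientation, a minimal usage sketch of queuing_mutex with its nested scoped_lock; the shared counter and the increment function are illustrative assumptions, not part of the library.

    #include <tbb/queuing_mutex.h>

    tbb::queuing_mutex counter_mutex;   // illustrative shared mutex
    long counter = 0;                   // illustrative shared state

    void increment() {
        // The scoped_lock acquires the mutex in its constructor and releases it
        // in its destructor, so the lock is released even if the body throws.
        tbb::queuing_mutex::scoped_lock lock(counter_mutex);
        ++counter;
    }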
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00188.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00188.html
new file mode 100644 (file)
index 0000000..eeda15a
--- /dev/null
@@ -0,0 +1,94 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::queuing_mutex::scoped_lock Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00187.html">queuing_mutex</a>::<a class="el" href="a00188.html">scoped_lock</a></div>
+<h1>tbb::queuing_mutex::scoped_lock Class Reference</h1><!-- doxytag: class="tbb::queuing_mutex::scoped_lock" -->The scoped locking pattern.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00431.html">queuing_mutex.h</a>&gt;</code>
+<p>
+<a href="a00055.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00188.html#db0fa3967491014572e24d6607bdc971">scoped_lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct lock that has not acquired a mutex.  <a href="#db0fa3967491014572e24d6607bdc971"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="9b51ef972f5618ac17caadb58841ab6d"></a><!-- doxytag: member="tbb::queuing_mutex::scoped_lock::scoped_lock" ref="9b51ef972f5618ac17caadb58841ab6d" args="(queuing_mutex &amp;m)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00188.html#9b51ef972f5618ac17caadb58841ab6d">scoped_lock</a> (<a class="el" href="a00187.html">queuing_mutex</a> &amp;m)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Acquire lock on given mutex. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ac2c576a93570957d694192a5f491443"></a><!-- doxytag: member="tbb::queuing_mutex::scoped_lock::~scoped_lock" ref="ac2c576a93570957d694192a5f491443" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00188.html#ac2c576a93570957d694192a5f491443">~scoped_lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Release lock (if lock is held). <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="533e4fc8355ee321206a0609c42d909d"></a><!-- doxytag: member="tbb::queuing_mutex::scoped_lock::acquire" ref="533e4fc8355ee321206a0609c42d909d" args="(queuing_mutex &amp;m)" -->
+void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00188.html#533e4fc8355ee321206a0609c42d909d">acquire</a> (<a class="el" href="a00187.html">queuing_mutex</a> &amp;m)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Acquire lock on given mutex. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e5a014fb817599386a87170cf2cf51a9"></a><!-- doxytag: member="tbb::queuing_mutex::scoped_lock::try_acquire" ref="e5a014fb817599386a87170cf2cf51a9" args="(queuing_mutex &amp;m)" -->
+bool __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00188.html#e5a014fb817599386a87170cf2cf51a9">try_acquire</a> (<a class="el" href="a00187.html">queuing_mutex</a> &amp;m)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Acquire lock on given mutex if free (i.e. non-blocking). <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="3bf2b8c87ff22115be9b2eac179f2d30"></a><!-- doxytag: member="tbb::queuing_mutex::scoped_lock::release" ref="3bf2b8c87ff22115be9b2eac179f2d30" args="()" -->
+void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00188.html#3bf2b8c87ff22115be9b2eac179f2d30">release</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Release lock. <br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+The scoped locking pattern. 
+<p>
+It helps to avoid the common problem of forgetting to release the lock. It also provides the "node" used for queuing locks. 
+<p>
+<hr><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" name="db0fa3967491014572e24d6607bdc971"></a><!-- doxytag: member="tbb::queuing_mutex::scoped_lock::scoped_lock" ref="db0fa3967491014572e24d6607bdc971" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">tbb::queuing_mutex::scoped_lock::scoped_lock           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Construct lock that has not acquired a mutex. 
+<p>
+Equivalent to zero-initialization of *this. 
+</div>
+</div><p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00431.html">queuing_mutex.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
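
A short sketch of the non-RAII usage documented above: a default-constructed scoped_lock holds no mutex until acquire() or try_acquire() succeeds. The try_update function and its value parameter are illustrative assumptions.

    #include <tbb/queuing_mutex.h>

    tbb::queuing_mutex m;   // illustrative shared mutex

    bool try_update(long& value) {
        tbb::queuing_mutex::scoped_lock lock;   // holds no mutex yet
        if (!lock.try_acquire(m))
            return false;          // mutex busy; caller may do other work
        ++value;
        lock.release();            // optional: the destructor also releases
        return true;
    }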
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00189.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00189.html
new file mode 100644 (file)
index 0000000..3088b6e
--- /dev/null
@@ -0,0 +1,81 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::queuing_rw_mutex Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00189.html">queuing_rw_mutex</a></div>
+<h1>tbb::queuing_rw_mutex Class Reference<br>
+<small>
+[<a class="el" href="a00283.html">Synchronization</a>]</small>
+</h1><!-- doxytag: class="tbb::queuing_rw_mutex" -->Reader-writer lock with local-only spinning.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00432.html">queuing_rw_mutex.h</a>&gt;</code>
+<p>
+<a href="a00056.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="85c90877c3447690ac4e2ac4ff8dea5e"></a><!-- doxytag: member="tbb::queuing_rw_mutex::queuing_rw_mutex" ref="85c90877c3447690ac4e2ac4ff8dea5e" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00189.html#85c90877c3447690ac4e2ac4ff8dea5e">queuing_rw_mutex</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct unacquired mutex. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="1ba73e3d95cfdf8323880bc623af9099"></a><!-- doxytag: member="tbb::queuing_rw_mutex::~queuing_rw_mutex" ref="1ba73e3d95cfdf8323880bc623af9099" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00189.html#1ba73e3d95cfdf8323880bc623af9099">~queuing_rw_mutex</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destructor asserts if the mutex is acquired, i.e. q_tail is non-NULL. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="eb88522d145ad7bcf5bebfa7d2a6122b"></a><!-- doxytag: member="tbb::queuing_rw_mutex::internal_construct" ref="eb88522d145ad7bcf5bebfa7d2a6122b" args="()" -->
+void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal_construct</b> ()</td></tr>
+
+<tr><td colspan="2"><br><h2>Static Public Attributes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="0dc1e28d3033e8f9556f5b13b7b57d0f"></a><!-- doxytag: member="tbb::queuing_rw_mutex::is_rw_mutex" ref="0dc1e28d3033e8f9556f5b13b7b57d0f" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_rw_mutex</b> = true</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e6ad08ef251f9ea898bd5f67963869c5"></a><!-- doxytag: member="tbb::queuing_rw_mutex::is_recursive_mutex" ref="e6ad08ef251f9ea898bd5f67963869c5" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_recursive_mutex</b> = false</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="147b34120505e419f6ea8d631ec4375d"></a><!-- doxytag: member="tbb::queuing_rw_mutex::is_fair_mutex" ref="147b34120505e419f6ea8d631ec4375d" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_fair_mutex</b> = true</td></tr>
+
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a36f15b770a1bb0b56e33d7fa75ebd1a"></a><!-- doxytag: member="tbb::queuing_rw_mutex::scoped_lock" ref="a36f15b770a1bb0b56e33d7fa75ebd1a" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>scoped_lock</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00190.html">scoped_lock</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">The scoped locking pattern.  <a href="a00190.html#_details">More...</a><br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Reader-writer lock with local-only spinning. 
+<p>
+Adapted from Krieger, Stumm, et al. pseudocode at <a href="http://www.eecg.toronto.edu/parallel/pubs_abs.html#Krieger_etal_ICPP93">http://www.eecg.toronto.edu/parallel/pubs_abs.html#Krieger_etal_ICPP93</a> 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00432.html">queuing_rw_mutex.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
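
A minimal sketch of shared versus exclusive use: the scoped_lock's write argument defaults to true, and passing false requests a reader lock. The table container and the lookup/insert functions are illustrative assumptions.

    #include <map>
    #include <string>
    #include <tbb/queuing_rw_mutex.h>

    tbb::queuing_rw_mutex table_mutex;       // illustrative shared mutex
    std::map<std::string, int> table;        // illustrative shared data

    int lookup(const std::string& key) {
        // write=false requests a shared (reader) lock; readers run concurrently.
        tbb::queuing_rw_mutex::scoped_lock lock(table_mutex, /*write=*/false);
        std::map<std::string, int>::const_iterator it = table.find(key);
        return it == table.end() ? -1 : it->second;
    }

    void insert(const std::string& key, int value) {
        // The default write=true gives exclusive access.
        tbb::queuing_rw_mutex::scoped_lock lock(table_mutex);
        table[key] = value;
    }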
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00190.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00190.html
new file mode 100644 (file)
index 0000000..4b3a44a
--- /dev/null
@@ -0,0 +1,123 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::queuing_rw_mutex::scoped_lock Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00189.html">queuing_rw_mutex</a>::<a class="el" href="a00190.html">scoped_lock</a></div>
+<h1>tbb::queuing_rw_mutex::scoped_lock Class Reference</h1><!-- doxytag: class="tbb::queuing_rw_mutex::scoped_lock" -->The scoped locking pattern.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00432.html">queuing_rw_mutex.h</a>&gt;</code>
+<p>
+<a href="a00057.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00190.html#c62e365be7bcbba091c9ea7454a4d22c">scoped_lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct lock that has not acquired a mutex.  <a href="#c62e365be7bcbba091c9ea7454a4d22c"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="fbb8798792d3aebb136c46fc63d2529e"></a><!-- doxytag: member="tbb::queuing_rw_mutex::scoped_lock::scoped_lock" ref="fbb8798792d3aebb136c46fc63d2529e" args="(queuing_rw_mutex &amp;m, bool write=true)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00190.html#fbb8798792d3aebb136c46fc63d2529e">scoped_lock</a> (<a class="el" href="a00189.html">queuing_rw_mutex</a> &amp;m, bool write=true)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Acquire lock on given mutex. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="32c7d67a660d23ebbaab1a1d2826d31a"></a><!-- doxytag: member="tbb::queuing_rw_mutex::scoped_lock::~scoped_lock" ref="32c7d67a660d23ebbaab1a1d2826d31a" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00190.html#32c7d67a660d23ebbaab1a1d2826d31a">~scoped_lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Release lock (if lock is held). <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a8dd5ab8686e76de21587544dbb681e0"></a><!-- doxytag: member="tbb::queuing_rw_mutex::scoped_lock::acquire" ref="a8dd5ab8686e76de21587544dbb681e0" args="(queuing_rw_mutex &amp;m, bool write=true)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00190.html#a8dd5ab8686e76de21587544dbb681e0">acquire</a> (<a class="el" href="a00189.html">queuing_rw_mutex</a> &amp;m, bool write=true)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Acquire lock on given mutex. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="2e4ff6c9ec2fee6682f95290d1f42baa"></a><!-- doxytag: member="tbb::queuing_rw_mutex::scoped_lock::try_acquire" ref="2e4ff6c9ec2fee6682f95290d1f42baa" args="(queuing_rw_mutex &amp;m, bool write=true)" -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00190.html#2e4ff6c9ec2fee6682f95290d1f42baa">try_acquire</a> (<a class="el" href="a00189.html">queuing_rw_mutex</a> &amp;m, bool write=true)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Try acquire lock on given mutex. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="67ae221109ddc69510ab593874e435d4"></a><!-- doxytag: member="tbb::queuing_rw_mutex::scoped_lock::release" ref="67ae221109ddc69510ab593874e435d4" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00190.html#67ae221109ddc69510ab593874e435d4">release</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Release lock. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00190.html#11ba1da4a722c9e6f73339a52c487e82">upgrade_to_writer</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Upgrade reader to become a writer.  <a href="#11ba1da4a722c9e6f73339a52c487e82"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="0d2f93edf7b15ec4bcee138823220c52"></a><!-- doxytag: member="tbb::queuing_rw_mutex::scoped_lock::downgrade_to_reader" ref="0d2f93edf7b15ec4bcee138823220c52" args="()" -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00190.html#0d2f93edf7b15ec4bcee138823220c52">downgrade_to_reader</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Downgrade writer to become a reader. <br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+The scoped locking pattern. 
+<p>
+It helps to avoid the common problem of forgetting to release the lock. It also provides the "node" used for queuing locks. 
+<p>
+<hr><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" name="c62e365be7bcbba091c9ea7454a4d22c"></a><!-- doxytag: member="tbb::queuing_rw_mutex::scoped_lock::scoped_lock" ref="c62e365be7bcbba091c9ea7454a4d22c" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">tbb::queuing_rw_mutex::scoped_lock::scoped_lock           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Construct lock that has not acquired a mutex. 
+<p>
+Equivalent to zero-initialization of *this. 
+</div>
+</div><p>
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="11ba1da4a722c9e6f73339a52c487e82"></a><!-- doxytag: member="tbb::queuing_rw_mutex::scoped_lock::upgrade_to_writer" ref="11ba1da4a722c9e6f73339a52c487e82" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">bool tbb::queuing_rw_mutex::scoped_lock::upgrade_to_writer           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Upgrade reader to become a writer. 
+<p>
+Returns true if the upgrade happened without re-acquiring the lock, and false otherwise. 
+</div>
+</div><p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00432.html">queuing_rw_mutex.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
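
Because upgrade_to_writer() may have to release and re-acquire the lock (in which case it returns false), callers typically re-check their precondition after upgrading. The sketch below illustrates that pattern; the cached variable and set_once function are assumptions.

    #include <tbb/queuing_rw_mutex.h>

    tbb::queuing_rw_mutex m;   // illustrative shared mutex
    int cached = -1;           // illustrative shared value, -1 means "unset"

    void set_once(int v) {
        tbb::queuing_rw_mutex::scoped_lock lock(m, /*write=*/false);  // reader
        if (cached == -1) {
            // If the upgrade had to re-acquire the lock, another thread may
            // have written in the meantime, so re-check before writing.
            if (!lock.upgrade_to_writer() && cached != -1)
                return;
            cached = v;
        }
    }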
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00191.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00191.html
new file mode 100644 (file)
index 0000000..8c031e4
--- /dev/null
@@ -0,0 +1,221 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::interface5::reader_writer_lock Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<b>interface5</b>::<a class="el" href="a00191.html">reader_writer_lock</a></div>
+<h1>tbb::interface5::reader_writer_lock Class Reference<br>
+<small>
+[<a class="el" href="a00283.html">Synchronization</a>]</small>
+</h1><!-- doxytag: class="tbb::interface5::reader_writer_lock" -->Writer-preference reader-writer lock with local-only spinning on readers.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00433.html">reader_writer_lock.h</a>&gt;</code>
+<p>
+<a href="a00058.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="6f921f0d7c1812ceb5674418c8b6ccaf005142e2b9af377a8d33c6604d858862"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::waiting_nonblocking" ref="6f921f0d7c1812ceb5674418c8b6ccaf005142e2b9af377a8d33c6604d858862" args="" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>waiting_nonblocking</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="6f921f0d7c1812ceb5674418c8b6ccaf18bcc9aae8f056c34fb61bb7fc39b432"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::waiting" ref="6f921f0d7c1812ceb5674418c8b6ccaf18bcc9aae8f056c34fb61bb7fc39b432" args="" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>waiting</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="6f921f0d7c1812ceb5674418c8b6ccaf423c082d5cdc9bb12d0eeadcae56adb5"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::active" ref="6f921f0d7c1812ceb5674418c8b6ccaf423c082d5cdc9bb12d0eeadcae56adb5" args="" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>active</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="6f921f0d7c1812ceb5674418c8b6ccafa9a5b17bdfb0a6d59b3cd58fecbd9610"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::invalid" ref="6f921f0d7c1812ceb5674418c8b6ccafa9a5b17bdfb0a6d59b3cd58fecbd9610" args="" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>invalid</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">enum &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00191.html#6f921f0d7c1812ceb5674418c8b6ccaf">status_t</a> { <b>waiting_nonblocking</b>, 
+<b>waiting</b>, 
+<b>active</b>, 
+<b>invalid</b>
+ }</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Status type for nodes associated with lock instances.  <a href="a00191.html#6f921f0d7c1812ceb5674418c8b6ccaf">More...</a><br></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="c1431c4293e777efd9aab9a95c2a46e1"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::reader_writer_lock" ref="c1431c4293e777efd9aab9a95c2a46e1" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00191.html#c1431c4293e777efd9aab9a95c2a46e1">reader_writer_lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Constructs a new <a class="el" href="a00191.html">reader_writer_lock</a>. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="5135f64f7b7339017f33d956445edbee"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::~reader_writer_lock" ref="5135f64f7b7339017f33d956445edbee" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00191.html#5135f64f7b7339017f33d956445edbee">~reader_writer_lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destructs a <a class="el" href="a00191.html">reader_writer_lock</a> object. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00191.html#2653d1a2d560059a51219a8ceab3ade9">lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Acquires the <a class="el" href="a00191.html">reader_writer_lock</a> for write.  <a href="#2653d1a2d560059a51219a8ceab3ade9"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00191.html#721eb173e154ab38292273e9266a9b07">try_lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Tries to acquire the <a class="el" href="a00191.html">reader_writer_lock</a> for write.  <a href="#721eb173e154ab38292273e9266a9b07"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00191.html#d9d16a24d9f6c3dada73c6b9ff214f5b">lock_read</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Acquires the <a class="el" href="a00191.html">reader_writer_lock</a> for read.  <a href="#d9d16a24d9f6c3dada73c6b9ff214f5b"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00191.html#595fb23952e3b89426b1f7938dea9b11">try_lock_read</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Tries to acquire the <a class="el" href="a00191.html">reader_writer_lock</a> for read.  <a href="#595fb23952e3b89426b1f7938dea9b11"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="5113b32689305599b2c36b5831547704"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::unlock" ref="5113b32689305599b2c36b5831547704" args="()" -->
+void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00191.html#5113b32689305599b2c36b5831547704">unlock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Releases the <a class="el" href="a00191.html">reader_writer_lock</a>. <br></td></tr>
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a36f15b770a1bb0b56e33d7fa75ebd1a"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::scoped_lock" ref="a36f15b770a1bb0b56e33d7fa75ebd1a" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>scoped_lock</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="680274059774b9188889d34986314f81"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::scoped_lock_read" ref="680274059774b9188889d34986314f81" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>scoped_lock_read</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00192.html">scoped_lock</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">The scoped lock pattern for write locks.  <a href="a00192.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00193.html">scoped_lock_read</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">The scoped lock pattern for read locks.  <a href="a00193.html#_details">More...</a><br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Writer-preference reader-writer lock with local-only spinning on readers. 
+<p>
+Loosely adapted from Mellor-Crummey and Scott pseudocode at <a href="http://www.cs.rochester.edu/research/synchronization/pseudocode/rw.html#s_wp">http://www.cs.rochester.edu/research/synchronization/pseudocode/rw.html#s_wp</a> 
+<p>
+<hr><h2>Member Enumeration Documentation</h2>
+<a class="anchor" name="6f921f0d7c1812ceb5674418c8b6ccaf"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::status_t" ref="6f921f0d7c1812ceb5674418c8b6ccaf" args="" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="a00191.html#6f921f0d7c1812ceb5674418c8b6ccaf">tbb::interface5::reader_writer_lock::status_t</a>          </td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Status type for nodes associated with lock instances. 
+<p>
+waiting_nonblocking: the wait state for nonblocking lock instances; for writes, these transition straight to active states; for reads, these are unused.<p>
+waiting: the start and spin state for all lock instances; these will transition to active state when appropriate. Non-blocking write locks transition from this state to waiting_nonblocking immediately.<p>
+active: the active state means that the lock instance holds the lock; it will transition to invalid state during node deletion<p>
+invalid: the end state for all nodes; this is set in the destructor so if we encounter this state, we are looking at memory that has already been freed<p>
+The state diagrams below describe the status transitions. Single arrows indicate that the thread that owns the node is responsible for the transition; double arrows indicate that any thread could make the transition.<p>
+State diagram for <a class="el" href="a00192.html">scoped_lock</a> status:<p>
+<pre>
+  waiting ----------&gt; waiting_nonblocking
+    |     _____________/       |
+    V    V                     V
+  active -----------------&gt; invalid
+</pre><p>
+State diagram for <a class="el" href="a00193.html">scoped_lock_read</a> status:<p>
+<pre>
+  waiting
+    |
+    V
+  active -----------------&gt; invalid
+</pre>
+</div>
+</div><p>
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="2653d1a2d560059a51219a8ceab3ade9"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::lock" ref="2653d1a2d560059a51219a8ceab3ade9" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void __TBB_EXPORTED_METHOD tbb::interface5::reader_writer_lock::lock           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Acquires the <a class="el" href="a00191.html">reader_writer_lock</a> for write. 
+<p>
+If the lock is currently held in write mode by another context, the writer will block by spinning on a local variable. Exceptions thrown: <a class="el" href="a00173.html">improper_lock</a>: the context tries to acquire a <a class="el" href="a00191.html">reader_writer_lock</a> that it already has write ownership of. 
+</div>
+</div><p>
+<a class="anchor" name="d9d16a24d9f6c3dada73c6b9ff214f5b"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::lock_read" ref="d9d16a24d9f6c3dada73c6b9ff214f5b" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void __TBB_EXPORTED_METHOD tbb::interface5::reader_writer_lock::lock_read           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Acquires the <a class="el" href="a00191.html">reader_writer_lock</a> for read. 
+<p>
+If the lock is currently held by a writer, this reader will block and wait until the writers are done. Exceptions thrown: <a class="el" href="a00173.html">improper_lock</a>: the context tries to acquire a <a class="el" href="a00191.html">reader_writer_lock</a> that it already has write ownership of. 
+</div>
+</div><p>
+<a class="anchor" name="721eb173e154ab38292273e9266a9b07"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::try_lock" ref="721eb173e154ab38292273e9266a9b07" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">bool __TBB_EXPORTED_METHOD tbb::interface5::reader_writer_lock::try_lock           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Tries to acquire the <a class="el" href="a00191.html">reader_writer_lock</a> for write. 
+<p>
+This function does not block. Returns true if the lock was acquired and false otherwise. If the lock is already held by the acquiring context, <a class="el" href="a00191.html#721eb173e154ab38292273e9266a9b07">try_lock()</a> returns false. 
+</div>
+</div><p>
+<a class="anchor" name="595fb23952e3b89426b1f7938dea9b11"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::try_lock_read" ref="595fb23952e3b89426b1f7938dea9b11" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">bool __TBB_EXPORTED_METHOD tbb::interface5::reader_writer_lock::try_lock_read           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Tries to acquire the <a class="el" href="a00191.html">reader_writer_lock</a> for read. 
+<p>
+This function does not block. Returns true if the lock was acquired and false otherwise. 
+</div>
+</div><p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00433.html">reader_writer_lock.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
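
A minimal sketch of the free-standing lock/unlock interface, assuming the usual export of the class into namespace tbb; the data vector and the two functions are illustrative assumptions.

    #include <vector>
    #include <tbb/reader_writer_lock.h>

    tbb::reader_writer_lock rw;    // assumes the class is exported into namespace tbb
    std::vector<int> data;         // illustrative shared container

    int last_value() {
        // lock_read()/unlock() must be paired manually in this style.
        rw.lock_read();
        int v = data.empty() ? 0 : data.back();
        rw.unlock();
        return v;
    }

    void append(int v) {
        // Writer preference: a pending writer keeps new readers from entering.
        rw.lock();
        data.push_back(v);
        rw.unlock();
    }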
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00192.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00192.html
new file mode 100644 (file)
index 0000000..8589b7c
--- /dev/null
@@ -0,0 +1,67 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::interface5::reader_writer_lock::scoped_lock Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<b>interface5</b>::<a class="el" href="a00191.html">reader_writer_lock</a>::<a class="el" href="a00192.html">scoped_lock</a></div>
+<h1>tbb::interface5::reader_writer_lock::scoped_lock Class Reference</h1><!-- doxytag: class="tbb::interface5::reader_writer_lock::scoped_lock" -->The scoped lock pattern for write locks.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00433.html">reader_writer_lock.h</a>&gt;</code>
+<p>
+<a href="a00059.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="cf19f20e082887c1bb0ba6b0911c3583"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::scoped_lock::scoped_lock" ref="cf19f20e082887c1bb0ba6b0911c3583" args="(reader_writer_lock &amp;lock)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00192.html#cf19f20e082887c1bb0ba6b0911c3583">scoped_lock</a> (<a class="el" href="a00191.html">reader_writer_lock</a> &amp;lock)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct with blocking attempt to acquire write lock on the passed-in lock. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="70246e0260493625ff956fa5926fc71f"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::scoped_lock::~scoped_lock" ref="70246e0260493625ff956fa5926fc71f" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00192.html#70246e0260493625ff956fa5926fc71f">~scoped_lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destructor, releases the write lock. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="aea1cb0e88be9874f0f72e52063d0b90"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::scoped_lock::operator new" ref="aea1cb0e88be9874f0f72e52063d0b90" args="(size_t s)" -->
+void *&nbsp;</td><td class="memItemRight" valign="bottom"><b>operator new</b> (size_t s)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="db804a05fcd37f7e81b94752e45039f7"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::scoped_lock::operator delete" ref="db804a05fcd37f7e81b94752e45039f7" args="(void *p)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><b>operator delete</b> (void *p)</td></tr>
+
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="38eb53abbe72543f5967c63aa95b403e"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::scoped_lock::reader_writer_lock" ref="38eb53abbe72543f5967c63aa95b403e" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>reader_writer_lock</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+The scoped lock pattern for write locks. 
+<p>
+Scoped locks help avoid the common problem of forgetting to release the lock. This type also serves as the node for queuing locks. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00433.html">reader_writer_lock.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
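
The RAII counterpart of lock()/unlock() for writers, shown as a brief illustrative sketch; the variable and function names are assumptions.

    #include <tbb/reader_writer_lock.h>

    tbb::reader_writer_lock rw;   // illustrative shared lock
    int shared_value = 0;         // illustrative shared state

    void set_value(int v) {
        // Blocks until write access is granted; the destructor releases it.
        tbb::reader_writer_lock::scoped_lock guard(rw);
        shared_value = v;
    }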
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00193.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00193.html
new file mode 100644 (file)
index 0000000..94177d1
--- /dev/null
@@ -0,0 +1,65 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::interface5::reader_writer_lock::scoped_lock_read Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<b>interface5</b>::<a class="el" href="a00191.html">reader_writer_lock</a>::<a class="el" href="a00193.html">scoped_lock_read</a></div>
+<h1>tbb::interface5::reader_writer_lock::scoped_lock_read Class Reference</h1><!-- doxytag: class="tbb::interface5::reader_writer_lock::scoped_lock_read" -->The scoped lock pattern for read locks.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00433.html">reader_writer_lock.h</a>&gt;</code>
+<p>
+<a href="a00060.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="87ab0dc8f7216e6ba0f7acd6aec33064"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::scoped_lock_read::scoped_lock_read" ref="87ab0dc8f7216e6ba0f7acd6aec33064" args="(reader_writer_lock &amp;lock)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00193.html#87ab0dc8f7216e6ba0f7acd6aec33064">scoped_lock_read</a> (<a class="el" href="a00191.html">reader_writer_lock</a> &amp;lock)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct with blocking attempt to acquire read lock on the passed-in lock. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="bd21c5f3d555d64d1de8658e15bf4966"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::scoped_lock_read::~scoped_lock_read" ref="bd21c5f3d555d64d1de8658e15bf4966" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00193.html#bd21c5f3d555d64d1de8658e15bf4966">~scoped_lock_read</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destructor, releases the read lock. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f09e1817dddf97cc2182a573945eef91"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::scoped_lock_read::operator new" ref="f09e1817dddf97cc2182a573945eef91" args="(size_t s)" -->
+void *&nbsp;</td><td class="memItemRight" valign="bottom"><b>operator new</b> (size_t s)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="4f9e148ec33895c0d2669ff6820cf164"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::scoped_lock_read::operator delete" ref="4f9e148ec33895c0d2669ff6820cf164" args="(void *p)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><b>operator delete</b> (void *p)</td></tr>
+
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="38eb53abbe72543f5967c63aa95b403e"></a><!-- doxytag: member="tbb::interface5::reader_writer_lock::scoped_lock_read::reader_writer_lock" ref="38eb53abbe72543f5967c63aa95b403e" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>reader_writer_lock</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+The scoped lock pattern for read locks. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00433.html">reader_writer_lock.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
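
And the read-side counterpart, again as an illustrative sketch with assumed names; several scoped_lock_read holders may be active at once.

    #include <tbb/reader_writer_lock.h>

    tbb::reader_writer_lock rw;   // illustrative shared lock
    int shared_value = 0;         // illustrative shared state

    int get_value() {
        // Multiple readers may hold a scoped_lock_read concurrently.
        tbb::reader_writer_lock::scoped_lock_read guard(rw);
        return shared_value;
    }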
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00194.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00194.html
new file mode 100644 (file)
index 0000000..8a85f15
--- /dev/null
@@ -0,0 +1,118 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::recursive_mutex Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00194.html">recursive_mutex</a></div>
+<h1>tbb::recursive_mutex Class Reference<br>
+<small>
+[<a class="el" href="a00283.html">Synchronization</a>]</small>
+</h1><!-- doxytag: class="tbb::recursive_mutex" -->Mutex that allows recursive mutex acquisition.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00434.html">recursive_mutex.h</a>&gt;</code>
+<p>
+<a href="a00061.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="889fa8cc32dd707eef7c0f52dda09c0d"></a><!-- doxytag: member="tbb::recursive_mutex::native_handle_type" ref="889fa8cc32dd707eef7c0f52dda09c0d" args="" -->
+typedef LPCRITICAL_SECTION&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00194.html#889fa8cc32dd707eef7c0f52dda09c0d">native_handle_type</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Return native_handle. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="26a40fae42bf6cd9f3f77ee7482d6164"></a><!-- doxytag: member="tbb::recursive_mutex::native_handle_type" ref="26a40fae42bf6cd9f3f77ee7482d6164" args="" -->
+typedef pthread_mutex_t *&nbsp;</td><td class="memItemRight" valign="bottom"><b>native_handle_type</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="d2fceb7f95c24a8cd1457d4527e4b8c6"></a><!-- doxytag: member="tbb::recursive_mutex::recursive_mutex" ref="d2fceb7f95c24a8cd1457d4527e4b8c6" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00194.html#d2fceb7f95c24a8cd1457d4527e4b8c6">recursive_mutex</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct unacquired <a class="el" href="a00194.html">recursive_mutex</a>. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="4c342c69d47f4bb0b393535dee4015d6"></a><!-- doxytag: member="tbb::recursive_mutex::lock" ref="4c342c69d47f4bb0b393535dee4015d6" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00194.html#4c342c69d47f4bb0b393535dee4015d6">lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Acquire lock. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00194.html#86e719b0afee25704af11ab97694d240">try_lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Try acquiring lock (non-blocking).  <a href="#86e719b0afee25704af11ab97694d240"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f0a96e26b7f074588dc31e32524856ae"></a><!-- doxytag: member="tbb::recursive_mutex::unlock" ref="f0a96e26b7f074588dc31e32524856ae" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00194.html#f0a96e26b7f074588dc31e32524856ae">unlock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Release lock. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="6baa9ce4b394c39622456fd8a68f8bd8"></a><!-- doxytag: member="tbb::recursive_mutex::native_handle" ref="6baa9ce4b394c39622456fd8a68f8bd8" args="()" -->
+<a class="el" href="a00194.html#889fa8cc32dd707eef7c0f52dda09c0d">native_handle_type</a>&nbsp;</td><td class="memItemRight" valign="bottom"><b>native_handle</b> ()</td></tr>
+
+<tr><td colspan="2"><br><h2>Static Public Attributes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="718cc53f6b33d8c396ccca0e4ebc5606"></a><!-- doxytag: member="tbb::recursive_mutex::is_rw_mutex" ref="718cc53f6b33d8c396ccca0e4ebc5606" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_rw_mutex</b> = false</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f6149d7541063d8b35afd98df6f0f89d"></a><!-- doxytag: member="tbb::recursive_mutex::is_recursive_mutex" ref="f6149d7541063d8b35afd98df6f0f89d" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_recursive_mutex</b> = true</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f01aacad6b9b9d51573c0803323182fe"></a><!-- doxytag: member="tbb::recursive_mutex::is_fair_mutex" ref="f01aacad6b9b9d51573c0803323182fe" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_fair_mutex</b> = false</td></tr>
+
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a36f15b770a1bb0b56e33d7fa75ebd1a"></a><!-- doxytag: member="tbb::recursive_mutex::scoped_lock" ref="a36f15b770a1bb0b56e33d7fa75ebd1a" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>scoped_lock</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00195.html">scoped_lock</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">The scoped locking pattern.  <a href="a00195.html#_details">More...</a><br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Mutex that allows recursive mutex acquisition. 
+<p>
+Mutex that allows recursive mutex acquisition. 
+<p>
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="86e719b0afee25704af11ab97694d240"></a><!-- doxytag: member="tbb::recursive_mutex::try_lock" ref="86e719b0afee25704af11ab97694d240" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">bool tbb::recursive_mutex::try_lock           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Try acquiring lock (non-blocking). 
+<p>
+Returns true if the lock was acquired; false otherwise. 
+</div>
+</div><p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00434.html">recursive_mutex.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
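For reference, a minimal usage sketch of tbb::recursive_mutex as documented above (illustrative only; it assumes the tbb/recursive_mutex.h header shipped with this TBB release, and visit() is a hypothetical function):

    #include "tbb/recursive_mutex.h"

    tbb::recursive_mutex tree_mutex;

    // The same thread may re-acquire a recursive_mutex, so a function that
    // holds the lock can safely call itself.
    void visit(int depth) {
        tbb::recursive_mutex::scoped_lock lock(tree_mutex);  // acquires on construction
        if (depth > 0)
            visit(depth - 1);                                 // recursive acquisition is legal
    }                                                         // one level of the lock is released on destruction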
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00195.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00195.html
new file mode 100644 (file)
index 0000000..d20f5af
--- /dev/null
@@ -0,0 +1,77 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::recursive_mutex::scoped_lock Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00194.html">recursive_mutex</a>::<a class="el" href="a00195.html">scoped_lock</a></div>
+<h1>tbb::recursive_mutex::scoped_lock Class Reference</h1><!-- doxytag: class="tbb::recursive_mutex::scoped_lock" -->The scoped locking pattern.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00434.html">recursive_mutex.h</a>&gt;</code>
+<p>
+<a href="a00062.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="d82d4d36fbf9727a493d26ae50855fe7"></a><!-- doxytag: member="tbb::recursive_mutex::scoped_lock::scoped_lock" ref="d82d4d36fbf9727a493d26ae50855fe7" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00195.html#d82d4d36fbf9727a493d26ae50855fe7">scoped_lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct lock that has not acquired a <a class="el" href="a00194.html">recursive_mutex</a>. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="dec17713c4c1321ac8fec66816d0c602"></a><!-- doxytag: member="tbb::recursive_mutex::scoped_lock::scoped_lock" ref="dec17713c4c1321ac8fec66816d0c602" args="(recursive_mutex &amp;mutex)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00195.html#dec17713c4c1321ac8fec66816d0c602">scoped_lock</a> (<a class="el" href="a00194.html">recursive_mutex</a> &amp;<a class="el" href="a00177.html">mutex</a>)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Acquire lock on given mutex. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="c1197ffb8f3cd9d4fed71d7e06265b7c"></a><!-- doxytag: member="tbb::recursive_mutex::scoped_lock::~scoped_lock" ref="c1197ffb8f3cd9d4fed71d7e06265b7c" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00195.html#c1197ffb8f3cd9d4fed71d7e06265b7c">~scoped_lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Release lock (if lock is held). <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="7fb04da37cccf8c99b1f9102d9074f9a"></a><!-- doxytag: member="tbb::recursive_mutex::scoped_lock::acquire" ref="7fb04da37cccf8c99b1f9102d9074f9a" args="(recursive_mutex &amp;mutex)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00195.html#7fb04da37cccf8c99b1f9102d9074f9a">acquire</a> (<a class="el" href="a00194.html">recursive_mutex</a> &amp;<a class="el" href="a00177.html">mutex</a>)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Acquire lock on given mutex. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="36bfc3e93e3ef6340abef4901444d340"></a><!-- doxytag: member="tbb::recursive_mutex::scoped_lock::try_acquire" ref="36bfc3e93e3ef6340abef4901444d340" args="(recursive_mutex &amp;mutex)" -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00195.html#36bfc3e93e3ef6340abef4901444d340">try_acquire</a> (<a class="el" href="a00194.html">recursive_mutex</a> &amp;<a class="el" href="a00177.html">mutex</a>)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Try acquire lock on given <a class="el" href="a00194.html">recursive_mutex</a>. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ac480ea0e9d5ea0345a67d57008b6263"></a><!-- doxytag: member="tbb::recursive_mutex::scoped_lock::release" ref="ac480ea0e9d5ea0345a67d57008b6263" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00195.html#ac480ea0e9d5ea0345a67d57008b6263">release</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Release lock. <br></td></tr>
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="459818b78a3e9985dc5a9d5638b6593e"></a><!-- doxytag: member="tbb::recursive_mutex::scoped_lock::recursive_mutex" ref="459818b78a3e9985dc5a9d5638b6593e" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>recursive_mutex</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+The scoped locking pattern. 
+<p>
+It helps to avoid the common problem of forgetting to release the lock. It also provides the "node" used for queuing locks. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00434.html">recursive_mutex.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
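A scoped_lock may also be constructed unattached and driven explicitly through acquire/try_acquire/release; a minimal sketch of that usage (do_work_if_free() is a hypothetical function):

    #include "tbb/recursive_mutex.h"

    tbb::recursive_mutex m;

    bool do_work_if_free() {
        tbb::recursive_mutex::scoped_lock lock;  // has not acquired any mutex yet
        if (!lock.try_acquire(m))
            return false;                        // mutex busy; nothing to release
        // ... short critical section ...
        lock.release();                          // explicit release; the destructor would also release
        return true;
    }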
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00196.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00196.html
new file mode 100644 (file)
index 0000000..c61b920
--- /dev/null
@@ -0,0 +1,112 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::scalable_allocator&lt; T &gt; Class Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00196.html">scalable_allocator</a></div>
+<h1>tbb::scalable_allocator&lt; T &gt; Class Template Reference<br>
+<small>
+[<a class="el" href="a00282.html">Memory Allocation</a>]</small>
+</h1><!-- doxytag: class="tbb::scalable_allocator" -->Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00435.html">scalable_allocator.h</a>&gt;</code>
+<p>
+<a href="a00063.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="5a1cf13c339ee177a103a7e19bf2710a"></a><!-- doxytag: member="tbb::scalable_allocator::value_type" ref="5a1cf13c339ee177a103a7e19bf2710a" args="" -->
+typedef internal::allocator_type&lt;<br>
+ T &gt;::value_type&nbsp;</td><td class="memItemRight" valign="bottom"><b>value_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a5121ec62bc79faba6a4b674d59e7260"></a><!-- doxytag: member="tbb::scalable_allocator::pointer" ref="a5121ec62bc79faba6a4b674d59e7260" args="" -->
+typedef value_type *&nbsp;</td><td class="memItemRight" valign="bottom"><b>pointer</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="846a6fcec3bc7fa3839a12266a729a02"></a><!-- doxytag: member="tbb::scalable_allocator::const_pointer" ref="846a6fcec3bc7fa3839a12266a729a02" args="" -->
+typedef const value_type *&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_pointer</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="254d7749de9277bb7711470c04d963ea"></a><!-- doxytag: member="tbb::scalable_allocator::reference" ref="254d7749de9277bb7711470c04d963ea" args="" -->
+typedef value_type &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><b>reference</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="acad193a7f6e16253f772ba3e9b2c98f"></a><!-- doxytag: member="tbb::scalable_allocator::const_reference" ref="acad193a7f6e16253f772ba3e9b2c98f" args="" -->
+typedef const value_type &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_reference</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="4a5cb7482f222a14b513bbdd4ad8507d"></a><!-- doxytag: member="tbb::scalable_allocator::size_type" ref="4a5cb7482f222a14b513bbdd4ad8507d" args="" -->
+typedef size_t&nbsp;</td><td class="memItemRight" valign="bottom"><b>size_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="cb96c23f650cf91a73e2810b54c481bb"></a><!-- doxytag: member="tbb::scalable_allocator::difference_type" ref="cb96c23f650cf91a73e2810b54c481bb" args="" -->
+typedef ptrdiff_t&nbsp;</td><td class="memItemRight" valign="bottom"><b>difference_type</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="eee1873729ed05c7f92544c3ff0c08bc"></a><!-- doxytag: member="tbb::scalable_allocator::scalable_allocator" ref="eee1873729ed05c7f92544c3ff0c08bc" args="(const scalable_allocator &amp;)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>scalable_allocator</b> (const <a class="el" href="a00196.html">scalable_allocator</a> &amp;)  throw ()</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="d56dc9f231c59f912297ac3c3b3b93b6"></a><!-- doxytag: member="tbb::scalable_allocator::scalable_allocator" ref="d56dc9f231c59f912297ac3c3b3b93b6" args="(const scalable_allocator&lt; U &gt; &amp;)" -->
+template&lt;typename U&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>scalable_allocator</b> (const <a class="el" href="a00196.html">scalable_allocator</a>&lt; U &gt; &amp;)  throw ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="78fa13a188b542ea724d45a9dbf5a071"></a><!-- doxytag: member="tbb::scalable_allocator::address" ref="78fa13a188b542ea724d45a9dbf5a071" args="(reference x) const " -->
+pointer&nbsp;</td><td class="memItemRight" valign="bottom"><b>address</b> (reference x) const </td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="14be4ad8eb478c87e240e605221ab455"></a><!-- doxytag: member="tbb::scalable_allocator::address" ref="14be4ad8eb478c87e240e605221ab455" args="(const_reference x) const " -->
+const_pointer&nbsp;</td><td class="memItemRight" valign="bottom"><b>address</b> (const_reference x) const </td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="726b1586d05d44665a36e1c7b2699bfd"></a><!-- doxytag: member="tbb::scalable_allocator::allocate" ref="726b1586d05d44665a36e1c7b2699bfd" args="(size_type n, const void *=0)" -->
+pointer&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00196.html#726b1586d05d44665a36e1c7b2699bfd">allocate</a> (size_type n, const void *=0)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Allocate space for n objects. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f806a238c18cbcfb531e1e0a0d2ec59d"></a><!-- doxytag: member="tbb::scalable_allocator::deallocate" ref="f806a238c18cbcfb531e1e0a0d2ec59d" args="(pointer p, size_type)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00196.html#f806a238c18cbcfb531e1e0a0d2ec59d">deallocate</a> (pointer p, size_type)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Free previously allocated block of memory. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="880e766f1d913988c21973dbdd874fd5"></a><!-- doxytag: member="tbb::scalable_allocator::max_size" ref="880e766f1d913988c21973dbdd874fd5" args="() const " -->
+size_type&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00196.html#880e766f1d913988c21973dbdd874fd5">max_size</a> () const   throw ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Largest value for which method allocate might succeed. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ae824c87fcc35467057fd3304c5e97ba"></a><!-- doxytag: member="tbb::scalable_allocator::construct" ref="ae824c87fcc35467057fd3304c5e97ba" args="(pointer p, const value_type &amp;value)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><b>construct</b> (pointer p, const value_type &amp;value)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="743f878b04a492bf7ae0e82a64dc20c8"></a><!-- doxytag: member="tbb::scalable_allocator::destroy" ref="743f878b04a492bf7ae0e82a64dc20c8" args="(pointer p)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><b>destroy</b> (pointer p)</td></tr>
+
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">struct &nbsp;</td><td class="memItemRight" valign="bottom"><b>rebind</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename T&gt;<br>
+ class tbb::scalable_allocator&lt; T &gt;</h3>
+
+Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5. 
+<p>
+The members are ordered the same way they are in section 20.4.1 of the ISO C++ standard. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00435.html">scalable_allocator.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
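Because scalable_allocator meets the standard allocator requirements, it can be plugged into standard containers; a minimal sketch (the container choice and make_sequence() are illustrative):

    #include <cstddef>
    #include <vector>
    #include "tbb/scalable_allocator.h"

    // A std::vector that obtains its storage from the TBB scalable memory allocator.
    typedef std::vector<double, tbb::scalable_allocator<double> > scalable_vector;

    scalable_vector make_sequence(std::size_t n) {
        scalable_vector v;
        v.reserve(n);                      // goes through scalable_allocator::allocate
        for (std::size_t i = 0; i < n; ++i)
            v.push_back(double(i));
        return v;                          // deallocate runs when the vector is destroyed
    }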
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00197.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00197.html
new file mode 100644 (file)
index 0000000..fd819a7
--- /dev/null
@@ -0,0 +1,65 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::scalable_allocator&lt; void &gt; Class Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00197.html">scalable_allocator&lt; void &gt;</a></div>
+<h1>tbb::scalable_allocator&lt; void &gt; Class Template Reference<br>
+<small>
+[<a class="el" href="a00282.html">Memory Allocation</a>]</small>
+</h1><!-- doxytag: class="tbb::scalable_allocator&lt; void &gt;" -->Analogous to std::allocator&lt;void&gt;, as defined in ISO C++ Standard, Section 20.4.1.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00435.html">scalable_allocator.h</a>&gt;</code>
+<p>
+<a href="a00065.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="2046816029c2c7856f7684aed2af9cff"></a><!-- doxytag: member="tbb::scalable_allocator&lt; void &gt;::pointer" ref="2046816029c2c7856f7684aed2af9cff" args="" -->
+typedef void *&nbsp;</td><td class="memItemRight" valign="bottom"><b>pointer</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="591c568d23f8aadf6ea8d1dc939c4e75"></a><!-- doxytag: member="tbb::scalable_allocator&lt; void &gt;::const_pointer" ref="591c568d23f8aadf6ea8d1dc939c4e75" args="" -->
+typedef const void *&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_pointer</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="64dbab666af0740c30b77edac9f157ce"></a><!-- doxytag: member="tbb::scalable_allocator&lt; void &gt;::value_type" ref="64dbab666af0740c30b77edac9f157ce" args="" -->
+typedef void&nbsp;</td><td class="memItemRight" valign="bottom"><b>value_type</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">struct &nbsp;</td><td class="memItemRight" valign="bottom"><b>rebind</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;&gt;<br>
+ class tbb::scalable_allocator&lt; void &gt;</h3>
+
+Analogous to std::allocator&lt;void&gt;, as defined in ISO C++ Standard, Section 20.4.1. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00435.html">scalable_allocator.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00198.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00198.html
new file mode 100644 (file)
index 0000000..07f23ff
--- /dev/null
@@ -0,0 +1,64 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::simple_partitioner Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00198.html">simple_partitioner</a></div>
+<h1>tbb::simple_partitioner Class Reference<br>
+<small>
+[<a class="el" href="a00280.html">Algorithms</a>]</small>
+</h1><!-- doxytag: class="tbb::simple_partitioner" -->A simple partitioner.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00414.html">partitioner.h</a>&gt;</code>
+<p>
+<a href="a00044.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a47f7e0208a2bf68f35b51b27d2ddf2a"></a><!-- doxytag: member="tbb::simple_partitioner::internal::start_for" ref="a47f7e0208a2bf68f35b51b27d2ddf2a" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::start_for</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e8f710e431b62c2a48914bd99d0fe034"></a><!-- doxytag: member="tbb::simple_partitioner::internal::start_reduce" ref="e8f710e431b62c2a48914bd99d0fe034" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::start_reduce</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="76d97ae6ad98db0acfc8ed8cb7c76705"></a><!-- doxytag: member="tbb::simple_partitioner::internal::start_scan" ref="76d97ae6ad98db0acfc8ed8cb7c76705" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::start_scan</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><b>partition_type</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+A simple partitioner. 
+<p>
+Divides the range repeatedly until it is no longer divisible. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00414.html">partitioner.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
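A sketch of how simple_partitioner is typically combined with parallel_for and the grainsize argument of blocked_range (the Scale functor and scale_all() are hypothetical example code):

    #include <cstddef>
    #include "tbb/blocked_range.h"
    #include "tbb/parallel_for.h"
    #include "tbb/partitioner.h"

    struct Scale {
        float* a;
        float factor;
        void operator()(const tbb::blocked_range<std::size_t>& r) const {
            for (std::size_t i = r.begin(); i != r.end(); ++i)
                a[i] *= factor;
        }
    };

    void scale_all(float a[], std::size_t n, float factor) {
        Scale body = { a, factor };
        // simple_partitioner keeps splitting until the range is no longer divisible,
        // so the grainsize (1024 here) bounds the size of the leaf chunks.
        tbb::parallel_for(tbb::blocked_range<std::size_t>(0, n, 1024), body,
                          tbb::simple_partitioner());
    }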
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00199.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00199.html
new file mode 100644 (file)
index 0000000..84aaeed
--- /dev/null
@@ -0,0 +1,131 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::spin_mutex Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00199.html">spin_mutex</a></div>
+<h1>tbb::spin_mutex Class Reference<br>
+<small>
+[<a class="el" href="a00283.html">Synchronization</a>]</small>
+</h1><!-- doxytag: class="tbb::spin_mutex" -->A lock that occupies a single byte.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00436.html">spin_mutex.h</a>&gt;</code>
+<p>
+<a href="a00067.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00199.html#3d8fb44644fd8d41ada1fbeba7409be3">spin_mutex</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct unacquired lock.  <a href="#3d8fb44644fd8d41ada1fbeba7409be3"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="4b3fa21632815f8fab2fd6c67ec0d48c"></a><!-- doxytag: member="tbb::spin_mutex::internal_construct" ref="4b3fa21632815f8fab2fd6c67ec0d48c" args="()" -->
+void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal_construct</b> ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="4f748989e19b6045e3a2d2ee73626a28"></a><!-- doxytag: member="tbb::spin_mutex::lock" ref="4f748989e19b6045e3a2d2ee73626a28" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00199.html#4f748989e19b6045e3a2d2ee73626a28">lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Acquire lock. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00199.html#8f9a58fb56a2b4c5efe1a7f7c1ae2074">try_lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Try acquiring lock (non-blocking).  <a href="#8f9a58fb56a2b4c5efe1a7f7c1ae2074"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="0e843ee6265f57f27d228ba91e7308ef"></a><!-- doxytag: member="tbb::spin_mutex::unlock" ref="0e843ee6265f57f27d228ba91e7308ef" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00199.html#0e843ee6265f57f27d228ba91e7308ef">unlock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Release lock. <br></td></tr>
+<tr><td colspan="2"><br><h2>Static Public Attributes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="5a7c93ca9e7ab656037a43984f24c051"></a><!-- doxytag: member="tbb::spin_mutex::is_rw_mutex" ref="5a7c93ca9e7ab656037a43984f24c051" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_rw_mutex</b> = false</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a967c455d0d80c9bbe030905201391b6"></a><!-- doxytag: member="tbb::spin_mutex::is_recursive_mutex" ref="a967c455d0d80c9bbe030905201391b6" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_recursive_mutex</b> = false</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ec12e4036073fb684b6d2d33493ed0aa"></a><!-- doxytag: member="tbb::spin_mutex::is_fair_mutex" ref="ec12e4036073fb684b6d2d33493ed0aa" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_fair_mutex</b> = false</td></tr>
+
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a36f15b770a1bb0b56e33d7fa75ebd1a"></a><!-- doxytag: member="tbb::spin_mutex::scoped_lock" ref="a36f15b770a1bb0b56e33d7fa75ebd1a" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>scoped_lock</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00200.html">scoped_lock</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Represents acquisition of a mutex.  <a href="a00200.html#_details">More...</a><br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+A lock that occupies a single byte. 
+<p>
+A <a class="el" href="a00199.html">spin_mutex</a> is a spin mutex that fits in a single byte. It should be used only for locking short critical sections (typically less than 20 instructions) when fairness is not an issue. If zero-initialized, the mutex is considered unheld. 
+<p>
+<hr><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" name="3d8fb44644fd8d41ada1fbeba7409be3"></a><!-- doxytag: member="tbb::spin_mutex::spin_mutex" ref="3d8fb44644fd8d41ada1fbeba7409be3" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">tbb::spin_mutex::spin_mutex           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Construct unacquired lock. 
+<p>
+Equivalent to zero-initialization of *this. 
+</div>
+</div><p>
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="8f9a58fb56a2b4c5efe1a7f7c1ae2074"></a><!-- doxytag: member="tbb::spin_mutex::try_lock" ref="8f9a58fb56a2b4c5efe1a7f7c1ae2074" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">bool tbb::spin_mutex::try_lock           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Try acquiring lock (non-blocking). 
+<p>
+Returns true if the lock was acquired; false otherwise. 
+</div>
+</div><p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00436.html">spin_mutex.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
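A minimal sketch of spin_mutex protecting a very short critical section (count and bump() are illustrative):

    #include "tbb/spin_mutex.h"

    tbb::spin_mutex count_mutex;   // zero-initialized at namespace scope, i.e. unheld
    long count = 0;

    void bump() {
        // Keep the critical section tiny: waiters spin rather than block.
        tbb::spin_mutex::scoped_lock lock(count_mutex);
        ++count;
    }                              // lock released when the scoped_lock is destroyed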
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00200.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00200.html
new file mode 100644 (file)
index 0000000..bf7e444
--- /dev/null
@@ -0,0 +1,97 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::spin_mutex::scoped_lock Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00199.html">spin_mutex</a>::<a class="el" href="a00200.html">scoped_lock</a></div>
+<h1>tbb::spin_mutex::scoped_lock Class Reference</h1><!-- doxytag: class="tbb::spin_mutex::scoped_lock" -->Represents acquisition of a mutex.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00436.html">spin_mutex.h</a>&gt;</code>
+<p>
+<a href="a00068.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="29ae680ae7f5e685c2e15535b9c855b3"></a><!-- doxytag: member="tbb::spin_mutex::scoped_lock::scoped_lock" ref="29ae680ae7f5e685c2e15535b9c855b3" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00200.html#29ae680ae7f5e685c2e15535b9c855b3">scoped_lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct without acquiring a mutex. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="5ce6807050a9e8f87bcb4a65dccb12ef"></a><!-- doxytag: member="tbb::spin_mutex::scoped_lock::scoped_lock" ref="5ce6807050a9e8f87bcb4a65dccb12ef" args="(spin_mutex &amp;m)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00200.html#5ce6807050a9e8f87bcb4a65dccb12ef">scoped_lock</a> (<a class="el" href="a00199.html">spin_mutex</a> &amp;m)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct and acquire lock on a mutex. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="3ee3c338732b1f64b0b32a757807a30d"></a><!-- doxytag: member="tbb::spin_mutex::scoped_lock::acquire" ref="3ee3c338732b1f64b0b32a757807a30d" args="(spin_mutex &amp;m)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00200.html#3ee3c338732b1f64b0b32a757807a30d">acquire</a> (<a class="el" href="a00199.html">spin_mutex</a> &amp;m)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Acquire lock. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00200.html#9297ec188534b45dc0ca48f2f39a0501">try_acquire</a> (<a class="el" href="a00199.html">spin_mutex</a> &amp;m)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Try acquiring lock (non-blocking).  <a href="#9297ec188534b45dc0ca48f2f39a0501"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="eeb615e68e963e6bf8d9c11402d0ce8e"></a><!-- doxytag: member="tbb::spin_mutex::scoped_lock::release" ref="eeb615e68e963e6bf8d9c11402d0ce8e" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00200.html#eeb615e68e963e6bf8d9c11402d0ce8e">release</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Release lock. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ac6fa425d1f06c56d8b70abc51aac844"></a><!-- doxytag: member="tbb::spin_mutex::scoped_lock::~scoped_lock" ref="ac6fa425d1f06c56d8b70abc51aac844" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00200.html#ac6fa425d1f06c56d8b70abc51aac844">~scoped_lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destroy lock. If holding a lock, releases the lock first. <br></td></tr>
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="6ee2a2cdaf6a2128849e7d7723d9174f"></a><!-- doxytag: member="tbb::spin_mutex::scoped_lock::spin_mutex" ref="6ee2a2cdaf6a2128849e7d7723d9174f" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>spin_mutex</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Represents acquisition of a mutex. 
+<p>
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="9297ec188534b45dc0ca48f2f39a0501"></a><!-- doxytag: member="tbb::spin_mutex::scoped_lock::try_acquire" ref="9297ec188534b45dc0ca48f2f39a0501" args="(spin_mutex &amp;m)" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">bool tbb::spin_mutex::scoped_lock::try_acquire           </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="a00199.html">spin_mutex</a> &amp;&nbsp;</td>
+          <td class="paramname"> <em>m</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Try acquiring lock (non-blocking). 
+<p>
+Returns true if the lock was acquired; false otherwise. 
+</div>
+</div><p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00436.html">spin_mutex.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
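The try_acquire member documented above supports a non-blocking attempt; a minimal sketch (try_bump() is a hypothetical function):

    #include "tbb/spin_mutex.h"

    tbb::spin_mutex m;

    bool try_bump(long& count) {
        tbb::spin_mutex::scoped_lock lock;   // constructed without acquiring a mutex
        if (!lock.try_acquire(m))
            return false;                    // mutex busy; give up instead of spinning
        ++count;
        return true;                         // destructor releases the lock
    }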
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00201.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00201.html
new file mode 100644 (file)
index 0000000..28f5fce
--- /dev/null
@@ -0,0 +1,133 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::spin_rw_mutex_v3 Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00201.html">spin_rw_mutex_v3</a></div>
+<h1>tbb::spin_rw_mutex_v3 Class Reference<br>
+<small>
+[<a class="el" href="a00283.html">Synchronization</a>]</small>
+</h1><!-- doxytag: class="tbb::spin_rw_mutex_v3" -->Fast, unfair, spinning reader-writer lock with backoff and writer-preference.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00437.html">spin_rw_mutex.h</a>&gt;</code>
+<p>
+<a href="a00069.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="61332b2756de89f3f5f69310cbb6e70c"></a><!-- doxytag: member="tbb::spin_rw_mutex_v3::spin_rw_mutex_v3" ref="61332b2756de89f3f5f69310cbb6e70c" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00201.html#61332b2756de89f3f5f69310cbb6e70c">spin_rw_mutex_v3</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct unacquired mutex. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="9a815fb2759e55072ed413f1b6970cf3"></a><!-- doxytag: member="tbb::spin_rw_mutex_v3::~spin_rw_mutex_v3" ref="9a815fb2759e55072ed413f1b6970cf3" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00201.html#9a815fb2759e55072ed413f1b6970cf3">~spin_rw_mutex_v3</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destructor asserts if the mutex is acquired, i.e. state is zero. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="4007d6e1523dbc3c2bb7f889ab789a8a"></a><!-- doxytag: member="tbb::spin_rw_mutex_v3::lock" ref="4007d6e1523dbc3c2bb7f889ab789a8a" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00201.html#4007d6e1523dbc3c2bb7f889ab789a8a">lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Acquire writer lock. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00201.html#088bb256be794cc47d3b83791632fdfc">try_lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Try acquiring writer lock (non-blocking).  <a href="#088bb256be794cc47d3b83791632fdfc"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f9f52ead2098eb5fb12da59d5ae53b55"></a><!-- doxytag: member="tbb::spin_rw_mutex_v3::unlock" ref="f9f52ead2098eb5fb12da59d5ae53b55" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00201.html#f9f52ead2098eb5fb12da59d5ae53b55">unlock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Release lock. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="13f799708ac4ca437a16be202e263e18"></a><!-- doxytag: member="tbb::spin_rw_mutex_v3::lock_read" ref="13f799708ac4ca437a16be202e263e18" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00201.html#13f799708ac4ca437a16be202e263e18">lock_read</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Acquire reader lock. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00201.html#b8667415869013f840d976aa406d385a">try_lock_read</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Try acquiring reader lock (non-blocking).  <a href="#b8667415869013f840d976aa406d385a"></a><br></td></tr>
+<tr><td colspan="2"><br><h2>Static Public Attributes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="4c1e60f22c01604ca93c41b8334cc15e"></a><!-- doxytag: member="tbb::spin_rw_mutex_v3::is_rw_mutex" ref="4c1e60f22c01604ca93c41b8334cc15e" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_rw_mutex</b> = true</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="681a816fe76d6ca7752303f9dcc3c4b2"></a><!-- doxytag: member="tbb::spin_rw_mutex_v3::is_recursive_mutex" ref="681a816fe76d6ca7752303f9dcc3c4b2" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_recursive_mutex</b> = false</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="4dd66d90df6247da6050e88b2aa3b820"></a><!-- doxytag: member="tbb::spin_rw_mutex_v3::is_fair_mutex" ref="4dd66d90df6247da6050e88b2aa3b820" args="" -->
+static const bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_fair_mutex</b> = false</td></tr>
+
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00202.html">scoped_lock</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">The scoped locking pattern.  <a href="a00202.html#_details">More...</a><br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Fast, unfair, spinning reader-writer lock with backoff and writer-preference. 
+<p>
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="088bb256be794cc47d3b83791632fdfc"></a><!-- doxytag: member="tbb::spin_rw_mutex_v3::try_lock" ref="088bb256be794cc47d3b83791632fdfc" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">bool tbb::spin_rw_mutex_v3::try_lock           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Try acquiring writer lock (non-blocking). 
+<p>
+Returns true if the writer lock was acquired; false otherwise. 
+</div>
+</div><p>
+<a class="anchor" name="b8667415869013f840d976aa406d385a"></a><!-- doxytag: member="tbb::spin_rw_mutex_v3::try_lock_read" ref="b8667415869013f840d976aa406d385a" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">bool tbb::spin_rw_mutex_v3::try_lock_read           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Try acquiring reader lock (non-blocking). 
+<p>
+Returns true if the reader lock was acquired; false otherwise. 
+</div>
+</div><p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00437.html">spin_rw_mutex.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
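A sketch of reader and writer acquisition through the scoped_lock write flag; spin_rw_mutex is the public typedef for spin_rw_mutex_v3, and the map, lookup() and insert() are illustrative:

    #include <map>
    #include <string>
    #include "tbb/spin_rw_mutex.h"

    tbb::spin_rw_mutex table_mutex;          // typedef for spin_rw_mutex_v3
    std::map<std::string, int> table;

    int lookup(const std::string& key) {
        // write=false requests a reader lock; many readers may hold it concurrently.
        tbb::spin_rw_mutex::scoped_lock lock(table_mutex, /*write=*/false);
        std::map<std::string, int>::const_iterator it = table.find(key);
        return it == table.end() ? -1 : it->second;
    }

    void insert(const std::string& key, int value) {
        // write=true (the default) requests exclusive writer access.
        tbb::spin_rw_mutex::scoped_lock lock(table_mutex, /*write=*/true);
        table[key] = value;
    }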
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00202.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00202.html
new file mode 100644 (file)
index 0000000..ae54f67
--- /dev/null
@@ -0,0 +1,123 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::spin_rw_mutex_v3::scoped_lock Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00201.html">spin_rw_mutex_v3</a>::<a class="el" href="a00202.html">scoped_lock</a></div>
+<h1>tbb::spin_rw_mutex_v3::scoped_lock Class Reference</h1><!-- doxytag: class="tbb::spin_rw_mutex_v3::scoped_lock" -->The scoped locking pattern.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00437.html">spin_rw_mutex.h</a>&gt;</code>
+<p>
+<a href="a00070.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00202.html#d6ea60dee5563f702379bf5e51aa8806">scoped_lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct lock that has not acquired a mutex.  <a href="#d6ea60dee5563f702379bf5e51aa8806"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="42a92d4f8fdde425b111cfa8a9228071"></a><!-- doxytag: member="tbb::spin_rw_mutex_v3::scoped_lock::scoped_lock" ref="42a92d4f8fdde425b111cfa8a9228071" args="(spin_rw_mutex &amp;m, bool write=true)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00202.html#42a92d4f8fdde425b111cfa8a9228071">scoped_lock</a> (<a class="el" href="a00201.html">spin_rw_mutex</a> &amp;m, bool write=true)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Acquire lock on given mutex. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="d7eaaa3f2e2c5dc11e7005811b1bdd04"></a><!-- doxytag: member="tbb::spin_rw_mutex_v3::scoped_lock::~scoped_lock" ref="d7eaaa3f2e2c5dc11e7005811b1bdd04" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00202.html#d7eaaa3f2e2c5dc11e7005811b1bdd04">~scoped_lock</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Release lock (if lock is held). <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="b0b646ec5be02a127d159bbb7ca65353"></a><!-- doxytag: member="tbb::spin_rw_mutex_v3::scoped_lock::acquire" ref="b0b646ec5be02a127d159bbb7ca65353" args="(spin_rw_mutex &amp;m, bool write=true)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00202.html#b0b646ec5be02a127d159bbb7ca65353">acquire</a> (<a class="el" href="a00201.html">spin_rw_mutex</a> &amp;m, bool write=true)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Acquire lock on given mutex. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00202.html#3f0b1e3f2efab63336400348bd070226">upgrade_to_writer</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Upgrade reader to become a writer.  <a href="#3f0b1e3f2efab63336400348bd070226"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="61b14d00a78185c9b2d206ebfc379124"></a><!-- doxytag: member="tbb::spin_rw_mutex_v3::scoped_lock::release" ref="61b14d00a78185c9b2d206ebfc379124" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00202.html#61b14d00a78185c9b2d206ebfc379124">release</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Release lock. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="c2c2c38a08cb9080e87099fac3e5bc94"></a><!-- doxytag: member="tbb::spin_rw_mutex_v3::scoped_lock::downgrade_to_reader" ref="c2c2c38a08cb9080e87099fac3e5bc94" args="()" -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00202.html#c2c2c38a08cb9080e87099fac3e5bc94">downgrade_to_reader</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Downgrade writer to become a reader. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="9879626968d9b9a04cd2ec0fb2e84ae1"></a><!-- doxytag: member="tbb::spin_rw_mutex_v3::scoped_lock::try_acquire" ref="9879626968d9b9a04cd2ec0fb2e84ae1" args="(spin_rw_mutex &amp;m, bool write=true)" -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00202.html#9879626968d9b9a04cd2ec0fb2e84ae1">try_acquire</a> (<a class="el" href="a00201.html">spin_rw_mutex</a> &amp;m, bool write=true)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Try acquire lock on given mutex. <br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+The scoped locking pattern. 
+<p>
+It helps to avoid the common problem of forgetting to release the lock. It also provides the "node" used for queuing locks. 
+<p>
+<hr><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" name="d6ea60dee5563f702379bf5e51aa8806"></a><!-- doxytag: member="tbb::spin_rw_mutex_v3::scoped_lock::scoped_lock" ref="d6ea60dee5563f702379bf5e51aa8806" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">tbb::spin_rw_mutex_v3::scoped_lock::scoped_lock           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Construct lock that has not acquired a mutex. 
+<p>
+Equivalent to zero-initialization of *this. 
+</div>
+</div><p>
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="3f0b1e3f2efab63336400348bd070226"></a><!-- doxytag: member="tbb::spin_rw_mutex_v3::scoped_lock::upgrade_to_writer" ref="3f0b1e3f2efab63336400348bd070226" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">bool tbb::spin_rw_mutex_v3::scoped_lock::upgrade_to_writer           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Upgrade reader to become a writer. 
+<p>
+Returns true if the upgrade happened without releasing and re-acquiring the lock, and false otherwise. 
+</div>
+</div><p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00437.html">spin_rw_mutex.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00203.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00203.html
new file mode 100644 (file)
index 0000000..5b6325f
--- /dev/null
@@ -0,0 +1,51 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::split Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00203.html">split</a></div>
+<h1>tbb::split Class Reference<br>
+<small>
+[<a class="el" href="a00280.html">Algorithms</a>]</small>
+</h1><!-- doxytag: class="tbb::split" -->Dummy type that distinguishes splitting constructor from copy constructor.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00471.html">tbb_stddef.h</a>&gt;</code>
+<p>
+<table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Dummy type that distinguishes splitting constructor from copy constructor. 
+<p>
+See description of parallel_for and parallel_reduce for example usages. 
+<p>
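+A short sketch of the splitting-constructor idiom that this type enables (the <code>TrivialRange</code> class is illustrative, not part of TBB):
+<pre>
+#include &lt;cstddef&gt;
+#include "tbb/tbb_stddef.h"      // tbb::split
+
+class TrivialRange {
+    std::size_t lo, hi;
+public:
+    TrivialRange( std::size_t l, std::size_t h ) : lo(l), hi(h) {}
+    // Splitting constructor: takes the upper half of r and leaves r the lower half.
+    // The unnamed tbb::split argument only distinguishes it from the copy constructor.
+    TrivialRange( TrivialRange&amp; r, tbb::split )
+        : lo( (r.lo + r.hi) / 2 ), hi( r.hi )
+    { r.hi = lo; }
+    bool empty() const        { return lo == hi; }
+    bool is_divisible() const { return hi - lo &gt; 1; }
+};
+</pre>
+<p>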
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00471.html">tbb_stddef.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00204.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00204.html
new file mode 100644 (file)
index 0000000..a843fea
--- /dev/null
@@ -0,0 +1,536 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::task Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00204.html">task</a></div>
+<h1>tbb::task Class Reference<br>
+<small>
+[<a class="el" href="a00285.html">Task Scheduling</a>]</small>
+</h1><!-- doxytag: class="tbb::task" -->Base class for user-defined tasks.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00438.html">task.h</a>&gt;</code>
+<p>
+<p>Inheritance diagram for tbb::task:
+<p><center><img src="a00204.png" usemap="#tbb::task_map" border="0" alt=""></center>
+<map name="tbb::task_map">
+<area href="a00167.html" alt="tbb::empty_task" shape="rect" coords="0,56,97,80">
+</map>
+<a href="a00075.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">typedef internal::affinity_id&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#d61bb32389d3857bf7511d69beaafb76">affinity_id</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">An id as used for specifying affinity.  <a href="#d61bb32389d3857bf7511d69beaafb76"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293ead0fe2302ccc360923f738c2ed7ec1b9">executing</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">task is running, and will be destroyed after method <a class="el" href="a00204.html#22c298cd40937a431a06777423f002f6">execute()</a> completes.  <a href="#4a3c415562d17905390ea5b49d12293ead0fe2302ccc360923f738c2ed7ec1b9"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e3bf499aa6e6487cd1ace883a63100513">reexecute</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">task to be rescheduled.  <a href="#4a3c415562d17905390ea5b49d12293e3bf499aa6e6487cd1ace883a63100513"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e0841dcf1c2a96dee9aa7b69f636cb81a">ready</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">task is in ready pool, or is going to be put there, or was just taken off.  <a href="#4a3c415562d17905390ea5b49d12293e0841dcf1c2a96dee9aa7b69f636cb81a"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293ebe94d3348dd038e41107819f00c1884c">allocated</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">task object is freshly allocated or recycled.  <a href="#4a3c415562d17905390ea5b49d12293ebe94d3348dd038e41107819f00c1884c"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293ecc67ca92bd6f1ce9738a1e9e7206b735">freed</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">task object is on free list, or is going to be put there, or was just taken off.  <a href="#4a3c415562d17905390ea5b49d12293ecc67ca92bd6f1ce9738a1e9e7206b735"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e58debec6ab130290640d0cc2eedba35d">recycle</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">task to be recycled as continuation  <a href="#4a3c415562d17905390ea5b49d12293e58debec6ab130290640d0cc2eedba35d"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">enum &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e">state_type</a> { <br>
+&nbsp;&nbsp;<a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293ead0fe2302ccc360923f738c2ed7ec1b9">executing</a>, 
+<a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e3bf499aa6e6487cd1ace883a63100513">reexecute</a>, 
+<a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e0841dcf1c2a96dee9aa7b69f636cb81a">ready</a>, 
+<a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293ebe94d3348dd038e41107819f00c1884c">allocated</a>, 
+<br>
+&nbsp;&nbsp;<a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293ecc67ca92bd6f1ce9738a1e9e7206b735">freed</a>, 
+<a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e58debec6ab130290640d0cc2eedba35d">recycle</a>
+<br>
+ }</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Enumeration of task states that the scheduler considers.  <a href="a00204.html#4a3c415562d17905390ea5b49d12293e">More...</a><br></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="98245ee0473f84cb19dbbf8c81134908"></a><!-- doxytag: member="tbb::task::~task" ref="98245ee0473f84cb19dbbf8c81134908" args="()" -->
+virtual&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#98245ee0473f84cb19dbbf8c81134908">~task</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destructor. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="22c298cd40937a431a06777423f002f6"></a><!-- doxytag: member="tbb::task::execute" ref="22c298cd40937a431a06777423f002f6" args="()=0" -->
+virtual <a class="el" href="a00204.html">task</a> *&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#22c298cd40937a431a06777423f002f6">execute</a> ()=0</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Should be overridden by derived classes. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">internal::allocate_continuation_proxy &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#1434c79a5138993269d034008bff7329">allocate_continuation</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Returns proxy for overloaded new that allocates a continuation task of *this.  <a href="#1434c79a5138993269d034008bff7329"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="1ff794f7053cd9148d5f280fbf07377f"></a><!-- doxytag: member="tbb::task::allocate_child" ref="1ff794f7053cd9148d5f280fbf07377f" args="()" -->
+internal::allocate_child_proxy &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#1ff794f7053cd9148d5f280fbf07377f">allocate_child</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Returns proxy for overloaded new that allocates a child task of *this. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#dfaacf92685e5f86393bf657b2853bf8">destroy</a> (<a class="el" href="a00204.html">task</a> &amp;t)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destroy a task.  <a href="#dfaacf92685e5f86393bf657b2853bf8"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#a67a79e18f62b43a623a00cfbd76db4c">recycle_as_continuation</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Change this to be a continuation of its former self.  <a href="#a67a79e18f62b43a623a00cfbd76db4c"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#3b290d14109704e2b69dc1ac980a7a76">recycle_as_safe_continuation</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Recommended, safe variant of recycle_as_continuation.  <a href="#3b290d14109704e2b69dc1ac980a7a76"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="db399855177438bbc9cc61d508dae8d2"></a><!-- doxytag: member="tbb::task::recycle_as_child_of" ref="db399855177438bbc9cc61d508dae8d2" args="(task &amp;new_parent)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#db399855177438bbc9cc61d508dae8d2">recycle_as_child_of</a> (<a class="el" href="a00204.html">task</a> &amp;new_parent)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Change this to be a child of new_parent. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#4f1be9bbcdb487830dbe298b68d85144">recycle_to_reexecute</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Schedule this for reexecution after current <a class="el" href="a00204.html#22c298cd40937a431a06777423f002f6">execute()</a> returns.  <a href="#4f1be9bbcdb487830dbe298b68d85144"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="7653ceef53a188d05b5c779e1104b698"></a><!-- doxytag: member="tbb::task::depth" ref="7653ceef53a188d05b5c779e1104b698" args="() const " -->
+intptr_t&nbsp;</td><td class="memItemRight" valign="bottom"><b>depth</b> () const </td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e864e9c80e3d79f00d77fb99f4757621"></a><!-- doxytag: member="tbb::task::set_depth" ref="e864e9c80e3d79f00d77fb99f4757621" args="(intptr_t)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><b>set_depth</b> (intptr_t)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="c83fcc26f6513f0e06de70cbab9dee46"></a><!-- doxytag: member="tbb::task::add_to_depth" ref="c83fcc26f6513f0e06de70cbab9dee46" args="(int)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><b>add_to_depth</b> (int)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="06a4206a57e8e12a439b14d6d41cfd92"></a><!-- doxytag: member="tbb::task::set_ref_count" ref="06a4206a57e8e12a439b14d6d41cfd92" args="(int count)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#06a4206a57e8e12a439b14d6d41cfd92">set_ref_count</a> (int count)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Set reference count. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#f5fb43c7ad0de5a4b95703cebc39e345">increment_ref_count</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Atomically increment reference count.  <a href="#f5fb43c7ad0de5a4b95703cebc39e345"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">int&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#ef4680f5c148020c5e7e43ddef44cd5d">decrement_ref_count</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Atomically decrement reference count.  <a href="#ef4680f5c148020c5e7e43ddef44cd5d"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="3ce28ca9baa771cfc37ecd72e69c4f3c"></a><!-- doxytag: member="tbb::task::spawn_and_wait_for_all" ref="3ce28ca9baa771cfc37ecd72e69c4f3c" args="(task &amp;child)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#3ce28ca9baa771cfc37ecd72e69c4f3c">spawn_and_wait_for_all</a> (<a class="el" href="a00204.html">task</a> &amp;child)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Similar to spawn followed by wait_for_all, but more efficient. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="894ab68378e502776d8220eea7ce9fa1"></a><!-- doxytag: member="tbb::task::spawn_and_wait_for_all" ref="894ab68378e502776d8220eea7ce9fa1" args="(task_list &amp;list)" -->
+void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#894ab68378e502776d8220eea7ce9fa1">spawn_and_wait_for_all</a> (<a class="el" href="a00207.html">task_list</a> &amp;list)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Similar to spawn followed by wait_for_all, but more efficient. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#53d2615ad9c38859b4c8080936600283">wait_for_all</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Wait for reference count to become one, and set reference count to zero.  <a href="#53d2615ad9c38859b4c8080936600283"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="314e98ee4347ccec83efcb9ee22e8596"></a><!-- doxytag: member="tbb::task::parent" ref="314e98ee4347ccec83efcb9ee22e8596" args="() const " -->
+<a class="el" href="a00204.html">task</a> *&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#314e98ee4347ccec83efcb9ee22e8596">parent</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">task on whose behalf this task is working, or NULL if this is a root. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="d8c36a93f3972590fbb65ff1cef3173b"></a><!-- doxytag: member="tbb::task::context" ref="d8c36a93f3972590fbb65ff1cef3173b" args="()" -->
+<a class="el" href="a00206.html">task_group_context</a> *&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#d8c36a93f3972590fbb65ff1cef3173b">context</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Shared context that is used to communicate asynchronous state changes. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f9169402702f56bf519448aaf34450aa"></a><!-- doxytag: member="tbb::task::is_stolen_task" ref="f9169402702f56bf519448aaf34450aa" args="() const " -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#f9169402702f56bf519448aaf34450aa">is_stolen_task</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">True if task was stolen from the task pool of another thread. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="0af7b2d7e6e8b4333b2accfce3dfb374"></a><!-- doxytag: member="tbb::task::state" ref="0af7b2d7e6e8b4333b2accfce3dfb374" args="() const " -->
+<a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e">state_type</a>&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#0af7b2d7e6e8b4333b2accfce3dfb374">state</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Current execution state. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ad774f55eaec008ae02b236423209ced"></a><!-- doxytag: member="tbb::task::ref_count" ref="ad774f55eaec008ae02b236423209ced" args="() const " -->
+int&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#ad774f55eaec008ae02b236423209ced">ref_count</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">The internal reference count. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="c26718b3b247cd13deb1a741902e7105"></a><!-- doxytag: member="tbb::task::is_owned_by_current_thread" ref="c26718b3b247cd13deb1a741902e7105" args="() const " -->
+bool __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#c26718b3b247cd13deb1a741902e7105">is_owned_by_current_thread</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Obsolete, and only retained for the sake of backward compatibility. Always returns true. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="dca19d7a45487a7d67a0db517e2b57c9"></a><!-- doxytag: member="tbb::task::set_affinity" ref="dca19d7a45487a7d67a0db517e2b57c9" args="(affinity_id id)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#dca19d7a45487a7d67a0db517e2b57c9">set_affinity</a> (<a class="el" href="a00204.html#d61bb32389d3857bf7511d69beaafb76">affinity_id</a> id)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Set affinity for this task. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="3a920a56b0bcf2801518fb45b2c9d2be"></a><!-- doxytag: member="tbb::task::affinity" ref="3a920a56b0bcf2801518fb45b2c9d2be" args="() const " -->
+<a class="el" href="a00204.html#d61bb32389d3857bf7511d69beaafb76">affinity_id</a>&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#3a920a56b0bcf2801518fb45b2c9d2be">affinity</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Current affinity of this task. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">virtual void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#713c338c8eeaebdc5a6b10a69c039b06">note_affinity</a> (<a class="el" href="a00204.html#d61bb32389d3857bf7511d69beaafb76">affinity_id</a> id)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Invoked by scheduler to notify task that it ran on unexpected thread.  <a href="#713c338c8eeaebdc5a6b10a69c039b06"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#0f3fb4aac549ab642022450a4bd13326">cancel_group_execution</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Initiates cancellation of all tasks in this cancellation group and its subordinate groups.  <a href="#0f3fb4aac549ab642022450a4bd13326"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="025f18118c057c4c8db87ff2ce8df975"></a><!-- doxytag: member="tbb::task::is_cancelled" ref="025f18118c057c4c8db87ff2ce8df975" args="() const " -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#025f18118c057c4c8db87ff2ce8df975">is_cancelled</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Returns true if the context received cancellation request. <br></td></tr>
+<tr><td colspan="2"><br><h2>Static Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="23acb0da0afd690da797f9f882027d34"></a><!-- doxytag: member="tbb::task::allocate_root" ref="23acb0da0afd690da797f9f882027d34" args="()" -->
+static internal::allocate_root_proxy&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#23acb0da0afd690da797f9f882027d34">allocate_root</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Returns proxy for overloaded new that allocates a root task. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="8ccc518caf31075a3e073996d2d240a4"></a><!-- doxytag: member="tbb::task::allocate_root" ref="8ccc518caf31075a3e073996d2d240a4" args="(task_group_context &amp;ctx)" -->
+static internal::allocate_root_with_context_proxy&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#8ccc518caf31075a3e073996d2d240a4">allocate_root</a> (<a class="el" href="a00206.html">task_group_context</a> &amp;ctx)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Returns proxy for overloaded new that allocates a root task associated with user supplied context. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ce8ce689c26a4ddf343829bc3c73290a"></a><!-- doxytag: member="tbb::task::spawn_root_and_wait" ref="ce8ce689c26a4ddf343829bc3c73290a" args="(task &amp;root)" -->
+static void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#ce8ce689c26a4ddf343829bc3c73290a">spawn_root_and_wait</a> (<a class="el" href="a00204.html">task</a> &amp;root)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Spawn task allocated by allocate_root, wait for it to complete, and deallocate it. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">static void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#c33c7edbaec67aa8a56f48986a9dc69f">spawn_root_and_wait</a> (<a class="el" href="a00207.html">task_list</a> &amp;root_list)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Spawn root tasks on list and wait for all of them to finish.  <a href="#c33c7edbaec67aa8a56f48986a9dc69f"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="8365d063c0cc9d7bd616bca47256b93c"></a><!-- doxytag: member="tbb::task::enqueue" ref="8365d063c0cc9d7bd616bca47256b93c" args="(task &amp;t)" -->
+static void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#8365d063c0cc9d7bd616bca47256b93c">enqueue</a> (<a class="el" href="a00204.html">task</a> &amp;t)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Enqueue task for starvation-resistant execution. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="bd43e8d6249738efafd12d6a4c72c5e3"></a><!-- doxytag: member="tbb::task::self" ref="bd43e8d6249738efafd12d6a4c72c5e3" args="()" -->
+static <a class="el" href="a00204.html">task</a> &amp;__TBB_EXPORTED_FUNC&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#bd43e8d6249738efafd12d6a4c72c5e3">self</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">The innermost task being executed or destroyed by the current thread at the moment. <br></td></tr>
+<tr><td colspan="2"><br><h2>Protected Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="2bce8ec6e44706e70128f5cf91b76e67"></a><!-- doxytag: member="tbb::task::task" ref="2bce8ec6e44706e70128f5cf91b76e67" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html#2bce8ec6e44706e70128f5cf91b76e67">task</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Default constructor. <br></td></tr>
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="81d0122aee4ddae419a743ee50d7038e"></a><!-- doxytag: member="tbb::task::interface5::internal::task_base" ref="81d0122aee4ddae419a743ee50d7038e" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>interface5::internal::task_base</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="5eabadbca46f93ce6fb08290e85a0a65"></a><!-- doxytag: member="tbb::task::task_list" ref="5eabadbca46f93ce6fb08290e85a0a65" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>task_list</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e296810002652d771f692e20ca87e898"></a><!-- doxytag: member="tbb::task::internal::scheduler" ref="e296810002652d771f692e20ca87e898" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::scheduler</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="387b945c106854e1ed525f0668092930"></a><!-- doxytag: member="tbb::task::internal::allocate_root_proxy" ref="387b945c106854e1ed525f0668092930" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::allocate_root_proxy</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ecedb9436fb30df30cb3a52c64bb97e5"></a><!-- doxytag: member="tbb::task::internal::allocate_root_with_context_proxy" ref="ecedb9436fb30df30cb3a52c64bb97e5" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::allocate_root_with_context_proxy</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="9987b3b1174772e5e853bd94b9f15350"></a><!-- doxytag: member="tbb::task::internal::allocate_continuation_proxy" ref="9987b3b1174772e5e853bd94b9f15350" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::allocate_continuation_proxy</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="1f9e255654086b52fe107e4d8bbc8b7d"></a><!-- doxytag: member="tbb::task::internal::allocate_child_proxy" ref="1f9e255654086b52fe107e4d8bbc8b7d" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::allocate_child_proxy</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="0e99ce0045ea3c55d239f38ea026b8a7"></a><!-- doxytag: member="tbb::task::internal::allocate_additional_child_of_proxy" ref="0e99ce0045ea3c55d239f38ea026b8a7" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::allocate_additional_child_of_proxy</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Base class for user-defined tasks. 
+<p>
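+A minimal sketch of a user-defined task (the <code>HelloTask</code> name and its body are illustrative):
+<pre>
+#include "tbb/task.h"
+
+class HelloTask : public tbb::task {
+public:
+    // Overrides the pure virtual task::execute().
+    tbb::task* execute() {
+        // ... do the actual work here ...
+        return NULL;               // no task to bypass the scheduler with
+    }
+};
+
+// Typical driver code:
+//   tbb::task&amp; t = *new( tbb::task::allocate_root() ) HelloTask;
+//   tbb::task::spawn_root_and_wait( t );   // runs t and deallocates it
+</pre>
+<p>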
+<hr><h2>Member Typedef Documentation</h2>
+<a class="anchor" name="d61bb32389d3857bf7511d69beaafb76"></a><!-- doxytag: member="tbb::task::affinity_id" ref="d61bb32389d3857bf7511d69beaafb76" args="" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">typedef internal::affinity_id <a class="el" href="a00204.html#d61bb32389d3857bf7511d69beaafb76">tbb::task::affinity_id</a>          </td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+An id as used for specifying affinity. 
+<p>
+Guaranteed to be an integral type. A value of 0 means no affinity. 
+</div>
+</div><p>
+<hr><h2>Member Enumeration Documentation</h2>
+<a class="anchor" name="4a3c415562d17905390ea5b49d12293e"></a><!-- doxytag: member="tbb::task::state_type" ref="4a3c415562d17905390ea5b49d12293e" args="" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e">tbb::task::state_type</a>          </td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Enumeration of task states that the scheduler considers. 
+<p>
+<dl compact><dt><b>Enumerator: </b></dt><dd>
+<table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" name="4a3c415562d17905390ea5b49d12293ead0fe2302ccc360923f738c2ed7ec1b9"></a><!-- doxytag: member="executing" ref="4a3c415562d17905390ea5b49d12293ead0fe2302ccc360923f738c2ed7ec1b9" args="" -->executing</em>&nbsp;</td><td>
+task is running, and will be destroyed after method <a class="el" href="a00204.html#22c298cd40937a431a06777423f002f6">execute()</a> completes. </td></tr>
+<tr><td valign="top"><em><a class="anchor" name="4a3c415562d17905390ea5b49d12293e3bf499aa6e6487cd1ace883a63100513"></a><!-- doxytag: member="reexecute" ref="4a3c415562d17905390ea5b49d12293e3bf499aa6e6487cd1ace883a63100513" args="" -->reexecute</em>&nbsp;</td><td>
+task to be rescheduled. </td></tr>
+<tr><td valign="top"><em><a class="anchor" name="4a3c415562d17905390ea5b49d12293e0841dcf1c2a96dee9aa7b69f636cb81a"></a><!-- doxytag: member="ready" ref="4a3c415562d17905390ea5b49d12293e0841dcf1c2a96dee9aa7b69f636cb81a" args="" -->ready</em>&nbsp;</td><td>
+task is in ready pool, or is going to be put there, or was just taken off. </td></tr>
+<tr><td valign="top"><em><a class="anchor" name="4a3c415562d17905390ea5b49d12293ebe94d3348dd038e41107819f00c1884c"></a><!-- doxytag: member="allocated" ref="4a3c415562d17905390ea5b49d12293ebe94d3348dd038e41107819f00c1884c" args="" -->allocated</em>&nbsp;</td><td>
+task object is freshly allocated or recycled. </td></tr>
+<tr><td valign="top"><em><a class="anchor" name="4a3c415562d17905390ea5b49d12293ecc67ca92bd6f1ce9738a1e9e7206b735"></a><!-- doxytag: member="freed" ref="4a3c415562d17905390ea5b49d12293ecc67ca92bd6f1ce9738a1e9e7206b735" args="" -->freed</em>&nbsp;</td><td>
+task object is on free list, or is going to be put there, or was just taken off. </td></tr>
+<tr><td valign="top"><em><a class="anchor" name="4a3c415562d17905390ea5b49d12293e58debec6ab130290640d0cc2eedba35d"></a><!-- doxytag: member="recycle" ref="4a3c415562d17905390ea5b49d12293e58debec6ab130290640d0cc2eedba35d" args="" -->recycle</em>&nbsp;</td><td>
+task to be recycled as continuation </td></tr>
+</table>
+</dl>
+
+</div>
+</div><p>
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="1434c79a5138993269d034008bff7329"></a><!-- doxytag: member="tbb::task::allocate_continuation" ref="1434c79a5138993269d034008bff7329" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">internal::allocate_continuation_proxy&amp; tbb::task::allocate_continuation           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Returns proxy for overloaded new that allocates a continuation task of *this. 
+<p>
+The parent of *this is transferred to the continuation; afterwards *this has no parent. 
+</div>
+</div><p>
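+<p>
+A continuation-passing sketch (<code>MyTask</code>, <code>MyContinuation</code> and <code>ChildTask</code> are illustrative task types, not part of TBB):
+<pre>
+tbb::task* MyTask::execute() {
+    // The continuation takes over this task's parent and runs only after
+    // both children have completed.
+    MyContinuation&amp; c = *new( allocate_continuation() ) MyContinuation;
+    ChildTask&amp; a = *new( c.allocate_child() ) ChildTask( /*...*/ );
+    ChildTask&amp; b = *new( c.allocate_child() ) ChildTask( /*...*/ );
+    c.set_ref_count( 2 );          // two children; the continuation does not wait
+    spawn( b );
+    return &amp;a;                     // bypass the scheduler for the other child
+}
+</pre>
+<p>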
+<a class="anchor" name="0f3fb4aac549ab642022450a4bd13326"></a><!-- doxytag: member="tbb::task::cancel_group_execution" ref="0f3fb4aac549ab642022450a4bd13326" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">bool tbb::task::cancel_group_execution           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Initiates cancellation of all tasks in this cancellation group and its subordinate groups. 
+<p>
+<dl compact><dt><b>Returns:</b></dt><dd>false if cancellation has already been requested, true otherwise. </dd></dl>
+
+</div>
+</div><p>
+<a class="anchor" name="ef4680f5c148020c5e7e43ddef44cd5d"></a><!-- doxytag: member="tbb::task::decrement_ref_count" ref="ef4680f5c148020c5e7e43ddef44cd5d" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int tbb::task::decrement_ref_count           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Atomically decrement reference count. 
+<p>
+Has release semantics. 
+</div>
+</div><p>
+<a class="anchor" name="dfaacf92685e5f86393bf657b2853bf8"></a><!-- doxytag: member="tbb::task::destroy" ref="dfaacf92685e5f86393bf657b2853bf8" args="(task &amp;t)" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void __TBB_EXPORTED_METHOD tbb::task::destroy           </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="a00204.html">task</a> &amp;&nbsp;</td>
+          <td class="paramname"> <em>t</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Destroy a task. 
+<p>
+Usually, calling this method is unnecessary, because a task is implicitly deleted after its <a class="el" href="a00204.html#22c298cd40937a431a06777423f002f6">execute()</a> method runs. However, sometimes a task needs to be explicitly deallocated, such as when a root task is used as the parent in spawn_and_wait_for_all. 
+</div>
+</div><p>
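+<p>
+The classic case is a dummy root task used only for waiting (sketch; <code>MyTask</code> is illustrative):
+<pre>
+tbb::empty_task* dummy = new( tbb::task::allocate_root() ) tbb::empty_task;
+dummy-&gt;set_ref_count( 2 );                 // one child + one for the wait
+tbb::task&amp; child = *new( dummy-&gt;allocate_child() ) MyTask( /*...*/ );
+dummy-&gt;spawn_and_wait_for_all( child );
+dummy-&gt;destroy( *dummy );                  // dummy never runs, so free it explicitly
+</pre>
+<p>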
+<a class="anchor" name="f5fb43c7ad0de5a4b95703cebc39e345"></a><!-- doxytag: member="tbb::task::increment_ref_count" ref="f5fb43c7ad0de5a4b95703cebc39e345" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void tbb::task::increment_ref_count           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Atomically increment reference count. 
+<p>
+Has acquire semantics. 
+</div>
+</div><p>
+<a class="anchor" name="713c338c8eeaebdc5a6b10a69c039b06"></a><!-- doxytag: member="tbb::task::note_affinity" ref="713c338c8eeaebdc5a6b10a69c039b06" args="(affinity_id id)" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">virtual void __TBB_EXPORTED_METHOD tbb::task::note_affinity           </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="a00204.html#d61bb32389d3857bf7511d69beaafb76">affinity_id</a>&nbsp;</td>
+          <td class="paramname"> <em>id</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [virtual]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Invoked by scheduler to notify task that it ran on unexpected thread. 
+<p>
+Invoked before method <a class="el" href="a00204.html#22c298cd40937a431a06777423f002f6">execute()</a> runs, if the task is stolen, or if the task has affinity but will be executed on another thread.<p>
+The default action does nothing. 
+</div>
+</div><p>
+<a class="anchor" name="a67a79e18f62b43a623a00cfbd76db4c"></a><!-- doxytag: member="tbb::task::recycle_as_continuation" ref="a67a79e18f62b43a623a00cfbd76db4c" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void tbb::task::recycle_as_continuation           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Change this to be a continuation of its former self. 
+<p>
+The caller must guarantee that the task's refcount does not become zero until after the method <a class="el" href="a00204.html#22c298cd40937a431a06777423f002f6">execute()</a> returns. Typically, this is done by having method <a class="el" href="a00204.html#22c298cd40937a431a06777423f002f6">execute()</a> return a pointer to a child of the task. If the guarantee cannot be made, use method recycle_as_safe_continuation instead.<p>
+Because of the hazard, this method may be deprecated in the future. 
+</div>
+</div><p>
+<a class="anchor" name="3b290d14109704e2b69dc1ac980a7a76"></a><!-- doxytag: member="tbb::task::recycle_as_safe_continuation" ref="3b290d14109704e2b69dc1ac980a7a76" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void tbb::task::recycle_as_safe_continuation           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Recommended, safe variant of recycle_as_continuation. 
+<p>
+For safety, it requires an additional increment of ref_count. With no descendants and a ref_count of 1, it has the semantics of recycle_to_reexecute. 
+</div>
+</div><p>
+<a class="anchor" name="4f1be9bbcdb487830dbe298b68d85144"></a><!-- doxytag: member="tbb::task::recycle_to_reexecute" ref="4f1be9bbcdb487830dbe298b68d85144" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void tbb::task::recycle_to_reexecute           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Schedule this for reexecution after current <a class="el" href="a00204.html#22c298cd40937a431a06777423f002f6">execute()</a> returns. 
+<p>
+Made obsolete by recycle_as_safe_continuation; may become deprecated. 
+</div>
+</div><p>
+<a class="anchor" name="c33c7edbaec67aa8a56f48986a9dc69f"></a><!-- doxytag: member="tbb::task::spawn_root_and_wait" ref="c33c7edbaec67aa8a56f48986a9dc69f" args="(task_list &amp;root_list)" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void tbb::task::spawn_root_and_wait           </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="a00207.html">task_list</a> &amp;&nbsp;</td>
+          <td class="paramname"> <em>root_list</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline, static]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Spawn root tasks on list and wait for all of them to finish. 
+<p>
+If there are more tasks than worker threads, the tasks are spawned in front-to-back order. 
+</div>
+</div><p>
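+<p>
+Sketch (<code>MyRootTask</code> is an illustrative task type):
+<pre>
+tbb::task_list roots;
+for( int i = 0; i &lt; 4; ++i )
+    roots.push_back( *new( tbb::task::allocate_root() ) MyRootTask( i ) );
+tbb::task::spawn_root_and_wait( roots );   // runs all four roots, then returns
+</pre>
+<p>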
+<a class="anchor" name="53d2615ad9c38859b4c8080936600283"></a><!-- doxytag: member="tbb::task::wait_for_all" ref="53d2615ad9c38859b4c8080936600283" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void tbb::task::wait_for_all           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Wait for reference count to become one, and set reference count to zero. 
+<p>
+Works on tasks while waiting. 
+</div>
+</div><p>
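+<p>
+The blocking style of parallelism in a sketch (<code>ParentTask</code> and <code>ChildTask</code> are illustrative):
+<pre>
+tbb::task* ParentTask::execute() {
+    set_ref_count( 3 );            // two children + one extra for wait_for_all
+    tbb::task&amp; a = *new( allocate_child() ) ChildTask( /*...*/ );
+    tbb::task&amp; b = *new( allocate_child() ) ChildTask( /*...*/ );
+    spawn( a );
+    spawn( b );
+    wait_for_all();                // works on other tasks while waiting
+    return NULL;
+}
+</pre>
+<p>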
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00438.html">task.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00204.png b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00204.png
new file mode 100644 (file)
index 0000000..497b6ad
Binary files /dev/null and b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00204.png differ
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00205.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00205.html
new file mode 100644 (file)
index 0000000..499db18
--- /dev/null
@@ -0,0 +1,48 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::interface5::internal::task_base Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<b>interface5</b>::<b>internal</b>::<a class="el" href="a00205.html">task_base</a></div>
+<h1>tbb::interface5::internal::task_base Class Reference</h1><!-- doxytag: class="tbb::interface5::internal::task_base" -->Base class for methods that became static in TBB 3.0.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00438.html">task.h</a>&gt;</code>
+<p>
+<a href="a00072.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Base class for methods that became static in TBB 3.0. 
+<p>
+TBB's evolution caused the "this" argument for several methods to become obsolete. However, for backwards binary compatibility, the new methods need distinct names, otherwise the One Definition Rule would be broken. Hence the new methods are defined in this private base class, and then exposed in class task via using declarations. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00438.html">task.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00206.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00206.html
new file mode 100644 (file)
index 0000000..05dbf16
--- /dev/null
@@ -0,0 +1,231 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::task_group_context Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00206.html">task_group_context</a></div>
+<h1>tbb::task_group_context Class Reference<br>
+<small>
+[<a class="el" href="a00285.html">Task Scheduling</a>]</small>
+</h1><!-- doxytag: class="tbb::task_group_context" -->Used to form groups of tasks.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00438.html">task.h</a>&gt;</code>
+<p>
+<a href="a00073.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="d5af85179a348162d4ddd66b6fc0d60fb4fda16c4f77defd3d0f6aad87fceed4"></a><!-- doxytag: member="tbb::task_group_context::isolated" ref="d5af85179a348162d4ddd66b6fc0d60fb4fda16c4f77defd3d0f6aad87fceed4" args="" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>isolated</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="d5af85179a348162d4ddd66b6fc0d60fc7f37e067ba1fca8a5c809d20522662b"></a><!-- doxytag: member="tbb::task_group_context::bound" ref="d5af85179a348162d4ddd66b6fc0d60fc7f37e067ba1fca8a5c809d20522662b" args="" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>bound</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="b8dce4661b346756cb4f584529ecd0ebf774f9c21343eba6303927b5ae0be44c"></a><!-- doxytag: member="tbb::task_group_context::exact_exception" ref="b8dce4661b346756cb4f584529ecd0ebf774f9c21343eba6303927b5ae0be44c" args="" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>exact_exception</b> = 0x0001ul &lt;&lt; traits_offset</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="b8dce4661b346756cb4f584529ecd0eb4002d8f90c8c9f7c3f80ad349aee7cce"></a><!-- doxytag: member="tbb::task_group_context::concurrent_wait" ref="b8dce4661b346756cb4f584529ecd0eb4002d8f90c8c9f7c3f80ad349aee7cce" args="" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>concurrent_wait</b> = 0x0004ul &lt;&lt; traits_offset</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memItemRight" valign="bottom"><b>default_traits</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">enum &nbsp;</td><td class="memItemRight" valign="bottom"><b>kind_type</b> { <b>isolated</b>, 
+<b>bound</b>
+ }</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">enum &nbsp;</td><td class="memItemRight" valign="bottom"><b>traits_type</b> { <b>exact_exception</b> =  0x0001ul &lt;&lt; traits_offset, 
+<b>concurrent_wait</b> =  0x0004ul &lt;&lt; traits_offset, 
+<b>default_traits</b>
+ }</td></tr>
+
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00206.html#19fee08fb8ac98adccfe69c1aa63c491">task_group_context</a> (kind_type relation_with_parent=bound, uintptr_t traits=default_traits)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Default &amp; binding constructor.  <a href="#19fee08fb8ac98adccfe69c1aa63c491"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00206.html#6d30d16bf1cd22f86c6afaf29c2b430c">reset</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Forcefully reinitializes the context after the task tree it was associated with is completed.  <a href="#6d30d16bf1cd22f86c6afaf29c2b430c"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">bool __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00206.html#8bcdfdf4e6bfb76125b6de15c00b571d">cancel_group_execution</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Initiates cancellation of all tasks in this cancellation group and its subordinate groups.  <a href="#8bcdfdf4e6bfb76125b6de15c00b571d"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="4db72f16210b0a991b2c134d6763a4cc"></a><!-- doxytag: member="tbb::task_group_context::is_group_execution_cancelled" ref="4db72f16210b0a991b2c134d6763a4cc" args="() const " -->
+bool __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00206.html#4db72f16210b0a991b2c134d6763a4cc">is_group_execution_cancelled</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Returns true if the context received cancellation request. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00206.html#d97c8a03615594b71b4ef06ff75cf561">register_pending_exception</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Records the pending exception, and cancels the task group.  <a href="#d97c8a03615594b71b4ef06ff75cf561"></a><br></td></tr>
+<tr><td colspan="2"><br><h2>Protected Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00206.html#49a55352084fd44b8863d182e839e6dc">init</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Out-of-line part of the constructor.  <a href="#49a55352084fd44b8863d182e839e6dc"></a><br></td></tr>
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e656073628561c8fd68f6134d527e2de"></a><!-- doxytag: member="tbb::task_group_context::task" ref="e656073628561c8fd68f6134d527e2de" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>task</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ecedb9436fb30df30cb3a52c64bb97e5"></a><!-- doxytag: member="tbb::task_group_context::internal::allocate_root_with_context_proxy" ref="ecedb9436fb30df30cb3a52c64bb97e5" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>internal::allocate_root_with_context_proxy</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Used to form groups of tasks. 
+<p>
+The context services explicit cancellation requests from user code, and unhandled exceptions intercepted during task execution. Intercepting an exception results in generating internal cancellation requests (which are processed in exactly the same way as external ones).<p>
+The context is associated with one or more root tasks and defines the cancellation group that includes all the descendants of the corresponding root task(s). Association is established when a context object is passed as an argument to the <a class="el" href="a00204.html#23acb0da0afd690da797f9f882027d34">task::allocate_root()</a> method. See <a class="el" href="a00206.html#19fee08fb8ac98adccfe69c1aa63c491">task_group_context::task_group_context</a> for more details.<p>
+The context can be bound to another one, and other contexts can be bound to it, forming a tree-like structure: parent -&gt; this -&gt; children. Arrows here designate the cancellation propagation direction. If a task in a cancellation group is canceled, all the other tasks in this group and the groups bound to it (as children) get canceled too.<p>
+IMPLEMENTATION NOTE: When adding new members to <a class="el" href="a00206.html">task_group_context</a> or changing types of existing ones, update the size of both padding buffers (_leading_padding and _trailing_padding) appropriately. See also VERSIONING NOTE at the constructor definition below. 
+<p>
+<hr><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" name="19fee08fb8ac98adccfe69c1aa63c491"></a><!-- doxytag: member="tbb::task_group_context::task_group_context" ref="19fee08fb8ac98adccfe69c1aa63c491" args="(kind_type relation_with_parent=bound, uintptr_t traits=default_traits)" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">tbb::task_group_context::task_group_context           </td>
+          <td>(</td>
+          <td class="paramtype">kind_type&nbsp;</td>
+          <td class="paramname"> <em>relation_with_parent</em> = <code>bound</code>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">uintptr_t&nbsp;</td>
+          <td class="paramname"> <em>traits</em> = <code>default_traits</code></td><td>&nbsp;</td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td><td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Default &amp; binding constructor. 
+<p>
+By default a bound context is created. That is, this context will be bound (as a child) to the context of the task that calls the task::allocate_root(this_context) method. Cancellation requests passed to the parent context are propagated to all the contexts bound to it.<p>
+If task_group_context::isolated is used as the argument, then the tasks associated with this context will never be affected by events in any other context.<p>
+Creating isolated contexts involves much less overhead, but they have limited utility. Normally, when an exception occurs in an algorithm that has nested ones running, it is desirable to have all the nested algorithms canceled as well. Such behavior requires nested algorithms to use bound contexts.<p>
+One place where isolated contexts are beneficial is the master thread: if a particular algorithm is invoked directly from the master thread (not from a TBB task), supplying it with an explicitly created isolated context results in faster algorithm startup.<p>
+VERSIONING NOTE: Implementation(s) of <a class="el" href="a00206.html">task_group_context</a> constructor(s) cannot be made entirely out-of-line because the run-time version must be set by the user code. This will become critically important for binary compatibility, if we ever have to change the size of the context object.<p>
+Boosting the runtime version will also be necessary whenever new fields are introduced in the currently unused padding areas or the meaning of the existing fields is changed or extended. 
+</div>
+</div><p>
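+A minimal sketch of creating an isolated context on the master thread and associating it with an algorithm, assuming the parallel_for overload that accepts a task_group_context; the body functor, the range size, and the use of auto_partitioner are illustrative assumptions rather than part of this class's documentation:
+<pre>
+#include "tbb/task.h"             // task_group_context is declared in task.h
+#include "tbb/parallel_for.h"
+#include "tbb/blocked_range.h"
+
+struct Body {
+    void operator()( const tbb::blocked_range&lt;size_t&gt;&amp; r ) const {
+        for( size_t i = r.begin(); i != r.end(); ++i )
+            ;                                     // hypothetical per-element work
+    }
+};
+
+void run_from_master_thread() {
+    // Invoked directly from the master thread (not from a TBB task),
+    // so an isolated context avoids binding to a parent group.
+    tbb::task_group_context ctx( tbb::task_group_context::isolated );
+    tbb::parallel_for( tbb::blocked_range&lt;size_t&gt;(0,1000), Body(),
+                       tbb::auto_partitioner(), ctx );
+}
+</pre>
+<p>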
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="8bcdfdf4e6bfb76125b6de15c00b571d"></a><!-- doxytag: member="tbb::task_group_context::cancel_group_execution" ref="8bcdfdf4e6bfb76125b6de15c00b571d" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">bool __TBB_EXPORTED_METHOD tbb::task_group_context::cancel_group_execution           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Initiates cancellation of all tasks in this cancellation group and its subordinate groups. 
+<p>
+<dl compact><dt><b>Returns:</b></dt><dd>false if cancellation has already been requested, true otherwise.</dd></dl>
+Note that canceling never fails. When false is returned, it just means that another thread (or this one) has already sent a cancellation request to this context or to one of its ancestors (if this context is bound). It is guaranteed that when this method is called concurrently on the same not-yet-cancelled context, true will be returned by exactly one invocation. 
+</div>
+</div><p>
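+A sketch of cooperative cancellation: the body that detects a (hypothetical) stop condition cancels the whole group, while the other bodies poll is_group_execution_cancelled() and return early. The predicate found() and the range bounds are assumptions made for illustration only:
+<pre>
+#include "tbb/task.h"
+#include "tbb/parallel_for.h"
+#include "tbb/blocked_range.h"
+
+bool found( size_t i );                           // hypothetical user predicate
+
+struct SearchBody {
+    tbb::task_group_context&amp; my_ctx;
+    SearchBody( tbb::task_group_context&amp; c ) : my_ctx(c) {}
+    void operator()( const tbb::blocked_range&lt;size_t&gt;&amp; r ) const {
+        for( size_t i = r.begin(); i != r.end(); ++i ) {
+            if( my_ctx.is_group_execution_cancelled() )
+                return;                           // another task already cancelled the group
+            if( found(i) )
+                my_ctx.cancel_group_execution();  // request cancellation of the whole group
+        }
+    }
+};
+
+void search() {
+    tbb::task_group_context ctx;
+    tbb::parallel_for( tbb::blocked_range&lt;size_t&gt;(0,1000000), SearchBody(ctx),
+                       tbb::auto_partitioner(), ctx );
+}
+</pre>
+<p>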
+<a class="anchor" name="49a55352084fd44b8863d182e839e6dc"></a><!-- doxytag: member="tbb::task_group_context::init" ref="49a55352084fd44b8863d182e839e6dc" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void __TBB_EXPORTED_METHOD tbb::task_group_context::init           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [protected]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Out-of-line part of the constructor. 
+<p>
+Singled out to ensure backward binary compatibility of future versions. 
+</div>
+</div><p>
+<a class="anchor" name="d97c8a03615594b71b4ef06ff75cf561"></a><!-- doxytag: member="tbb::task_group_context::register_pending_exception" ref="d97c8a03615594b71b4ef06ff75cf561" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void __TBB_EXPORTED_METHOD tbb::task_group_context::register_pending_exception           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Records the pending exception, and cancels the task group. 
+<p>
+May be called only from inside a catch block. If the context is already canceled, the method does nothing. Otherwise it brings the task group associated with this context exactly into the state it would be in if one of its tasks had thrown the currently pending exception during its execution. In other words, it emulates the actions of the scheduler's dispatch loop exception handler. 
+</div>
+</div><p>
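+One plausible, heavily simplified use, sketched under the assumption that the surrounding code owns ctx and that prepare_input() is a hypothetical helper that may throw: the exception caught on the calling thread is recorded in the context, so tasks sharing ctx are cancelled and the exception resurfaces when the associated algorithm finishes.
+<pre>
+#include "tbb/task.h"
+
+void prepare_input();                       // hypothetical helper that may throw
+
+void produce_work( tbb::task_group_context&amp; ctx ) {
+    try {
+        prepare_input();
+    } catch( ... ) {
+        // Must be called from inside a catch block; cancels the group and
+        // records the pending exception as if a task in ctx had thrown it.
+        ctx.register_pending_exception();
+    }
+}
+</pre>
+<p>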
+<a class="anchor" name="6d30d16bf1cd22f86c6afaf29c2b430c"></a><!-- doxytag: member="tbb::task_group_context::reset" ref="6d30d16bf1cd22f86c6afaf29c2b430c" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void __TBB_EXPORTED_METHOD tbb::task_group_context::reset           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Forcefully reinitializes the context after the task tree it was associated with is completed. 
+<p>
+Because the method assumes that all the tasks that used to be associated with this context have already finished, calling it while the context is still in use somewhere in the task hierarchy leads to undefined behavior.<p>
+IMPORTANT: This method is not thread safe!<p>
+The method does not change the context's parent if it is set. 
+</div>
+</div><p>
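+A sketch of reusing one context across repeated runs, assuming the group may have been cancelled (for example, by other code holding a reference to ctx) and that all tasks of the previous pass have finished before reset() is called, as required above:
+<pre>
+#include "tbb/task.h"
+#include "tbb/parallel_for.h"
+#include "tbb/blocked_range.h"
+
+struct PassBody {
+    void operator()( const tbb::blocked_range&lt;int&gt;&amp; ) const { /* hypothetical work */ }
+};
+
+void run_passes() {
+    tbb::task_group_context ctx;
+    for( int pass = 0; pass != 10; ++pass ) {
+        tbb::parallel_for( tbb::blocked_range&lt;int&gt;(0,1000), PassBody(),
+                           tbb::auto_partitioner(), ctx );
+        if( ctx.is_group_execution_cancelled() )
+            ctx.reset();                  // clear the cancelled state before the next pass
+    }
+}
+</pre>
+<p>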
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00438.html">task.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00207.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00207.html
new file mode 100644 (file)
index 0000000..3f397f5
--- /dev/null
@@ -0,0 +1,83 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::task_list Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00207.html">task_list</a></div>
+<h1>tbb::task_list Class Reference<br>
+<small>
+[<a class="el" href="a00285.html">Task Scheduling</a>]</small>
+</h1><!-- doxytag: class="tbb::task_list" -->A list of children.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00438.html">task.h</a>&gt;</code>
+<p>
+<a href="a00077.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="416341c2047eaef50417b41eaf7e9de6"></a><!-- doxytag: member="tbb::task_list::task_list" ref="416341c2047eaef50417b41eaf7e9de6" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00207.html#416341c2047eaef50417b41eaf7e9de6">task_list</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct empty list. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="6d438f1499a02db1e59c24ab6043e5ba"></a><!-- doxytag: member="tbb::task_list::~task_list" ref="6d438f1499a02db1e59c24ab6043e5ba" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00207.html#6d438f1499a02db1e59c24ab6043e5ba">~task_list</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destroys the list, but does not destroy the task objects. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f3ac31e092814b90929f81bb30441959"></a><!-- doxytag: member="tbb::task_list::empty" ref="f3ac31e092814b90929f81bb30441959" args="() const " -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00207.html#f3ac31e092814b90929f81bb30441959">empty</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">True if list if empty; false otherwise. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="4cd34756bc4763dafb8c84838a0124ff"></a><!-- doxytag: member="tbb::task_list::push_back" ref="4cd34756bc4763dafb8c84838a0124ff" args="(task &amp;task)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00207.html#4cd34756bc4763dafb8c84838a0124ff">push_back</a> (<a class="el" href="a00204.html">task</a> &amp;<a class="el" href="a00204.html">task</a>)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Push task onto back of list. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="5fe85df5ed524418389d34051750347d"></a><!-- doxytag: member="tbb::task_list::pop_front" ref="5fe85df5ed524418389d34051750347d" args="()" -->
+<a class="el" href="a00204.html">task</a> &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00207.html#5fe85df5ed524418389d34051750347d">pop_front</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Pop the front task from the list. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="fce446ee13e025969945328f3ff59b95"></a><!-- doxytag: member="tbb::task_list::clear" ref="fce446ee13e025969945328f3ff59b95" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00207.html#fce446ee13e025969945328f3ff59b95">clear</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Clear the list. <br></td></tr>
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e656073628561c8fd68f6134d527e2de"></a><!-- doxytag: member="tbb::task_list::task" ref="e656073628561c8fd68f6134d527e2de" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>task</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="81d0122aee4ddae419a743ee50d7038e"></a><!-- doxytag: member="tbb::task_list::interface5::internal::task_base" ref="81d0122aee4ddae419a743ee50d7038e" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><b>interface5::internal::task_base</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+A list of children. 
+<p>
+Used for method task::spawn_children 
+<p>
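+A minimal sketch of the intended usage pattern, using tbb::empty_task children purely for illustration: the list is filled with children of the running task and then handed to the scheduler in one call.
+<pre>
+#include "tbb/task.h"
+
+class RootTask : public tbb::task {
+    /*override*/ tbb::task* execute() {
+        const int n = 4;
+        tbb::task_list list;
+        for( int i = 0; i != n; ++i )
+            list.push_back( *new( allocate_child() ) tbb::empty_task );
+        set_ref_count( n + 1 );           // n children + 1 for the wait
+        spawn_and_wait_for_all( list );   // spawning empties the list
+        return NULL;
+    }
+};
+
+void run_root() {
+    tbb::task::spawn_root_and_wait( *new( tbb::task::allocate_root() ) RootTask );
+}
+</pre>
+<p>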
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00438.html">task.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00208.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00208.html
new file mode 100644 (file)
index 0000000..c855b71
--- /dev/null
@@ -0,0 +1,164 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::task_scheduler_init Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00208.html">task_scheduler_init</a></div>
+<h1>tbb::task_scheduler_init Class Reference<br>
+<small>
+[<a class="el" href="a00285.html">Task Scheduling</a>]</small>
+</h1><!-- doxytag: class="tbb::task_scheduler_init" -->Class representing reference to tbb scheduler.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00454.html">task_scheduler_init.h</a>&gt;</code>
+<p>
+<a href="a00084.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00208.html#d476053cc712e572554823492a5229ce">initialize</a> (int number_of_threads=<a class="el" href="a00208.html#8f5988e2b0fbb2d533fcbb7f2583743f">automatic</a>)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Ensure that scheduler exists for this thread.  <a href="#d476053cc712e572554823492a5229ce"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00208.html#d5ed214a8bb53b0466ed91ff4734b9a3">initialize</a> (int number_of_threads, stack_size_type thread_stack_size)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">The overloaded method with stack size parameter.  <a href="#d5ed214a8bb53b0466ed91ff4734b9a3"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f73257e04cb7fb9bd5be2b635d9016f1"></a><!-- doxytag: member="tbb::task_scheduler_init::terminate" ref="f73257e04cb7fb9bd5be2b635d9016f1" args="()" -->
+void __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00208.html#f73257e04cb7fb9bd5be2b635d9016f1">terminate</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Inverse of method initialize. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="421600bf9bf9338bcf937063f2ff0e90"></a><!-- doxytag: member="tbb::task_scheduler_init::task_scheduler_init" ref="421600bf9bf9338bcf937063f2ff0e90" args="(int number_of_threads=automatic, stack_size_type thread_stack_size=0)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00208.html#421600bf9bf9338bcf937063f2ff0e90">task_scheduler_init</a> (int number_of_threads=<a class="el" href="a00208.html#8f5988e2b0fbb2d533fcbb7f2583743f">automatic</a>, stack_size_type thread_stack_size=0)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Shorthand for default constructor followed by call to intialize(number_of_threads). <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="4da6c86292d80c703a66c1f6f5299488"></a><!-- doxytag: member="tbb::task_scheduler_init::~task_scheduler_init" ref="4da6c86292d80c703a66c1f6f5299488" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00208.html#4da6c86292d80c703a66c1f6f5299488">~task_scheduler_init</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destroy scheduler for this thread if thread has no other live task_scheduler_inits. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="12752282977029f23416642bc03e8b74"></a><!-- doxytag: member="tbb::task_scheduler_init::is_active" ref="12752282977029f23416642bc03e8b74" args="() const " -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00208.html#12752282977029f23416642bc03e8b74">is_active</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Returns true if scheduler is active (initialized); false otherwise. <br></td></tr>
+<tr><td colspan="2"><br><h2>Static Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">static int __TBB_EXPORTED_FUNC&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00208.html#ba00714c33a41a3c2216f48613971cab">default_num_threads</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Returns the number of threads tbb scheduler would create if initialized by default.  <a href="#ba00714c33a41a3c2216f48613971cab"></a><br></td></tr>
+<tr><td colspan="2"><br><h2>Static Public Attributes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="8f5988e2b0fbb2d533fcbb7f2583743f"></a><!-- doxytag: member="tbb::task_scheduler_init::automatic" ref="8f5988e2b0fbb2d533fcbb7f2583743f" args="" -->
+static const int&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00208.html#8f5988e2b0fbb2d533fcbb7f2583743f">automatic</a> = -1</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Typedef for number of threads that is automatic. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e6c860f1e559026ff3ef4599c0d6c514"></a><!-- doxytag: member="tbb::task_scheduler_init::deferred" ref="e6c860f1e559026ff3ef4599c0d6c514" args="" -->
+static const int&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00208.html#e6c860f1e559026ff3ef4599c0d6c514">deferred</a> = -2</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Argument to <a class="el" href="a00208.html#d476053cc712e572554823492a5229ce">initialize()</a> or constructor that causes initialization to be deferred. <br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Class representing a reference to the TBB scheduler. 
+<p>
+A thread must construct a <a class="el" href="a00208.html">task_scheduler_init</a>, and keep it alive, during the time that it uses the services of class task. 
+<p>
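+A minimal sketch of the usual RAII pattern; the explicit thread count shown in the comment is an illustrative alternative, not a recommendation:
+<pre>
+#include "tbb/task_scheduler_init.h"
+
+int main() {
+    tbb::task_scheduler_init init;          // 'automatic' number of threads
+    // tbb::task_scheduler_init init( 4 );  // alternative: an explicit thread count
+
+    // ... run TBB algorithms and tasks while 'init' is alive ...
+
+    return 0;
+}   // the scheduler reference is released when 'init' is destroyed
+</pre>
+<p>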
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="ba00714c33a41a3c2216f48613971cab"></a><!-- doxytag: member="tbb::task_scheduler_init::default_num_threads" ref="ba00714c33a41a3c2216f48613971cab" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">static int __TBB_EXPORTED_FUNC tbb::task_scheduler_init::default_num_threads           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [static]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Returns the number of threads tbb scheduler would create if initialized by default. 
+<p>
+The result returned by this method does not depend on whether the scheduler has already been initialized.<p>
+Because tbb 2.0 does not support blocking tasks yet, you may use this method to boost the number of threads in TBB's internal pool if your tasks perform I/O operations. The optimal number of additional threads depends on how much time your tasks spend in the blocked state. 
+</div>
+</div><p>
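+A sketch of the oversubscription idea mentioned above; the two extra threads are an arbitrary illustrative choice that should be tuned to how long the tasks actually block:
+<pre>
+#include "tbb/task_scheduler_init.h"
+
+void run_io_bound_work() {
+    // A couple of extra threads compensate for tasks blocked on I/O.
+    tbb::task_scheduler_init init( tbb::task_scheduler_init::default_num_threads() + 2 );
+    // ... run the I/O-heavy TBB algorithms while 'init' is alive ...
+}
+</pre>
+<p>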
+<a class="anchor" name="d5ed214a8bb53b0466ed91ff4734b9a3"></a><!-- doxytag: member="tbb::task_scheduler_init::initialize" ref="d5ed214a8bb53b0466ed91ff4734b9a3" args="(int number_of_threads, stack_size_type thread_stack_size)" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void __TBB_EXPORTED_METHOD tbb::task_scheduler_init::initialize           </td>
+          <td>(</td>
+          <td class="paramtype">int&nbsp;</td>
+          <td class="paramname"> <em>number_of_threads</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">stack_size_type&nbsp;</td>
+          <td class="paramname"> <em>thread_stack_size</em></td><td>&nbsp;</td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td><td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+The overloaded method with stack size parameter. 
+<p>
+Overloading is necessary to preserve ABI compatibility 
+</div>
+</div><p>
+<a class="anchor" name="d476053cc712e572554823492a5229ce"></a><!-- doxytag: member="tbb::task_scheduler_init::initialize" ref="d476053cc712e572554823492a5229ce" args="(int number_of_threads=automatic)" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void __TBB_EXPORTED_METHOD tbb::task_scheduler_init::initialize           </td>
+          <td>(</td>
+          <td class="paramtype">int&nbsp;</td>
+          <td class="paramname"> <em>number_of_threads</em> = <code><a class="el" href="a00208.html#8f5988e2b0fbb2d533fcbb7f2583743f">automatic</a></code>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Ensure that scheduler exists for this thread. 
+<p>
+A value of -1 lets TBB decide on the number of threads, which is typically the number of hardware threads. For production code, the default value of -1 should be used, particularly if the client code is mixed with third-party clients that might also use TBB.<p>
+The number_of_threads is ignored if any other task_scheduler_inits currently exist. A thread may construct multiple task_scheduler_inits. Doing so does no harm because the underlying scheduler is reference counted. 
+</div>
+</div><p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00454.html">task_scheduler_init.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00209.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00209.html
new file mode 100644 (file)
index 0000000..a871982
--- /dev/null
@@ -0,0 +1,130 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::tbb_allocator&lt; T &gt; Class Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00209.html">tbb_allocator</a></div>
+<h1>tbb::tbb_allocator&lt; T &gt; Class Template Reference<br>
+<small>
+[<a class="el" href="a00282.html">Memory Allocation</a>]</small>
+</h1><!-- doxytag: class="tbb::tbb_allocator" -->Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00458.html">tbb_allocator.h</a>&gt;</code>
+<p>
+<a href="a00086.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="3cdd9619bc0a802763e7823068f83a4c"></a><!-- doxytag: member="tbb::tbb_allocator::value_type" ref="3cdd9619bc0a802763e7823068f83a4c" args="" -->
+typedef internal::allocator_type&lt;<br>
+ T &gt;::value_type&nbsp;</td><td class="memItemRight" valign="bottom"><b>value_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f979d0e74d7c6e5aeb0d26408776a9e1"></a><!-- doxytag: member="tbb::tbb_allocator::pointer" ref="f979d0e74d7c6e5aeb0d26408776a9e1" args="" -->
+typedef value_type *&nbsp;</td><td class="memItemRight" valign="bottom"><b>pointer</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="1622504886ef265016a5821d61bf7adc"></a><!-- doxytag: member="tbb::tbb_allocator::const_pointer" ref="1622504886ef265016a5821d61bf7adc" args="" -->
+typedef const value_type *&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_pointer</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="b8e58d23c5c98fd65d4432c1e324f4c0"></a><!-- doxytag: member="tbb::tbb_allocator::reference" ref="b8e58d23c5c98fd65d4432c1e324f4c0" args="" -->
+typedef value_type &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><b>reference</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="532f36f99650b1c26a2b8899383c5411"></a><!-- doxytag: member="tbb::tbb_allocator::const_reference" ref="532f36f99650b1c26a2b8899383c5411" args="" -->
+typedef const value_type &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_reference</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="639df0fb169cfcb94ba1b12429b736f4"></a><!-- doxytag: member="tbb::tbb_allocator::size_type" ref="639df0fb169cfcb94ba1b12429b736f4" args="" -->
+typedef size_t&nbsp;</td><td class="memItemRight" valign="bottom"><b>size_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="54828b5d0b937d608f76650a5df0e39f"></a><!-- doxytag: member="tbb::tbb_allocator::difference_type" ref="54828b5d0b937d608f76650a5df0e39f" args="" -->
+typedef ptrdiff_t&nbsp;</td><td class="memItemRight" valign="bottom"><b>difference_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="09a7f81fb2c3055aaecf058b11538544d524c717485b74aa552fb45b45562cb5"></a><!-- doxytag: member="tbb::tbb_allocator::scalable" ref="09a7f81fb2c3055aaecf058b11538544d524c717485b74aa552fb45b45562cb5" args="" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>scalable</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="09a7f81fb2c3055aaecf058b11538544e108c3168e3be5de1d98e74723136fbf"></a><!-- doxytag: member="tbb::tbb_allocator::standard" ref="09a7f81fb2c3055aaecf058b11538544e108c3168e3be5de1d98e74723136fbf" args="" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>standard</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">enum &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00209.html#09a7f81fb2c3055aaecf058b11538544">malloc_type</a> { <b>scalable</b>, 
+<b>standard</b>
+ }</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Specifies current allocator. <br></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="d85cd071047b52eeb207d458d54098cc"></a><!-- doxytag: member="tbb::tbb_allocator::tbb_allocator" ref="d85cd071047b52eeb207d458d54098cc" args="(const tbb_allocator &amp;)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>tbb_allocator</b> (const <a class="el" href="a00209.html">tbb_allocator</a> &amp;)  throw ()</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="5c3cb04a50d15d38f4c66ca3e5d2f6af"></a><!-- doxytag: member="tbb::tbb_allocator::tbb_allocator" ref="5c3cb04a50d15d38f4c66ca3e5d2f6af" args="(const tbb_allocator&lt; U &gt; &amp;)" -->
+template&lt;typename U&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>tbb_allocator</b> (const <a class="el" href="a00209.html">tbb_allocator</a>&lt; U &gt; &amp;)  throw ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="08d3822344271d0a1c2e64e8c9c16b58"></a><!-- doxytag: member="tbb::tbb_allocator::address" ref="08d3822344271d0a1c2e64e8c9c16b58" args="(reference x) const " -->
+pointer&nbsp;</td><td class="memItemRight" valign="bottom"><b>address</b> (reference x) const </td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="2b388f25858f783d995d61506de24656"></a><!-- doxytag: member="tbb::tbb_allocator::address" ref="2b388f25858f783d995d61506de24656" args="(const_reference x) const " -->
+const_pointer&nbsp;</td><td class="memItemRight" valign="bottom"><b>address</b> (const_reference x) const </td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f6cb487b1bdce0b581f265a77dca6d53"></a><!-- doxytag: member="tbb::tbb_allocator::allocate" ref="f6cb487b1bdce0b581f265a77dca6d53" args="(size_type n, const void *=0)" -->
+pointer&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00209.html#f6cb487b1bdce0b581f265a77dca6d53">allocate</a> (size_type n, const void *=0)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Allocate space for n objects. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="fdd011fdf2f9ad07006dc7c0a7ec1da2"></a><!-- doxytag: member="tbb::tbb_allocator::deallocate" ref="fdd011fdf2f9ad07006dc7c0a7ec1da2" args="(pointer p, size_type)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00209.html#fdd011fdf2f9ad07006dc7c0a7ec1da2">deallocate</a> (pointer p, size_type)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Free previously allocated block of memory. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="f059ca2c96243024f0d562ee3a87a3a5"></a><!-- doxytag: member="tbb::tbb_allocator::max_size" ref="f059ca2c96243024f0d562ee3a87a3a5" args="() const " -->
+size_type&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00209.html#f059ca2c96243024f0d562ee3a87a3a5">max_size</a> () const   throw ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Largest value for which method allocate might succeed. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ab228ab9e324ed041c2226e1d717df5f"></a><!-- doxytag: member="tbb::tbb_allocator::construct" ref="ab228ab9e324ed041c2226e1d717df5f" args="(pointer p, const value_type &amp;value)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00209.html#ab228ab9e324ed041c2226e1d717df5f">construct</a> (pointer p, const value_type &amp;value)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Copy-construct value at location pointed to by p. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ef133522bf55f05a605bee0763208281"></a><!-- doxytag: member="tbb::tbb_allocator::destroy" ref="ef133522bf55f05a605bee0763208281" args="(pointer p)" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00209.html#ef133522bf55f05a605bee0763208281">destroy</a> (pointer p)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destroy value at location pointed to by p. <br></td></tr>
+<tr><td colspan="2"><br><h2>Static Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="78701e7454ef8e1a25b5acd364367080"></a><!-- doxytag: member="tbb::tbb_allocator::allocator_type" ref="78701e7454ef8e1a25b5acd364367080" args="()" -->
+static <a class="el" href="a00209.html#09a7f81fb2c3055aaecf058b11538544">malloc_type</a>&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00209.html#78701e7454ef8e1a25b5acd364367080">allocator_type</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Returns current allocator. <br></td></tr>
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">struct &nbsp;</td><td class="memItemRight" valign="bottom"><b>rebind</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename T&gt;<br>
+ class tbb::tbb_allocator&lt; T &gt;</h3>
+
+Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5. 
+<p>
+The class selects the best memory allocation mechanism available from scalable_malloc and standard malloc. The members are ordered the same way they are in section 20.4.1 of the ISO C++ standard. 
+<p>
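+A short sketch of the most common use, as a drop-in allocator for a standard container, together with the run-time query for the mechanism that was selected:
+<pre>
+#include &lt;vector&gt;
+#include "tbb/tbb_allocator.h"
+
+void allocator_demo() {
+    // Standard container backed by tbb_allocator.
+    std::vector&lt;int, tbb::tbb_allocator&lt;int&gt; &gt; v;
+    v.push_back( 42 );
+
+    // Which underlying mechanism is in use?
+    if( tbb::tbb_allocator&lt;int&gt;::allocator_type() == tbb::tbb_allocator&lt;int&gt;::scalable ) {
+        // scalable_malloc was available at run time
+    }
+}
+</pre>
+<p>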
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00458.html">tbb_allocator.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00210.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00210.html
new file mode 100644 (file)
index 0000000..f542075
--- /dev/null
@@ -0,0 +1,65 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::tbb_allocator&lt; void &gt; Class Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00210.html">tbb_allocator&lt; void &gt;</a></div>
+<h1>tbb::tbb_allocator&lt; void &gt; Class Template Reference<br>
+<small>
+[<a class="el" href="a00282.html">Memory Allocation</a>]</small>
+</h1><!-- doxytag: class="tbb::tbb_allocator&lt; void &gt;" -->Analogous to std::allocator&lt;void&gt;, as defined in ISO C++ Standard, Section 20.4.1.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00458.html">tbb_allocator.h</a>&gt;</code>
+<p>
+<a href="a00088.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="05f5ff584890407c8d672e4b4378744d"></a><!-- doxytag: member="tbb::tbb_allocator&lt; void &gt;::pointer" ref="05f5ff584890407c8d672e4b4378744d" args="" -->
+typedef void *&nbsp;</td><td class="memItemRight" valign="bottom"><b>pointer</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="33e64ba71f06ef24a5093415185e66cb"></a><!-- doxytag: member="tbb::tbb_allocator&lt; void &gt;::const_pointer" ref="33e64ba71f06ef24a5093415185e66cb" args="" -->
+typedef const void *&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_pointer</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="81595a776b9d206a92caa526cad9c637"></a><!-- doxytag: member="tbb::tbb_allocator&lt; void &gt;::value_type" ref="81595a776b9d206a92caa526cad9c637" args="" -->
+typedef void&nbsp;</td><td class="memItemRight" valign="bottom"><b>value_type</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">struct &nbsp;</td><td class="memItemRight" valign="bottom"><b>rebind</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;&gt;<br>
+ class tbb::tbb_allocator&lt; void &gt;</h3>
+
+Analogous to std::allocator&lt;void&gt;, as defined in ISO C++ Standard, Section 20.4.1. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00458.html">tbb_allocator.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00211.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00211.html
new file mode 100644 (file)
index 0000000..2aae0e1
--- /dev/null
@@ -0,0 +1,168 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::tbb_exception Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00211.html">tbb_exception</a></div>
+<h1>tbb::tbb_exception Class Reference</h1><!-- doxytag: class="tbb::tbb_exception" -->Interface to be implemented by all exceptions TBB recognizes and propagates across the threads.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00464.html">tbb_exception.h</a>&gt;</code>
+<p>
+<p>Inheritance diagram for tbb::tbb_exception:
+<p><center><img src="a00211.png" usemap="#tbb::tbb_exception_map" border="0" alt=""></center>
+<map name="tbb::tbb_exception_map">
+<area href="a00157.html" alt="tbb::captured_exception" shape="rect" coords="0,56,248,80">
+<area href="a00176.html" alt="tbb::movable_exception< ExceptionData >" shape="rect" coords="258,56,506,80">
+</map>
+<a href="a00098.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">virtual <a class="el" href="a00211.html">tbb_exception</a> *&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00211.html#3e3482bf264d4ca4dde046cd9c02c766">move</a> ()=0  throw ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Creates and returns pointer to the deep copy of this exception object.  <a href="#3e3482bf264d4ca4dde046cd9c02c766"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">virtual void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00211.html#66c94938eca8bf88b76f3eccaaf215d8">destroy</a> ()=0  throw ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destroys objects created by the <a class="el" href="a00211.html#3e3482bf264d4ca4dde046cd9c02c766">move()</a> method.  <a href="#66c94938eca8bf88b76f3eccaaf215d8"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">virtual void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00211.html#8588e07fa49692f4d734e4f2e4f048f4">throw_self</a> ()=0</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Throws this exception object.  <a href="#8588e07fa49692f4d734e4f2e4f048f4"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="d00f6497e552fee978a02bfcbebf46e2"></a><!-- doxytag: member="tbb::tbb_exception::name" ref="d00f6497e552fee978a02bfcbebf46e2" args="() const =0" -->
+virtual const char *&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00211.html#d00f6497e552fee978a02bfcbebf46e2">name</a> () const =0  throw ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Returns RTTI name of the originally intercepted exception. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="e8157689ecb66bc6c72d3618bf3cc371"></a><!-- doxytag: member="tbb::tbb_exception::what" ref="e8157689ecb66bc6c72d3618bf3cc371" args="() const =0" -->
+virtual const char *&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00211.html#e8157689ecb66bc6c72d3618bf3cc371">what</a> () const =0  throw ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Returns the result of originally intercepted exception's <a class="el" href="a00211.html#e8157689ecb66bc6c72d3618bf3cc371">what()</a> method. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00211.html#3f2da7f3d8a6e4c1df522af1213afb5a">operator delete</a> (void *p)</td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Interface to be implemented by all exceptions TBB recognizes and propagates across the threads. 
+<p>
+If an unhandled exception of a type derived from <a class="el" href="a00211.html">tbb::tbb_exception</a> is intercepted by the TBB scheduler in one of the worker threads, it is delivered to and re-thrown in the root thread. The root thread is the thread that has started the outermost algorithm or root task sharing the same <a class="el" href="a00206.html">task_group_context</a> with the guilty algorithm/task (the one that threw the exception first).<p>
+Note: when the documentation mentions workers with respect to exception handling, masters are implied as well, because they are completely equivalent in this context. Consequently, a root thread can be a master or a worker thread.<p>
+NOTE: In the case of nested algorithms or complex task hierarchies, when the nested levels share (explicitly or by means of implicit inheritance) the task group context of the outermost level, the exception may be (re-)thrown multiple times (ultimately, in each worker on each nesting level) before reaching the root thread at the outermost level. IMPORTANT: if you intercept an exception derived from this class on a nested level, you must re-throw it in the catch block by means of the "throw;" operator.<p>
+TBB provides two implementations of this interface: <a class="el" href="a00157.html">tbb::captured_exception</a> and template class <a class="el" href="a00176.html">tbb::movable_exception</a>. See their declarations for more info. 
+<p>
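+A sketch of how an exception thrown inside a worker reaches the root thread. The failing index and the payload type are illustrative; tbb::movable_exception is used as a ready-made implementation of this interface, and the catch clause names the base class so that it also matches tbb::captured_exception when exact exception propagation is not available:
+<pre>
+#include "tbb/tbb_exception.h"
+#include "tbb/parallel_for.h"
+#include "tbb/blocked_range.h"
+
+struct ErrorInfo { int bad_index; };              // illustrative payload
+
+struct Body {
+    void operator()( const tbb::blocked_range&lt;int&gt;&amp; r ) const {
+        for( int i = r.begin(); i != r.end(); ++i )
+            if( i == 13 ) {                       // hypothetical failure condition
+                ErrorInfo info = { i };
+                throw tbb::movable_exception&lt;ErrorInfo&gt;( info );
+            }
+    }
+};
+
+void run() {
+    try {
+        tbb::parallel_for( tbb::blocked_range&lt;int&gt;(0,100), Body() );
+    } catch( tbb::tbb_exception&amp; e ) {
+        // Re-thrown here, in the root thread that started the algorithm.
+        const char* who  = e.name();              // RTTI name of the original exception
+        const char* text = e.what();
+        (void)who; (void)text;
+    }
+}
+</pre>
+<p>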
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="66c94938eca8bf88b76f3eccaaf215d8"></a><!-- doxytag: member="tbb::tbb_exception::destroy" ref="66c94938eca8bf88b76f3eccaaf215d8" args="()=0" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">virtual void tbb::tbb_exception::destroy           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%">  throw ()<code> [pure virtual]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Destroys objects created by the <a class="el" href="a00211.html#3e3482bf264d4ca4dde046cd9c02c766">move()</a> method. 
+<p>
+Frees memory and calls the destructor for this exception object. It can and must be used only on objects created by the move() method. 
+<p>
+Implemented in <a class="el" href="a00157.html#93d875d3555502ff6f18513525de204c">tbb::captured_exception</a>, and <a class="el" href="a00176.html#7a46873119d9f85a7b0009c13e41a258">tbb::movable_exception&lt; ExceptionData &gt;</a>.
+</div>
+</div><p>
+<a class="anchor" name="3e3482bf264d4ca4dde046cd9c02c766"></a><!-- doxytag: member="tbb::tbb_exception::move" ref="3e3482bf264d4ca4dde046cd9c02c766" args="()=0" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">virtual <a class="el" href="a00211.html">tbb_exception</a>* tbb::tbb_exception::move           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%">  throw ()<code> [pure virtual]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Creates and returns a pointer to a deep copy of this exception object. 
+<p>
+Move semantics is allowed. 
+<p>
+Implemented in <a class="el" href="a00157.html#837a50b8f6a800bda225c39d1699643f">tbb::captured_exception</a>, and <a class="el" href="a00176.html#1aea0ad179d6f0481fe7f3495f66adf9">tbb::movable_exception&lt; ExceptionData &gt;</a>.
+</div>
+</div><p>
+<a class="anchor" name="3f2da7f3d8a6e4c1df522af1213afb5a"></a><!-- doxytag: member="tbb::tbb_exception::operator delete" ref="3f2da7f3d8a6e4c1df522af1213afb5a" args="(void *p)" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void tbb::tbb_exception::operator delete           </td>
+          <td>(</td>
+          <td class="paramtype">void *&nbsp;</td>
+          <td class="paramname"> <em>p</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [inline]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Operator delete is provided only to allow using existing smart pointers with TBB exception objects obtained as the result of applying the <a class="el" href="a00211.html#3e3482bf264d4ca4dde046cd9c02c766">move()</a> operation to an exception thrown out of the TBB scheduler.<p>
+When overriding the <a class="el" href="a00211.html#3e3482bf264d4ca4dde046cd9c02c766">move()</a> method, make sure to override operator delete as well if the memory is not allocated by TBB's scalable allocator. 
+</div>
+</div><p>
+<a class="anchor" name="8588e07fa49692f4d734e4f2e4f048f4"></a><!-- doxytag: member="tbb::tbb_exception::throw_self" ref="8588e07fa49692f4d734e4f2e4f048f4" args="()=0" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">virtual void tbb::tbb_exception::throw_self           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"><code> [pure virtual]</code></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Throws this exception object. 
+<p>
+Make sure that if you have several levels of derivation from this interface, you implement or override this method at the most derived level. The implementation is as simple as "throw *this;". Failure to do this will result in an exception of a base class type being thrown. 
+<p>
+Implemented in <a class="el" href="a00157.html#2dd1be66ab32fa27e0ddef5707fa67ef">tbb::captured_exception</a>, and <a class="el" href="a00176.html#17cffba35811c92b7e65d63506b69602">tbb::movable_exception&lt; ExceptionData &gt;</a>.
+</div>
+</div><p>
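+A bare-bones sketch of a user-defined implementation of this interface that follows the rule above; allocation-failure handling in move() and a meaningful payload are intentionally omitted:
+<pre>
+#include "tbb/tbb_exception.h"
+
+class my_exception : public tbb::tbb_exception {
+public:
+    /*override*/ my_exception* move() throw() {
+        // Deep copy used to transfer the exception to another thread
+        // (allocation-failure handling is omitted in this sketch).
+        return new my_exception( *this );
+    }
+    /*override*/ void destroy() throw() {
+        delete this;                      // valid only for objects created by move()
+    }
+    /*override*/ void throw_self() {
+        throw *this;                      // throws the most derived type
+    }
+    /*override*/ const char* name() const throw() { return "my_exception"; }
+    /*override*/ const char* what() const throw() { return "my_exception: operation failed"; }
+};
+</pre>
+<p>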
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00464.html">tbb_exception.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00211.png b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00211.png
new file mode 100644 (file)
index 0000000..496da7e
Binary files /dev/null and b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00211.png differ
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00212.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00212.html
new file mode 100644 (file)
index 0000000..798bc8c
--- /dev/null
@@ -0,0 +1,89 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::internal::tbb_exception_ptr Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<b>internal</b>::<a class="el" href="a00212.html">tbb_exception_ptr</a></div>
+<h1>tbb::internal::tbb_exception_ptr Class Reference</h1><!-- doxytag: class="tbb::internal::tbb_exception_ptr" -->Exception container that preserves the exact copy of the original exception.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00464.html">tbb_exception.h</a>&gt;</code>
+<p>
+<a href="a00101.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00212.html#921875bbacd2c8a5f324c7da7a415262">destroy</a> ()  throw ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Destroys this objects.  <a href="#921875bbacd2c8a5f324c7da7a415262"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="292832fd5c523e3d8081a22247840a1d"></a><!-- doxytag: member="tbb::internal::tbb_exception_ptr::throw_self" ref="292832fd5c523e3d8081a22247840a1d" args="()" -->
+void&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00212.html#292832fd5c523e3d8081a22247840a1d">throw_self</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Throws the contained exception . <br></td></tr>
+<tr><td colspan="2"><br><h2>Static Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="65083750bebe799d3fe8143c794523bb"></a><!-- doxytag: member="tbb::internal::tbb_exception_ptr::allocate" ref="65083750bebe799d3fe8143c794523bb" args="()" -->
+static <a class="el" href="a00212.html">tbb_exception_ptr</a> *&nbsp;</td><td class="memItemRight" valign="bottom"><b>allocate</b> ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="d1111b00ba54b7fe35e6d3a0f21de287"></a><!-- doxytag: member="tbb::internal::tbb_exception_ptr::allocate" ref="d1111b00ba54b7fe35e6d3a0f21de287" args="(const tbb_exception &amp;tag)" -->
+static <a class="el" href="a00212.html">tbb_exception_ptr</a> *&nbsp;</td><td class="memItemRight" valign="bottom"><b>allocate</b> (const <a class="el" href="a00211.html">tbb_exception</a> &amp;tag)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="c35e5db8e9cdff5d1387db5b0bad2e4a"></a><!-- doxytag: member="tbb::internal::tbb_exception_ptr::allocate" ref="c35e5db8e9cdff5d1387db5b0bad2e4a" args="(captured_exception &amp;src)" -->
+static <a class="el" href="a00212.html">tbb_exception_ptr</a> *&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00212.html#c35e5db8e9cdff5d1387db5b0bad2e4a">allocate</a> (<a class="el" href="a00157.html">captured_exception</a> &amp;src)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">This overload uses move semantics (i.e. it empties src). <br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Exception container that preserves an exact copy of the original exception. 
+<p>
+This class can be used only when the appropriate runtime support (mandated by C++0x) is present. 
+<p>
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="921875bbacd2c8a5f324c7da7a415262"></a><!-- doxytag: member="tbb::internal::tbb_exception_ptr::destroy" ref="921875bbacd2c8a5f324c7da7a415262" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void tbb::internal::tbb_exception_ptr::destroy           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%">  throw ()</td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Destroys this object. 
+<p>
+Note that objects of this type can be created only by the allocate() method. 
+</div>
+</div><p>
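+<p>
+A minimal usage sketch follows (not part of the generated reference), assuming the C++0x exception-propagation support mentioned in the detailed description above; the function name and error text are hypothetical, and <code>allocate()</code> is assumed here to preserve the exception currently being handled.
+<p>
+<pre class="fragment">
+#include &lt;stdexcept&gt;
+#include "tbb/tbb_exception.h"
+
+// Sketch only: application code normally lets the TBB scheduler use this internal class.
+void capture_and_rethrow_later() {
+    tbb::internal::tbb_exception_ptr *eptr = 0;
+    try {
+        throw std::runtime_error("worker failed");             // hypothetical failure
+    } catch ( ... ) {
+        eptr = tbb::internal::tbb_exception_ptr::allocate();   // preserve the in-flight exception
+    }
+    if ( eptr ) {
+        try {
+            eptr-&gt;throw_self();                              // re-throw the preserved exception
+        } catch ( const std::runtime_error &amp; ) { /* handle it here */ }
+        eptr-&gt;destroy();   // objects come only from allocate(), so destroy() rather than delete
+    }
+}
+</pre>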
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00464.html">tbb_exception.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00213.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00213.html
new file mode 100644 (file)
index 0000000..f885769
--- /dev/null
@@ -0,0 +1,56 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::tbb_hash_compare&lt; Key &gt; Struct Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00213.html">tbb_hash_compare</a></div>
+<h1>tbb::tbb_hash_compare&lt; Key &gt; Struct Template Reference</h1><!-- doxytag: class="tbb::tbb_hash_compare" -->hash_compare that is the default argument for concurrent_hash_map.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00331.html">concurrent_hash_map.h</a>&gt;</code>
+<p>
+<a href="a00012.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Static Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="c35d106ad65aa6ca5ba9008c1c86f80d"></a><!-- doxytag: member="tbb::tbb_hash_compare::hash" ref="c35d106ad65aa6ca5ba9008c1c86f80d" args="(const Key &amp;a)" -->
+static size_t&nbsp;</td><td class="memItemRight" valign="bottom"><b>hash</b> (const Key &amp;a)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="b4d302049aabea046e392ddfad8b3f06"></a><!-- doxytag: member="tbb::tbb_hash_compare::equal" ref="b4d302049aabea046e392ddfad8b3f06" args="(const Key &amp;a, const Key &amp;b)" -->
+static bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>equal</b> (const Key &amp;a, const Key &amp;b)</td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename Key&gt;<br>
+ struct tbb::tbb_hash_compare&lt; Key &gt;</h3>
+
+hash_compare that is the default argument for concurrent_hash_map. 
+<p>
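+<p>
+A minimal sketch of how this default is used (not part of the generated reference); the typedef and function names are hypothetical:
+<p>
+<pre class="fragment">
+#include &lt;string&gt;
+#include "tbb/concurrent_hash_map.h"
+
+// Omitting the HashCompare argument selects tbb::tbb_hash_compare&lt;std::string&gt;.
+typedef tbb::concurrent_hash_map&lt;std::string,int&gt; name_table;
+
+// The two static members can also be called directly.
+bool keys_may_collide( const std::string &amp;a, const std::string &amp;b ) {
+    typedef tbb::tbb_hash_compare&lt;std::string&gt; hc;
+    return hc::equal(a,b) || hc::hash(a) == hc::hash(b);
+}
+</pre>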
+<hr>The documentation for this struct was generated from the following file:<ul>
+<li><a class="el" href="a00331.html">concurrent_hash_map.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00214.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00214.html
new file mode 100644 (file)
index 0000000..a18d000
--- /dev/null
@@ -0,0 +1,123 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::thread_bound_filter Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00214.html">thread_bound_filter</a></div>
+<h1>tbb::thread_bound_filter Class Reference<br>
+<small>
+[<a class="el" href="a00280.html">Algorithms</a>]</small>
+</h1><!-- doxytag: class="tbb::thread_bound_filter" --><!-- doxytag: inherits="tbb::filter" -->A stage in a pipeline served by a user thread.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00419.html">pipeline.h</a>&gt;</code>
+<p>
+<p>Inheritance diagram for tbb::thread_bound_filter:
+<p><center><img src="a00214.png" usemap="#tbb::thread_bound_filter_map" border="0" alt=""></center>
+<map name="tbb::thread_bound_filter_map">
+<area href="a00169.html" alt="tbb::filter" shape="rect" coords="0,0,145,24">
+</map>
+<a href="a00050.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="902c4645e624e8f589b89841df5331de0c135a0274225f59d2b2a682d1103f23"></a><!-- doxytag: member="tbb::thread_bound_filter::success" ref="902c4645e624e8f589b89841df5331de0c135a0274225f59d2b2a682d1103f23" args="" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>success</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="902c4645e624e8f589b89841df5331de5243cf0030982cfa4ac1a8e97acb39f4"></a><!-- doxytag: member="tbb::thread_bound_filter::item_not_available" ref="902c4645e624e8f589b89841df5331de5243cf0030982cfa4ac1a8e97acb39f4" args="" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>item_not_available</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="902c4645e624e8f589b89841df5331ded4085a8b251604b51c0f19602eeef09f"></a><!-- doxytag: member="tbb::thread_bound_filter::end_of_stream" ref="902c4645e624e8f589b89841df5331ded4085a8b251604b51c0f19602eeef09f" args="" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>end_of_stream</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">enum &nbsp;</td><td class="memItemRight" valign="bottom"><b>result_type</b> { <b>success</b>, 
+<b>item_not_available</b>, 
+<b>end_of_stream</b>
+ }</td></tr>
+
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">result_type __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00214.html#c4f90f2c771bce748beb9be734fa286c">try_process_item</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">If a data item is available, invoke operator() on that item.  <a href="#c4f90f2c771bce748beb9be734fa286c"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">result_type __TBB_EXPORTED_METHOD&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00214.html#5e726bdc7fbd924c0b07bd558b1d4d5d">process_item</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Wait until a data item becomes available, and invoke operator() on that item.  <a href="#5e726bdc7fbd924c0b07bd558b1d4d5d"></a><br></td></tr>
+<tr><td colspan="2"><br><h2>Protected Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="413fad2ade743d9af5dcabc56aedf4fe"></a><!-- doxytag: member="tbb::thread_bound_filter::thread_bound_filter" ref="413fad2ade743d9af5dcabc56aedf4fe" args="(mode filter_mode)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>thread_bound_filter</b> (mode filter_mode)</td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+A stage in a pipeline served by a user thread. 
+<p>
+<hr><h2>Member Function Documentation</h2>
+<a class="anchor" name="5e726bdc7fbd924c0b07bd558b1d4d5d"></a><!-- doxytag: member="tbb::thread_bound_filter::process_item" ref="5e726bdc7fbd924c0b07bd558b1d4d5d" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">result_type __TBB_EXPORTED_METHOD tbb::thread_bound_filter::process_item           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Wait until a data item becomes available, and invoke operator() on that item. 
+<p>
+This interface is blocking. Returns 'success' if an item was processed. Returns 'end_of_stream' if there are no more items to process. Never returns 'item_not_available', as it blocks until another return condition applies. 
+</div>
+</div><p>
+<a class="anchor" name="c4f90f2c771bce748beb9be734fa286c"></a><!-- doxytag: member="tbb::thread_bound_filter::try_process_item" ref="c4f90f2c771bce748beb9be734fa286c" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">result_type __TBB_EXPORTED_METHOD tbb::thread_bound_filter::try_process_item           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+If a data item is available, invoke operator() on that item. 
+<p>
+This interface is non-blocking. Returns 'success' if an item was processed. Returns 'item_not_available' if no item can be processed now but more may arrive in the future, or if token limit is reached. Returns 'end_of_stream' if there are no more items to process. 
+</div>
+</div><p>
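+<p>
+A minimal sketch of a user thread servicing such a stage (not part of the generated reference); the class and function names are hypothetical:
+<p>
+<pre class="fragment">
+#include "tbb/pipeline.h"
+
+// A serial output stage that must run on the thread that services it.
+class output_stage : public tbb::thread_bound_filter {
+public:
+    output_stage() : tbb::thread_bound_filter( serial_in_order ) {}
+    /*override*/ void* operator()( void* item ) {
+        // consume 'item' here (application specific)
+        return NULL;
+    }
+};
+
+// Run on the servicing thread while tbb::pipeline::run() executes elsewhere.
+void service_loop( output_stage &amp;stage ) {
+    while ( stage.process_item() != tbb::thread_bound_filter::end_of_stream )
+        continue;   // process_item() blocks, so no item_not_available handling is needed
+}
+</pre>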
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00419.html">pipeline.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00214.png b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00214.png
new file mode 100644 (file)
index 0000000..3327bd5
Binary files /dev/null and b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00214.png differ
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00215.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00215.html
new file mode 100644 (file)
index 0000000..d7560a3
--- /dev/null
@@ -0,0 +1,68 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::tick_count Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00215.html">tick_count</a></div>
+<h1>tbb::tick_count Class Reference<br>
+<small>
+[<a class="el" href="a00284.html">Timing</a>]</small>
+</h1><!-- doxytag: class="tbb::tick_count" -->Absolute timestamp.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00488.html">tick_count.h</a>&gt;</code>
+<p>
+<a href="a00115.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="34593326ae4191e02a13c7cbdab9de4c"></a><!-- doxytag: member="tbb::tick_count::tick_count" ref="34593326ae4191e02a13c7cbdab9de4c" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00215.html#34593326ae4191e02a13c7cbdab9de4c">tick_count</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct an absolute timestamp initialized to zero. <br></td></tr>
+<tr><td colspan="2"><br><h2>Static Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="fb7f78ca61cf28398645ace66e284473"></a><!-- doxytag: member="tbb::tick_count::now" ref="fb7f78ca61cf28398645ace66e284473" args="()" -->
+static <a class="el" href="a00215.html">tick_count</a>&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00215.html#fb7f78ca61cf28398645ace66e284473">now</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Return current time. <br></td></tr>
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="09dde78a4100800c11bb883d6204b586"></a><!-- doxytag: member="tbb::tick_count::operator-" ref="09dde78a4100800c11bb883d6204b586" args="(const tick_count &amp;t1, const tick_count &amp;t0)" -->
+<a class="el" href="a00216.html">interval_t</a>&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00215.html#09dde78a4100800c11bb883d6204b586">operator-</a> (const <a class="el" href="a00215.html">tick_count</a> &amp;t1, const <a class="el" href="a00215.html">tick_count</a> &amp;t0)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Subtract two timestamps to get the time interval between. <br></td></tr>
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00216.html">interval_t</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Relative time interval.  <a href="a00216.html#_details">More...</a><br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Absolute timestamp. 
+<p>
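+<p>
+A minimal timing sketch (not part of the generated reference); the work being measured is hypothetical:
+<p>
+<pre class="fragment">
+#include &lt;cstdio&gt;
+#include "tbb/tick_count.h"
+
+void report_elapsed() {
+    tbb::tick_count t0 = tbb::tick_count::now();
+    // ... code being measured ...
+    tbb::tick_count t1 = tbb::tick_count::now();
+    std::printf( "elapsed: %g s\n", (t1 - t0).seconds() );   // operator- yields an interval_t
+}
+</pre>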
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00488.html">tick_count.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00216.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00216.html
new file mode 100644 (file)
index 0000000..d5d0180
--- /dev/null
@@ -0,0 +1,83 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::tick_count::interval_t Class Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00215.html">tick_count</a>::<a class="el" href="a00216.html">interval_t</a></div>
+<h1>tbb::tick_count::interval_t Class Reference</h1><!-- doxytag: class="tbb::tick_count::interval_t" -->Relative time interval.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00488.html">tick_count.h</a>&gt;</code>
+<p>
+<a href="a00116.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="75a9a0949f8a8a84d6758835f1b48dad"></a><!-- doxytag: member="tbb::tick_count::interval_t::interval_t" ref="75a9a0949f8a8a84d6758835f1b48dad" args="()" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00216.html#75a9a0949f8a8a84d6758835f1b48dad">interval_t</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct a time interval representing zero time duration. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="1a21a428e00cced2e6a49e0f5f2258bf"></a><!-- doxytag: member="tbb::tick_count::interval_t::interval_t" ref="1a21a428e00cced2e6a49e0f5f2258bf" args="(double sec)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00216.html#1a21a428e00cced2e6a49e0f5f2258bf">interval_t</a> (double sec)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Construct a time interval representing sec seconds time duration. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="d5d8429c0bc59cf6131b2abc7929fa59"></a><!-- doxytag: member="tbb::tick_count::interval_t::seconds" ref="d5d8429c0bc59cf6131b2abc7929fa59" args="() const " -->
+double&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00216.html#d5d8429c0bc59cf6131b2abc7929fa59">seconds</a> () const </td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Return the length of a time interval in seconds. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="cd9814947902e26463a69a111530f81b"></a><!-- doxytag: member="tbb::tick_count::interval_t::operator+=" ref="cd9814947902e26463a69a111530f81b" args="(const interval_t &amp;i)" -->
+<a class="el" href="a00216.html">interval_t</a> &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00216.html#cd9814947902e26463a69a111530f81b">operator+=</a> (const <a class="el" href="a00216.html">interval_t</a> &amp;i)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Accumulation operator. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="35ff7eaf7c2031b4a991402ac9ecb940"></a><!-- doxytag: member="tbb::tick_count::interval_t::operator-=" ref="35ff7eaf7c2031b4a991402ac9ecb940" args="(const interval_t &amp;i)" -->
+<a class="el" href="a00216.html">interval_t</a> &amp;&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00216.html#35ff7eaf7c2031b4a991402ac9ecb940">operator-=</a> (const <a class="el" href="a00216.html">interval_t</a> &amp;i)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Subtraction operator. <br></td></tr>
+<tr><td colspan="2"><br><h2>Friends</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="cc3a7ad7000317951ce61f706163efe8"></a><!-- doxytag: member="tbb::tick_count::interval_t::tbb::tick_count" ref="cc3a7ad7000317951ce61f706163efe8" args="" -->
+class&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00216.html#cc3a7ad7000317951ce61f706163efe8">tbb::tick_count</a></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="09dde78a4100800c11bb883d6204b586"></a><!-- doxytag: member="tbb::tick_count::interval_t::operator-" ref="09dde78a4100800c11bb883d6204b586" args="(const tick_count &amp;t1, const tick_count &amp;t0)" -->
+<a class="el" href="a00216.html">interval_t</a>&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00216.html#09dde78a4100800c11bb883d6204b586">operator-</a> (const <a class="el" href="a00215.html">tick_count</a> &amp;t1, const <a class="el" href="a00215.html">tick_count</a> &amp;t0)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Extract the intervals from the tick_counts and subtract them. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="5871ead1ca230efbe52a5008470e6428"></a><!-- doxytag: member="tbb::tick_count::interval_t::operator+" ref="5871ead1ca230efbe52a5008470e6428" args="(const interval_t &amp;i, const interval_t &amp;j)" -->
+<a class="el" href="a00216.html">interval_t</a>&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00216.html#5871ead1ca230efbe52a5008470e6428">operator+</a> (const <a class="el" href="a00216.html">interval_t</a> &amp;i, const <a class="el" href="a00216.html">interval_t</a> &amp;j)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Add two intervals. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="fa509691e1d689830931e36edd274f76"></a><!-- doxytag: member="tbb::tick_count::interval_t::operator-" ref="fa509691e1d689830931e36edd274f76" args="(const interval_t &amp;i, const interval_t &amp;j)" -->
+<a class="el" href="a00216.html">interval_t</a>&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00216.html#fa509691e1d689830931e36edd274f76">operator-</a> (const <a class="el" href="a00216.html">interval_t</a> &amp;i, const <a class="el" href="a00216.html">interval_t</a> &amp;j)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Subtract two intervals. <br></td></tr>
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+Relative time interval. 
+<p>
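+<p>
+A minimal sketch of accumulating intervals (not part of the generated reference); the loop body is hypothetical:
+<p>
+<pre class="fragment">
+#include "tbb/tick_count.h"
+
+double total_seconds( int iterations ) {
+    tbb::tick_count::interval_t total;                  // zero-length interval
+    for ( int i = 0; i &lt; iterations; ++i ) {
+        tbb::tick_count t0 = tbb::tick_count::now();
+        // ... one unit of work ...
+        tbb::tick_count t1 = tbb::tick_count::now();
+        total += t1 - t0;                               // accumulate via operator+=
+    }
+    return total.seconds();
+}
+</pre>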
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00488.html">tick_count.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00217.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00217.html
new file mode 100644 (file)
index 0000000..7262ae9
--- /dev/null
@@ -0,0 +1,55 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::internal::work_around_alignment_bug&lt; Size, T &gt; Struct Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<b>internal</b>::<a class="el" href="a00217.html">work_around_alignment_bug</a></div>
+<h1>tbb::internal::work_around_alignment_bug&lt; Size, T &gt; Struct Template Reference</h1><!-- doxytag: class="tbb::internal::work_around_alignment_bug" -->Workaround for a bug in the GNU 3.2 and MSVC compilers.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00465.html">tbb_machine.h</a>&gt;</code>
+<p>
+<a href="a00109.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Static Public Attributes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="137d1b1dd94016bc52e423dc33c4fd1d"></a><!-- doxytag: member="tbb::internal::work_around_alignment_bug::alignment" ref="137d1b1dd94016bc52e423dc33c4fd1d" args="" -->
+static const size_t&nbsp;</td><td class="memItemRight" valign="bottom"><b>alignment</b> = __alignof(T)</td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;size_t Size, typename T&gt;<br>
+ struct tbb::internal::work_around_alignment_bug&lt; Size, T &gt;</h3>
+
+Workaround for a bug in the GNU 3.2 and MSVC compilers. 
+<p>
+The bug is that the compiler sometimes returns 0 for __alignof(T) when T has not yet been instantiated. The workaround forces instantiation by forcing the computation of sizeof(T) before __alignof(T) is evaluated. 
+<p>
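+<p>
+A minimal sketch of the idiom described above (not the library's actual code; the struct name is hypothetical):
+<p>
+<pre class="fragment">
+#include &lt;cstddef&gt;
+
+// Passing sizeof(T) as a template argument forces T to be instantiated before
+// the alignment query is evaluated, so __alignof(T) no longer yields 0 on the
+// affected compilers.
+template&lt;std::size_t Size, typename T&gt;
+struct alignment_once_instantiated {
+    static const std::size_t value = __alignof(T);   // MSVC spelling; GCC uses __alignof__
+};
+
+// usage: alignment_once_instantiated&lt;sizeof(SomeType), SomeType&gt;::value
+</pre>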
+<hr>The documentation for this struct was generated from the following file:<ul>
+<li><a class="el" href="a00465.html">tbb_machine.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00218.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00218.html
new file mode 100644 (file)
index 0000000..bc4fba9
--- /dev/null
@@ -0,0 +1,93 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::zero_allocator&lt; T, Allocator &gt; Class Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00218.html">zero_allocator</a></div>
+<h1>tbb::zero_allocator&lt; T, Allocator &gt; Class Template Reference<br>
+<small>
+[<a class="el" href="a00282.html">Memory Allocation</a>]</small>
+</h1><!-- doxytag: class="tbb::zero_allocator" -->Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00458.html">tbb_allocator.h</a>&gt;</code>
+<p>
+<a href="a00090.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="77c87446a645cc3dbeba6fc82cc7eec0"></a><!-- doxytag: member="tbb::zero_allocator::base_allocator_type" ref="77c87446a645cc3dbeba6fc82cc7eec0" args="" -->
+typedef Allocator&lt; T &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>base_allocator_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="2eb3bf02103f455e0614e0a392388428"></a><!-- doxytag: member="tbb::zero_allocator::value_type" ref="2eb3bf02103f455e0614e0a392388428" args="" -->
+typedef base_allocator_type::value_type&nbsp;</td><td class="memItemRight" valign="bottom"><b>value_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="54d0a81317a90278dc492d44cc7dcdfc"></a><!-- doxytag: member="tbb::zero_allocator::pointer" ref="54d0a81317a90278dc492d44cc7dcdfc" args="" -->
+typedef base_allocator_type::pointer&nbsp;</td><td class="memItemRight" valign="bottom"><b>pointer</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="50536b451124814064b0e8ff7b6cea21"></a><!-- doxytag: member="tbb::zero_allocator::const_pointer" ref="50536b451124814064b0e8ff7b6cea21" args="" -->
+typedef base_allocator_type::const_pointer&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_pointer</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="2ef488ecc55bf4a619f68b021d5b93e2"></a><!-- doxytag: member="tbb::zero_allocator::reference" ref="2ef488ecc55bf4a619f68b021d5b93e2" args="" -->
+typedef base_allocator_type::reference&nbsp;</td><td class="memItemRight" valign="bottom"><b>reference</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="31f0de5db53e4c219e8548078572e1ff"></a><!-- doxytag: member="tbb::zero_allocator::const_reference" ref="31f0de5db53e4c219e8548078572e1ff" args="" -->
+typedef base_allocator_type::const_reference&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_reference</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="55acbfec8d7fd904c9b2040816522084"></a><!-- doxytag: member="tbb::zero_allocator::size_type" ref="55acbfec8d7fd904c9b2040816522084" args="" -->
+typedef base_allocator_type::size_type&nbsp;</td><td class="memItemRight" valign="bottom"><b>size_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="c54d45a74bd3b48ff8db174c266ddede"></a><!-- doxytag: member="tbb::zero_allocator::difference_type" ref="c54d45a74bd3b48ff8db174c266ddede" args="" -->
+typedef base_allocator_type::difference_type&nbsp;</td><td class="memItemRight" valign="bottom"><b>difference_type</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Public Member Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="8c48c0436e3239be8f07c53c7f6658e4"></a><!-- doxytag: member="tbb::zero_allocator::zero_allocator" ref="8c48c0436e3239be8f07c53c7f6658e4" args="(const zero_allocator &amp;a)" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>zero_allocator</b> (const <a class="el" href="a00218.html">zero_allocator</a> &amp;a)  throw ()</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="67685c1cfcd3b5f81ac8f3c8dd1375b9"></a><!-- doxytag: member="tbb::zero_allocator::zero_allocator" ref="67685c1cfcd3b5f81ac8f3c8dd1375b9" args="(const zero_allocator&lt; U &gt; &amp;a)" -->
+template&lt;typename U&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>zero_allocator</b> (const <a class="el" href="a00218.html">zero_allocator</a>&lt; U &gt; &amp;a)  throw ()</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="b6e7eb50d9faeca418d56a9758357377"></a><!-- doxytag: member="tbb::zero_allocator::allocate" ref="b6e7eb50d9faeca418d56a9758357377" args="(const size_type n, const void *hint=0)" -->
+pointer&nbsp;</td><td class="memItemRight" valign="bottom"><b>allocate</b> (const size_type n, const void *hint=0)</td></tr>
+
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">struct &nbsp;</td><td class="memItemRight" valign="bottom"><b>rebind</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;typename T, template&lt; typename X &gt; class Allocator = tbb_allocator&gt;<br>
+ class tbb::zero_allocator&lt; T, Allocator &gt;</h3>
+
+Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5. 
+<p>
+The class is an adapter over an actual allocator: every allocation it returns is zero-filled using memset. The members are ordered the same way they are in section 20.4.1 of the ISO C++ Standard. 
+<p>
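+<p>
+A minimal sketch of a common use (not part of the generated reference); the typedef and function names are hypothetical:
+<p>
+<pre class="fragment">
+#include &lt;cstddef&gt;
+#include "tbb/atomic.h"
+#include "tbb/concurrent_vector.h"
+#include "tbb/tbb_allocator.h"
+
+// Because zero_allocator hands out zero-filled memory, newly grown
+// tbb::atomic&lt;int&gt; elements start at 0 without explicit initialization.
+typedef tbb::concurrent_vector&lt; tbb::atomic&lt;int&gt;,
+                                tbb::zero_allocator&lt; tbb::atomic&lt;int&gt; &gt; &gt; counter_vector;
+
+void ensure_size( counter_vector &amp;counters, std::size_t n ) {
+    counters.grow_to_at_least( n );   // new counters are already zero
+}
+</pre>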
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00458.html">tbb_allocator.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00219.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00219.html
new file mode 100644 (file)
index 0000000..888f156
--- /dev/null
@@ -0,0 +1,68 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::zero_allocator&lt; void, Allocator &gt; Class Template Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00219.html">zero_allocator&lt; void, Allocator &gt;</a></div>
+<h1>tbb::zero_allocator&lt; void, Allocator &gt; Class Template Reference<br>
+<small>
+[<a class="el" href="a00282.html">Memory Allocation</a>]</small>
+</h1><!-- doxytag: class="tbb::zero_allocator&lt; void, Allocator &gt;" -->Analogous to std::allocator&lt;void&gt;, as defined in ISO C++ Standard, Section 20.4.1.  
+<a href="#_details">More...</a>
+<p>
+<code>#include &lt;<a class="el" href="a00458.html">tbb_allocator.h</a>&gt;</code>
+<p>
+<a href="a00092.html">List of all members.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Public Types</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ce4f3bf06e8aaf8020fa443297eff5f7"></a><!-- doxytag: member="tbb::zero_allocator&lt; void, Allocator &gt;::base_allocator_type" ref="ce4f3bf06e8aaf8020fa443297eff5f7" args="" -->
+typedef Allocator&lt; void &gt;&nbsp;</td><td class="memItemRight" valign="bottom"><b>base_allocator_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="5cd644eeecd6d729c848c8e99ffd4592"></a><!-- doxytag: member="tbb::zero_allocator&lt; void, Allocator &gt;::value_type" ref="5cd644eeecd6d729c848c8e99ffd4592" args="" -->
+typedef base_allocator_type::value_type&nbsp;</td><td class="memItemRight" valign="bottom"><b>value_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="8f68aa346a17c94f8c5c7945a2c50858"></a><!-- doxytag: member="tbb::zero_allocator&lt; void, Allocator &gt;::pointer" ref="8f68aa346a17c94f8c5c7945a2c50858" args="" -->
+typedef base_allocator_type::pointer&nbsp;</td><td class="memItemRight" valign="bottom"><b>pointer</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ae95a5761371ee7b61f2c339385684e9"></a><!-- doxytag: member="tbb::zero_allocator&lt; void, Allocator &gt;::const_pointer" ref="ae95a5761371ee7b61f2c339385684e9" args="" -->
+typedef base_allocator_type::const_pointer&nbsp;</td><td class="memItemRight" valign="bottom"><b>const_pointer</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">struct &nbsp;</td><td class="memItemRight" valign="bottom"><b>rebind</b></td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<h3>template&lt;template&lt; typename T &gt; class Allocator&gt;<br>
+ class tbb::zero_allocator&lt; void, Allocator &gt;</h3>
+
+Analogous to std::allocator&lt;void&gt;, as defined in ISO C++ Standard, Section 20.4.1. 
+<p>
+<hr>The documentation for this class was generated from the following file:<ul>
+<li><a class="el" href="a00458.html">tbb_allocator.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00253.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00253.html
new file mode 100644 (file)
index 0000000..2ef8f29
--- /dev/null
@@ -0,0 +1,77 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>scalable_allocator.h File Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>scalable_allocator.h File Reference</h1>
+<p>
+<a href="a00435.html">Go to the source code of this file.</a><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Namespaces</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">namespace &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00272.html">tbb</a></td></tr>
+
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5.  <a href="a00196.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">struct &nbsp;</td><td class="memItemRight" valign="bottom"><b>tbb::scalable_allocator&lt; T &gt;::rebind&lt; U &gt;</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00197.html">tbb::scalable_allocator&lt; void &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Analogous to std::allocator&lt;void&gt;, as defined in ISO C++ Standard, Section 20.4.1.  <a href="a00197.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">struct &nbsp;</td><td class="memItemRight" valign="bottom"><b>tbb::scalable_allocator&lt; void &gt;::rebind&lt; U &gt;</b></td></tr>
+
+<tr><td colspan="2"><br><h2>Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void *__TBB_EXPORTED_FUNC&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gc25b8e6c76db0b346a8249796a7a2475">scalable_malloc</a> (size_t size)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void __TBB_EXPORTED_FUNC&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gca3579c21244dba9f0c351e5984d4565">scalable_free</a> (void *ptr)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void *__TBB_EXPORTED_FUNC&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#g951bbbbd2d041acb59ba5fa910b52543">scalable_realloc</a> (void *ptr, size_t size)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void *__TBB_EXPORTED_FUNC&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#g3f5a2fde0bcaa3eda35be32c8658f444">scalable_calloc</a> (size_t nobj, size_t size)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">int __TBB_EXPORTED_FUNC&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#g05dcec987480bb2c82ecdead6a085899">scalable_posix_memalign</a> (void **memptr, size_t alignment, size_t size)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void *__TBB_EXPORTED_FUNC&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gc1c7aaa1fe85c17ba5a3a96f7e8d89e7">scalable_aligned_malloc</a> (size_t size, size_t alignment)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void *__TBB_EXPORTED_FUNC&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gbaea91376be80dfabd7c93eaffd9abaa">scalable_aligned_realloc</a> (void *ptr, size_t size, size_t alignment)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void __TBB_EXPORTED_FUNC&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#g65a20e812012f15ec7442d5b45d0cba5">scalable_aligned_free</a> (void *ptr)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">size_t __TBB_EXPORTED_FUNC&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#g0965ce1b4b7835f92869c7fd867265f7">scalable_msize</a> (void *ptr)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="906ebb461ecb0446989739fd0399e4b8"></a><!-- doxytag: member="scalable_allocator.h::operator==" ref="906ebb461ecb0446989739fd0399e4b8" args="(const scalable_allocator&lt; T &gt; &amp;, const scalable_allocator&lt; U &gt; &amp;)" -->
+template&lt;typename T, typename U&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>tbb::operator==</b> (const scalable_allocator&lt; T &gt; &amp;, const scalable_allocator&lt; U &gt; &amp;)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="a92757aca0a69082eb8dc223eb257433"></a><!-- doxytag: member="scalable_allocator.h::operator!=" ref="a92757aca0a69082eb8dc223eb257433" args="(const scalable_allocator&lt; T &gt; &amp;, const scalable_allocator&lt; U &gt; &amp;)" -->
+template&lt;typename T, typename U&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>tbb::operator!=</b> (const scalable_allocator&lt; T &gt; &amp;, const scalable_allocator&lt; U &gt; &amp;)</td></tr>
+
+</table>
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00272.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00272.html
new file mode 100644 (file)
index 0000000..2149aba
--- /dev/null
@@ -0,0 +1,668 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb Namespace Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li id="current"><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="namespaces.html"><span>Namespace List</span></a></li>
+    <li><a href="namespacemembers.html"><span>Namespace&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb Namespace Reference</h1>The namespace tbb contains all components of the library.  
+<a href="#_details">More...</a>
+<p>
+<table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00146.html">aligned_space</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Block of space aligned sufficiently to construct an array T with N elements.  <a href="a00146.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">struct &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00147.html">atomic</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Primary template for atomic.  <a href="a00147.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">struct &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00148.html">atomic&lt; void * &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Specialization for <a class="el" href="a00148.html">atomic&lt;void*&gt;</a>, for sake of not allowing arithmetic or operator-&gt;.  <a href="a00148.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00152.html">blocked_range</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A range over which to iterate.  <a href="a00152.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00153.html">blocked_range2d</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A 2-dimensional range that models the Range concept.  <a href="a00153.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00154.html">blocked_range3d</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A 3-dimensional range that models the Range concept.  <a href="a00154.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00155.html">cache_aligned_allocator</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5.  <a href="a00155.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00156.html">cache_aligned_allocator&lt; void &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Analogous to std::allocator&lt;void&gt;, as defined in ISO C++ Standard, Section 20.4.1.  <a href="a00156.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00158.html">combinable</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Thread-local storage with optional reduction.  <a href="a00158.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">struct &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00213.html">tbb_hash_compare</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">hash_compare that is default argument for concurrent_hash_map  <a href="a00213.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00159.html">concurrent_bounded_queue</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A high-performance thread-safe blocking concurrent bounded queue.  <a href="a00159.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><b>tbb_hash</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html">concurrent_vector</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Concurrent vector container.  <a href="a00166.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00177.html">mutex</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Wrapper around the platform's native reader-writer lock.  <a href="a00177.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00179.html">null_mutex</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A mutex which does nothing.  <a href="a00179.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00181.html">null_rw_mutex</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A rw mutex which does nothing.  <a href="a00181.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00183.html">parallel_do_feeder</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Class the user supplied algorithm body uses to add new tasks.  <a href="a00183.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">struct &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00186.html">pre_scan_tag</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Used to indicate that the initial scan is being performed.  <a href="a00186.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">struct &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00171.html">final_scan_tag</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Used to indicate that the final scan is being performed.  <a href="a00171.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00184.html">parallel_while</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration over a stream, with optional addition of more work.  <a href="a00184.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00198.html">simple_partitioner</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A simple partitioner.  <a href="a00198.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00150.html">auto_partitioner</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">An auto partitioner.  <a href="a00150.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00145.html">affinity_partitioner</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">An affinity partitioner.  <a href="a00145.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00169.html">filter</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A stage in a pipeline.  <a href="a00169.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00214.html">thread_bound_filter</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A stage in a pipeline served by a user thread.  <a href="a00214.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00185.html">pipeline</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A processing pipeline that applies filters to items.  <a href="a00185.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00187.html">queuing_mutex</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Queuing lock with local-only spinning.  <a href="a00187.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00189.html">queuing_rw_mutex</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Reader-writer lock with local-only spinning.  <a href="a00189.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00194.html">recursive_mutex</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Mutex that allows recursive mutex acquisition.  <a href="a00194.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00196.html">scalable_allocator</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5.  <a href="a00196.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00197.html">scalable_allocator&lt; void &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Analogous to std::allocator&lt;void&gt;, as defined in ISO C++ Standard, Section 20.4.1.  <a href="a00197.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00199.html">spin_mutex</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A lock that occupies a single byte.  <a href="a00199.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00201.html">spin_rw_mutex_v3</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Fast, unfair, spinning reader-writer lock with backoff and writer-preference.  <a href="a00201.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00206.html">task_group_context</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Used to form groups of tasks.  <a href="a00206.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html">task</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Base class for user-defined tasks.  <a href="a00204.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00167.html">empty_task</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">task that does nothing. Useful for synchronization.  <a href="a00167.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00207.html">task_list</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A list of children.  <a href="a00207.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><b>task_handle</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><b>task_group</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><b>structured_task_group</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00208.html">task_scheduler_init</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Class representing reference to tbb scheduler.  <a href="a00208.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00209.html">tbb_allocator</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5.  <a href="a00209.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00210.html">tbb_allocator&lt; void &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Analogous to std::allocator&lt;void&gt;, as defined in ISO C++ Standard, Section 20.4.1.  <a href="a00210.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00218.html">zero_allocator</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5.  <a href="a00218.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00219.html">zero_allocator&lt; void, Allocator &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Analogous to std::allocator&lt;void&gt;, as defined in ISO C++ Standard, Section 20.4.1.  <a href="a00219.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00151.html">bad_last_alloc</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Exception for concurrent containers.  <a href="a00151.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00173.html">improper_lock</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Exception for PPL locks.  <a href="a00173.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00175.html">missing_wait</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Exception for missing wait on structured_task_group.  <a href="a00175.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00174.html">invalid_multiple_scheduling</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Exception for repeated scheduling of the same task_handle.  <a href="a00174.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00211.html">tbb_exception</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Interface to be implemented by all exceptions TBB recognizes and propagates across the threads.  <a href="a00211.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00157.html">captured_exception</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">This class is used by TBB to propagate information about unhandled exceptions into the root thread.  <a href="a00157.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00176.html">movable_exception</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Template that can be used to implement exception that transfers arbitrary ExceptionData to the root thread.  <a href="a00176.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00203.html">split</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Dummy type that distinguishes splitting constructor from copy constructor.  <a href="a00203.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00215.html">tick_count</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Absolute timestamp.  <a href="a00215.html#_details">More...</a><br></td></tr>
+<tr><td colspan="2"><br><h2>Namespaces</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">namespace &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00279.html">strict_ppl</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">For internal use only. <br></td></tr>
+
+<p>
+<tr><td colspan="2"><br><h2>parallel_do</h2></td></tr>
+<tr><td colspan="2">See also requirements on <a class="el" href="parallel_do_body_req.html">parallel_do Body</a>. <br><br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g3383e2703977012b6f384d673410f1f7"></a><!-- doxytag: member="tbb::parallel_do" ref="g3383e2703977012b6f384d673410f1f7" args="(Iterator first, Iterator last, const Body &amp;body)" -->
+template&lt;typename Iterator, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g3383e2703977012b6f384d673410f1f7">parallel_do</a> (Iterator first, Iterator last, const Body &amp;body)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration over a range, with optional addition of more work. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g2617dc9b88b3285a7212599d49f74228"></a><!-- doxytag: member="tbb::parallel_do" ref="g2617dc9b88b3285a7212599d49f74228" args="(Iterator first, Iterator last, const Body &amp;body, task_group_context &amp;context)" -->
+template&lt;typename Iterator, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g2617dc9b88b3285a7212599d49f74228">parallel_do</a> (Iterator first, Iterator last, const Body &amp;body, <a class="el" href="a00206.html">task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration over a range, with optional addition of more work and user-supplied context. <br></td></tr>
+<tr><td colspan="2"><br><h2>parallel_for</h2></td></tr>
+<tr><td colspan="2">See also requirements on <a class="el" href="range_req.html">Range</a> and <a class="el" href="parallel_for_body_req.html">parallel_for Body</a>. <br><br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g68cc046ef72c42ce205fccbc435a0d81"></a><!-- doxytag: member="tbb::parallel_for" ref="g68cc046ef72c42ce205fccbc435a0d81" args="(const Range &amp;range, const Body &amp;body)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g68cc046ef72c42ce205fccbc435a0d81">parallel_for</a> (const Range &amp;range, const Body &amp;body)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration over range with default partitioner. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g13cac5dd55c7533bccea43a51c33d0e5"></a><!-- doxytag: member="tbb::parallel_for" ref="g13cac5dd55c7533bccea43a51c33d0e5" args="(const Range &amp;range, const Body &amp;body, const simple_partitioner &amp;partitioner)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g13cac5dd55c7533bccea43a51c33d0e5">parallel_for</a> (const Range &amp;range, const Body &amp;body, const <a class="el" href="a00198.html">simple_partitioner</a> &amp;partitioner)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration over range with simple partitioner. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="ga7ac75d532389b55b9247f3fdb0b00d1"></a><!-- doxytag: member="tbb::parallel_for" ref="ga7ac75d532389b55b9247f3fdb0b00d1" args="(const Range &amp;range, const Body &amp;body, const auto_partitioner &amp;partitioner)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#ga7ac75d532389b55b9247f3fdb0b00d1">parallel_for</a> (const Range &amp;range, const Body &amp;body, const <a class="el" href="a00150.html">auto_partitioner</a> &amp;partitioner)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration over range with <a class="el" href="a00150.html">auto_partitioner</a>. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g9cd1b210ceb1c040f30e390b4a21bde8"></a><!-- doxytag: member="tbb::parallel_for" ref="g9cd1b210ceb1c040f30e390b4a21bde8" args="(const Range &amp;range, const Body &amp;body, affinity_partitioner &amp;partitioner)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g9cd1b210ceb1c040f30e390b4a21bde8">parallel_for</a> (const Range &amp;range, const Body &amp;body, <a class="el" href="a00145.html">affinity_partitioner</a> &amp;partitioner)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration over range with <a class="el" href="a00145.html">affinity_partitioner</a>. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g2d317a5e0078cd193125439fed60dfdc"></a><!-- doxytag: member="tbb::parallel_for" ref="g2d317a5e0078cd193125439fed60dfdc" args="(const Range &amp;range, const Body &amp;body, const simple_partitioner &amp;partitioner, task_group_context &amp;context)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g2d317a5e0078cd193125439fed60dfdc">parallel_for</a> (const Range &amp;range, const Body &amp;body, const <a class="el" href="a00198.html">simple_partitioner</a> &amp;partitioner, <a class="el" href="a00206.html">task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration over range with simple partitioner and user-supplied context. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g1c0700e3f85e83a788ff3ede88ebb7e9"></a><!-- doxytag: member="tbb::parallel_for" ref="g1c0700e3f85e83a788ff3ede88ebb7e9" args="(const Range &amp;range, const Body &amp;body, const auto_partitioner &amp;partitioner, task_group_context &amp;context)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g1c0700e3f85e83a788ff3ede88ebb7e9">parallel_for</a> (const Range &amp;range, const Body &amp;body, const <a class="el" href="a00150.html">auto_partitioner</a> &amp;partitioner, <a class="el" href="a00206.html">task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration over range with <a class="el" href="a00150.html">auto_partitioner</a> and user-supplied context. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g04b4696b67370c01353ff5974c8f1196"></a><!-- doxytag: member="tbb::parallel_for" ref="g04b4696b67370c01353ff5974c8f1196" args="(const Range &amp;range, const Body &amp;body, affinity_partitioner &amp;partitioner, task_group_context &amp;context)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g04b4696b67370c01353ff5974c8f1196">parallel_for</a> (const Range &amp;range, const Body &amp;body, <a class="el" href="a00145.html">affinity_partitioner</a> &amp;partitioner, <a class="el" href="a00206.html">task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration over range with <a class="el" href="a00145.html">affinity_partitioner</a> and user-supplied context. <br></td></tr>
+<tr><td colspan="2"><br><h2>parallel_for_each</h2></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gcd40c32f319747e61a8f73fcfc452001"></a><!-- doxytag: member="tbb::parallel_for_each" ref="gcd40c32f319747e61a8f73fcfc452001" args="(InputIterator first, InputIterator last, const Function &amp;f, task_group_context &amp;context)" -->
+template&lt;typename InputIterator, typename Function&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gcd40c32f319747e61a8f73fcfc452001">parallel_for_each</a> (InputIterator first, InputIterator last, const Function &amp;f, <a class="el" href="a00206.html">task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Calls function f for all items from [first, last) interval using user-supplied context. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gc2d710ca573f0a9bd94379cba3772def"></a><!-- doxytag: member="tbb::parallel_for_each" ref="gc2d710ca573f0a9bd94379cba3772def" args="(InputIterator first, InputIterator last, const Function &amp;f)" -->
+template&lt;typename InputIterator, typename Function&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gc2d710ca573f0a9bd94379cba3772def">parallel_for_each</a> (InputIterator first, InputIterator last, const Function &amp;f)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Uses default context. <br></td></tr>
+<tr><td colspan="2"><br><h2>parallel_invoke</h2></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gd3e2998f171494f94c2103f4eb924084"></a><!-- doxytag: member="tbb::parallel_invoke" ref="gd3e2998f171494f94c2103f4eb924084" args="(const F0 &amp;f0, const F1 &amp;f1, tbb::task_group_context &amp;context)" -->
+template&lt;typename F0, typename F1&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gd3e2998f171494f94c2103f4eb924084">parallel_invoke</a> (const F0 &amp;f0, const F1 &amp;f1, <a class="el" href="a00206.html">tbb::task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Executes a list of tasks in parallel and waits for all tasks to complete. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g4eb73562e4145c8343ed9f996267f039"></a><!-- doxytag: member="tbb::parallel_invoke" ref="g4eb73562e4145c8343ed9f996267f039" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, tbb::task_group_context &amp;context)" -->
+template&lt;typename F0, typename F1, typename F2&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, <a class="el" href="a00206.html">tbb::task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gd1335c4b54fbf8d3f5be6a5c255c8c60"></a><!-- doxytag: member="tbb::parallel_invoke" ref="gd1335c4b54fbf8d3f5be6a5c255c8c60" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, tbb::task_group_context &amp;context)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, <a class="el" href="a00206.html">tbb::task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g40b88e3db7ecb09cbfe0230ea1c24030"></a><!-- doxytag: member="tbb::parallel_invoke" ref="g40b88e3db7ecb09cbfe0230ea1c24030" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, tbb::task_group_context &amp;context)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, <a class="el" href="a00206.html">tbb::task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g98a8b388f7e0b7621a964a8c23752d1d"></a><!-- doxytag: member="tbb::parallel_invoke" ref="g98a8b388f7e0b7621a964a8c23752d1d" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, tbb::task_group_context &amp;context)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4, typename F5&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, <a class="el" href="a00206.html">tbb::task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gb95f31638bdde9d909361ad2e96a93eb"></a><!-- doxytag: member="tbb::parallel_invoke" ref="gb95f31638bdde9d909361ad2e96a93eb" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, tbb::task_group_context &amp;context)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, typename F6&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, <a class="el" href="a00206.html">tbb::task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g1655b24786f010ee0a008907a07bb61d"></a><!-- doxytag: member="tbb::parallel_invoke" ref="g1655b24786f010ee0a008907a07bb61d" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7, tbb::task_group_context &amp;context)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, typename F6, typename F7&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7, <a class="el" href="a00206.html">tbb::task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g105220f1c95f9888b696a3e47027527b"></a><!-- doxytag: member="tbb::parallel_invoke" ref="g105220f1c95f9888b696a3e47027527b" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7, const F8 &amp;f8, tbb::task_group_context &amp;context)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, typename F6, typename F7, typename F8&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7, const F8 &amp;f8, <a class="el" href="a00206.html">tbb::task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g1ef1774e0fcc4f632fe0af2591781c4c"></a><!-- doxytag: member="tbb::parallel_invoke" ref="g1ef1774e0fcc4f632fe0af2591781c4c" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7, const F8 &amp;f8, const F9 &amp;f9, tbb::task_group_context &amp;context)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, typename F6, typename F7, typename F8, typename F9&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7, const F8 &amp;f8, const F9 &amp;f9, <a class="el" href="a00206.html">tbb::task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="ge832d8ad8b246c884e3c897ed63f8216"></a><!-- doxytag: member="tbb::parallel_invoke" ref="ge832d8ad8b246c884e3c897ed63f8216" args="(const F0 &amp;f0, const F1 &amp;f1)" -->
+template&lt;typename F0, typename F1&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gb91fec4e7ba49754ad583ccb127afc66"></a><!-- doxytag: member="tbb::parallel_invoke" ref="gb91fec4e7ba49754ad583ccb127afc66" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2)" -->
+template&lt;typename F0, typename F1, typename F2&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g421d0f66ee69eea134a35d1ae371d8d6"></a><!-- doxytag: member="tbb::parallel_invoke" ref="g421d0f66ee69eea134a35d1ae371d8d6" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g8aacce74d691b4f44a0f7becadd9578c"></a><!-- doxytag: member="tbb::parallel_invoke" ref="g8aacce74d691b4f44a0f7becadd9578c" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g8cc6994f7cd6eaf25feb5d7cc04a2e64"></a><!-- doxytag: member="tbb::parallel_invoke" ref="g8cc6994f7cd6eaf25feb5d7cc04a2e64" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4, typename F5&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g995ba9583ae24bddb8bd9a599cc8b4c7"></a><!-- doxytag: member="tbb::parallel_invoke" ref="g995ba9583ae24bddb8bd9a599cc8b4c7" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, typename F6&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g34341a4c24c6f548886cd14077374c5c"></a><!-- doxytag: member="tbb::parallel_invoke" ref="g34341a4c24c6f548886cd14077374c5c" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, typename F6, typename F7&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g2a11342753488d460866d48370a69517"></a><!-- doxytag: member="tbb::parallel_invoke" ref="g2a11342753488d460866d48370a69517" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7, const F8 &amp;f8)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, typename F6, typename F7, typename F8&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7, const F8 &amp;f8)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gffcf6835ceee43455f310352a3b4faa5"></a><!-- doxytag: member="tbb::parallel_invoke" ref="gffcf6835ceee43455f310352a3b4faa5" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7, const F8 &amp;f8, const F9 &amp;f9)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, typename F6, typename F7, typename F8, typename F9&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7, const F8 &amp;f8, const F9 &amp;f9)</td></tr>
+
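+<tr><td colspan="2"><p>
+A minimal sketch of the three-argument form of <code>parallel_invoke</code>; the three functions are illustrative assumptions and must be independent, since they may run concurrently.
+</p>
+<pre class="fragment">
+#include "tbb/parallel_invoke.h"
+#include &lt;cstdio&gt;
+
+// Three independent pieces of work.
+void BuildIndex()   { std::printf( "index\n" ); }
+void LoadTextures() { std::printf( "textures\n" ); }
+void ParseConfig()  { std::printf( "config\n" ); }
+
+int main() {
+    // Runs the three functions in parallel and returns when all have finished.
+    tbb::parallel_invoke( BuildIndex, LoadTextures, ParseConfig );
+    return 0;
+}
+</pre></td></tr>
+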
+<tr><td colspan="2"><br><h2>parallel_reduce</h2></td></tr>
+<tr><td colspan="2">See also requirements on <a class="el" href="range_req.html">Range</a> and <a class="el" href="parallel_reduce_body_req.html">parallel_reduce Body</a>. <br><br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g1b3d59c5eb62683c5754db6970392fa3"></a><!-- doxytag: member="tbb::parallel_reduce" ref="g1b3d59c5eb62683c5754db6970392fa3" args="(const Range &amp;range, Body &amp;body)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g1b3d59c5eb62683c5754db6970392fa3">parallel_reduce</a> (const Range &amp;range, Body &amp;body)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction and default partitioner. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gec1b7c03f9da909bef5db12e3d41bed3"></a><!-- doxytag: member="tbb::parallel_reduce" ref="gec1b7c03f9da909bef5db12e3d41bed3" args="(const Range &amp;range, Body &amp;body, const simple_partitioner &amp;partitioner)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gec1b7c03f9da909bef5db12e3d41bed3">parallel_reduce</a> (const Range &amp;range, Body &amp;body, const <a class="el" href="a00198.html">simple_partitioner</a> &amp;partitioner)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction and <a class="el" href="a00198.html">simple_partitioner</a>. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g18a19157e6245992fc00ca0adeb7dd37"></a><!-- doxytag: member="tbb::parallel_reduce" ref="g18a19157e6245992fc00ca0adeb7dd37" args="(const Range &amp;range, Body &amp;body, const auto_partitioner &amp;partitioner)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g18a19157e6245992fc00ca0adeb7dd37">parallel_reduce</a> (const Range &amp;range, Body &amp;body, const <a class="el" href="a00150.html">auto_partitioner</a> &amp;partitioner)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction and <a class="el" href="a00150.html">auto_partitioner</a>. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gc61e73fcc36c92d79a217fc355ff4a6b"></a><!-- doxytag: member="tbb::parallel_reduce" ref="gc61e73fcc36c92d79a217fc355ff4a6b" args="(const Range &amp;range, Body &amp;body, affinity_partitioner &amp;partitioner)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gc61e73fcc36c92d79a217fc355ff4a6b">parallel_reduce</a> (const Range &amp;range, Body &amp;body, <a class="el" href="a00145.html">affinity_partitioner</a> &amp;partitioner)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction and <a class="el" href="a00145.html">affinity_partitioner</a>. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g45cb00c42a18e334bbde8b7535afe460"></a><!-- doxytag: member="tbb::parallel_reduce" ref="g45cb00c42a18e334bbde8b7535afe460" args="(const Range &amp;range, Body &amp;body, const simple_partitioner &amp;partitioner, task_group_context &amp;context)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g45cb00c42a18e334bbde8b7535afe460">parallel_reduce</a> (const Range &amp;range, Body &amp;body, const <a class="el" href="a00198.html">simple_partitioner</a> &amp;partitioner, <a class="el" href="a00206.html">task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction, simple partitioner and user-supplied context. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g1c1ea1d7c61b3c225e92c70d669a53a5"></a><!-- doxytag: member="tbb::parallel_reduce" ref="g1c1ea1d7c61b3c225e92c70d669a53a5" args="(const Range &amp;range, Body &amp;body, const auto_partitioner &amp;partitioner, task_group_context &amp;context)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g1c1ea1d7c61b3c225e92c70d669a53a5">parallel_reduce</a> (const Range &amp;range, Body &amp;body, const <a class="el" href="a00150.html">auto_partitioner</a> &amp;partitioner, <a class="el" href="a00206.html">task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction, <a class="el" href="a00150.html">auto_partitioner</a> and user-supplied context. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gd9ac3a3811060314695f33b703c6e11b"></a><!-- doxytag: member="tbb::parallel_reduce" ref="gd9ac3a3811060314695f33b703c6e11b" args="(const Range &amp;range, Body &amp;body, affinity_partitioner &amp;partitioner, task_group_context &amp;context)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gd9ac3a3811060314695f33b703c6e11b">parallel_reduce</a> (const Range &amp;range, Body &amp;body, <a class="el" href="a00145.html">affinity_partitioner</a> &amp;partitioner, <a class="el" href="a00206.html">task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction, <a class="el" href="a00145.html">affinity_partitioner</a> and user-supplied context. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gc9412e09fb01fcad8c018ea9cffb28ef"></a><!-- doxytag: member="tbb::parallel_reduce" ref="gc9412e09fb01fcad8c018ea9cffb28ef" args="(const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction)" -->
+template&lt;typename Range, typename Value, typename RealBody, typename Reduction&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">Value&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gc9412e09fb01fcad8c018ea9cffb28ef">parallel_reduce</a> (const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction and default partitioner. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gaddffeec0e892ac3d6fc7fc2053e1eca"></a><!-- doxytag: member="tbb::parallel_reduce" ref="gaddffeec0e892ac3d6fc7fc2053e1eca" args="(const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, const simple_partitioner &amp;partitioner)" -->
+template&lt;typename Range, typename Value, typename RealBody, typename Reduction&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">Value&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gaddffeec0e892ac3d6fc7fc2053e1eca">parallel_reduce</a> (const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, const <a class="el" href="a00198.html">simple_partitioner</a> &amp;partitioner)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction and <a class="el" href="a00198.html">simple_partitioner</a>. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gb175401f0729e40dd2c5860a17c14385"></a><!-- doxytag: member="tbb::parallel_reduce" ref="gb175401f0729e40dd2c5860a17c14385" args="(const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, const auto_partitioner &amp;partitioner)" -->
+template&lt;typename Range, typename Value, typename RealBody, typename Reduction&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">Value&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gb175401f0729e40dd2c5860a17c14385">parallel_reduce</a> (const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, const <a class="el" href="a00150.html">auto_partitioner</a> &amp;partitioner)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction and <a class="el" href="a00150.html">auto_partitioner</a>. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gb7f1f1828ae2b330ce05b8513a495154"></a><!-- doxytag: member="tbb::parallel_reduce" ref="gb7f1f1828ae2b330ce05b8513a495154" args="(const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, affinity_partitioner &amp;partitioner)" -->
+template&lt;typename Range, typename Value, typename RealBody, typename Reduction&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">Value&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gb7f1f1828ae2b330ce05b8513a495154">parallel_reduce</a> (const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, <a class="el" href="a00145.html">affinity_partitioner</a> &amp;partitioner)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction and <a class="el" href="a00145.html">affinity_partitioner</a>. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gfbc0cc2026d87f11a96bcd62788f5bb5"></a><!-- doxytag: member="tbb::parallel_reduce" ref="gfbc0cc2026d87f11a96bcd62788f5bb5" args="(const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, const simple_partitioner &amp;partitioner, task_group_context &amp;context)" -->
+template&lt;typename Range, typename Value, typename RealBody, typename Reduction&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">Value&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gfbc0cc2026d87f11a96bcd62788f5bb5">parallel_reduce</a> (const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, const <a class="el" href="a00198.html">simple_partitioner</a> &amp;partitioner, <a class="el" href="a00206.html">task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction, simple partitioner and user-supplied context. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g630c90a399937d9d4ae70ff883186dfd"></a><!-- doxytag: member="tbb::parallel_reduce" ref="g630c90a399937d9d4ae70ff883186dfd" args="(const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, const auto_partitioner &amp;partitioner, task_group_context &amp;context)" -->
+template&lt;typename Range, typename Value, typename RealBody, typename Reduction&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">Value&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g630c90a399937d9d4ae70ff883186dfd">parallel_reduce</a> (const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, const <a class="el" href="a00150.html">auto_partitioner</a> &amp;partitioner, <a class="el" href="a00206.html">task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction, <a class="el" href="a00150.html">auto_partitioner</a> and user-supplied context. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g496bd7eadb3b97495ccb5655ef90319e"></a><!-- doxytag: member="tbb::parallel_reduce" ref="g496bd7eadb3b97495ccb5655ef90319e" args="(const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, affinity_partitioner &amp;partitioner, task_group_context &amp;context)" -->
+template&lt;typename Range, typename Value, typename RealBody, typename Reduction&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">Value&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g496bd7eadb3b97495ccb5655ef90319e">parallel_reduce</a> (const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, <a class="el" href="a00145.html">affinity_partitioner</a> &amp;partitioner, <a class="el" href="a00206.html">task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction, <a class="el" href="a00145.html">affinity_partitioner</a> and user-supplied context. <br></td></tr>
+<tr><td colspan="2"><br><h2>parallel_scan</h2></td></tr>
+<tr><td colspan="2">See also requirements on <a class="el" href="range_req.html">Range</a> and <a class="el" href="parallel_scan_body_req.html">parallel_scan Body</a>. <br><br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="ged143f31dd3d96ded02ab3db915b91c7"></a><!-- doxytag: member="tbb::parallel_scan" ref="ged143f31dd3d96ded02ab3db915b91c7" args="(const Range &amp;range, Body &amp;body)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#ged143f31dd3d96ded02ab3db915b91c7">parallel_scan</a> (const Range &amp;range, Body &amp;body)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel prefix with default partitioner. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gc9fac8870b2e6365fb337014404529df"></a><!-- doxytag: member="tbb::parallel_scan" ref="gc9fac8870b2e6365fb337014404529df" args="(const Range &amp;range, Body &amp;body, const simple_partitioner &amp;partitioner)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gc9fac8870b2e6365fb337014404529df">parallel_scan</a> (const Range &amp;range, Body &amp;body, const <a class="el" href="a00198.html">simple_partitioner</a> &amp;partitioner)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel prefix with <a class="el" href="a00198.html">simple_partitioner</a>. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g62fde400a37bbca1a2fddc8e3d22f556"></a><!-- doxytag: member="tbb::parallel_scan" ref="g62fde400a37bbca1a2fddc8e3d22f556" args="(const Range &amp;range, Body &amp;body, const auto_partitioner &amp;partitioner)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g62fde400a37bbca1a2fddc8e3d22f556">parallel_scan</a> (const Range &amp;range, Body &amp;body, const <a class="el" href="a00150.html">auto_partitioner</a> &amp;partitioner)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel prefix with <a class="el" href="a00150.html">auto_partitioner</a>. <br></td></tr>
+<tr><td colspan="2"><br><h2>parallel_sort</h2></td></tr>
+<tr><td colspan="2">See also requirements on <a class="el" href="parallel_sort_iter_req.html">iterators for parallel_sort</a>. <br><br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2">template&lt;typename RandomAccessIterator, typename Compare&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g49edcf9447cd91a9527a3f8e8512b7aa">parallel_sort</a> (RandomAccessIterator begin, RandomAccessIterator end, const Compare &amp;comp)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Sorts the data in [begin,end) using the given comparator.  <a href="a00280.html#g49edcf9447cd91a9527a3f8e8512b7aa"></a><br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g16c3eb77d0e530834c51ce3857f01012"></a><!-- doxytag: member="tbb::parallel_sort" ref="g16c3eb77d0e530834c51ce3857f01012" args="(RandomAccessIterator begin, RandomAccessIterator end)" -->
+template&lt;typename RandomAccessIterator&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g16c3eb77d0e530834c51ce3857f01012">parallel_sort</a> (RandomAccessIterator begin, RandomAccessIterator end)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Sorts the data in [begin,end) with a default comparator <code>std::less&lt;RandomAccessIterator&gt;</code>. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gc7576f82fdedc8a701a6c17ad9415926"></a><!-- doxytag: member="tbb::parallel_sort" ref="gc7576f82fdedc8a701a6c17ad9415926" args="(T *begin, T *end)" -->
+template&lt;typename T&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gc7576f82fdedc8a701a6c17ad9415926">parallel_sort</a> (T *begin, T *end)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Sorts the data in the range <code></code>[begin,end) with a default comparator <code>std::less&lt;T&gt;</code>. <br></td></tr>
+<tr><td colspan="2"><br><h2>Typedefs</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="bf4486f36580f7d4bf95aed2e052a380"></a><!-- doxytag: member="tbb::critical_section" ref="bf4486f36580f7d4bf95aed2e052a380" args="" -->
+typedef internal::critical_section_v4&nbsp;</td><td class="memItemRight" valign="bottom"><b>critical_section</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="7fdc2b067a243747d4c3dfe6f3d28476"></a><!-- doxytag: member="tbb::spin_rw_mutex" ref="7fdc2b067a243747d4c3dfe6f3d28476" args="" -->
+typedef <a class="el" href="a00201.html">spin_rw_mutex_v3</a>&nbsp;</td><td class="memItemRight" valign="bottom"><b>spin_rw_mutex</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="a3d457eb737199125df5ab0cf2f44094"></a><!-- doxytag: member="tbb::stack_size_type" ref="a3d457eb737199125df5ab0cf2f44094" args="" -->
+typedef std::size_t&nbsp;</td><td class="memItemRight" valign="bottom"><b>stack_size_type</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ceb89493291d4b03ce5f062fbaac0653"></a><!-- doxytag: member="tbb::task_scheduler_observer" ref="ceb89493291d4b03ce5f062fbaac0653" args="" -->
+typedef internal::task_scheduler_observer_v3&nbsp;</td><td class="memItemRight" valign="bottom"><b>task_scheduler_observer</b></td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ed375248ff6019a70ca0f9da528e5d0b"></a><!-- doxytag: member="tbb::assertion_handler_type" ref="ed375248ff6019a70ca0f9da528e5d0b" args="(const char *filename, int line, const char *expression, const char *comment)" -->
+typedef void(*)&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00272.html#ed375248ff6019a70ca0f9da528e5d0b">assertion_handler_type</a> (const char *filename, int line, const char *expression, const char *comment)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Type for an assertion handler. <br></td></tr>
+<tr><td colspan="2"><br><h2>Enumerations</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">enum &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00272.html#a8686246bb5d3664bd07563749970fef">memory_semantics</a> { <a class="el" href="a00272.html#a8686246bb5d3664bd07563749970fefc6db01678b1749dff7554688d079520c">__TBB_full_fence</a>, 
+<a class="el" href="a00272.html#a8686246bb5d3664bd07563749970fef5f1fafe8d229d348ff91d937f64e79c7">acquire</a>, 
+<a class="el" href="a00272.html#a8686246bb5d3664bd07563749970fefaa1fa107db0245c41fb109d976ae8d70">release</a>
+ }</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Specifies memory fencing.  <a href="a00272.html#a8686246bb5d3664bd07563749970fef">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">enum &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00272.html#a8622ae61b7e7737dac26542e181178e">ets_key_usage_type</a> { <b>ets_key_per_instance</b>, 
+<b>ets_no_key</b>
+ }</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">enum for selecting between single key and key-per-instance versions <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">enum &nbsp;</td><td class="memItemRight" valign="bottom"><b>task_group_status</b> { <b>not_complete</b>, 
+<b>complete</b>, 
+<b>canceled</b>
+ }</td></tr>
+
+<tr><td colspan="2"><br><h2>Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="2e067bc86f20023cf3034f2ac310927b"></a><!-- doxytag: member="tbb::__TBB_DECL_ATOMIC" ref="2e067bc86f20023cf3034f2ac310927b" args="(__TBB_LONG_LONG) __TBB_DECL_ATOMIC(unsigned __TBB_LONG_LONG) __TBB_DECL_ATOMIC(long) __TBB_DECL_ATOMIC(unsigned long) __TBB_DECL_ATOMIC_ALT(unsigned" -->
+&nbsp;</td><td class="memItemRight" valign="bottom"><b>__TBB_DECL_ATOMIC</b> (__TBB_LONG_LONG) __TBB_DECL_ATOMIC(unsigned __TBB_LONG_LONG) __TBB_DECL_ATOMIC(long) __TBB_DECL_ATOMIC(unsigned long) __TBB_DECL_ATOMIC_ALT(unsigned</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="ad165cf61abbe349d413df2589679add"></a><!-- doxytag: member="tbb::__TBB_DECL_ATOMIC_ALT" ref="ad165cf61abbe349d413df2589679add" args="(int, ptrdiff_t) __TBB_DECL_ATOMIC(unsigned) __TBB_DECL_ATOMIC(int) __TBB_DECL_ATOMIC(unsigned short) __TBB_DECL_ATOMIC(short) __TBB_DECL_ATOMIC(char) __TBB_DECL_ATOMIC(signed char) __TBB_DECL_ATOMIC(unsigned char) __TBB_DECL_ATOMIC(wchar_t) template&lt; typename T &gt; struct atomic&lt; T * &gt;" -->
+size_t&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00272.html#ad165cf61abbe349d413df2589679add">__TBB_DECL_ATOMIC_ALT</a> (int, ptrdiff_t) __TBB_DECL_ATOMIC(unsigned) __TBB_DECL_ATOMIC(int) __TBB_DECL_ATOMIC(unsigned short) __TBB_DECL_ATOMIC(short) __TBB_DECL_ATOMIC(char) __TBB_DECL_ATOMIC(signed char) __TBB_DECL_ATOMIC(unsigned char) __TBB_DECL_ATOMIC(wchar_t) template&lt; typename T &gt; struct <a class="el" href="a00147.html">atomic</a>&lt; T * &gt;</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Specialization for atomic&lt;T*&gt; with arithmetic and operator-&gt;. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="7af9509624a62ea848afe775f892ed12"></a><!-- doxytag: member="tbb::operator==" ref="7af9509624a62ea848afe775f892ed12" args="(const cache_aligned_allocator&lt; T &gt; &amp;, const cache_aligned_allocator&lt; U &gt; &amp;)" -->
+template&lt;typename T, typename U&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>operator==</b> (const <a class="el" href="a00155.html">cache_aligned_allocator</a>&lt; T &gt; &amp;, const <a class="el" href="a00155.html">cache_aligned_allocator</a>&lt; U &gt; &amp;)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="426abbf5243087148f5e3767e68c286b"></a><!-- doxytag: member="tbb::operator!=" ref="426abbf5243087148f5e3767e68c286b" args="(const cache_aligned_allocator&lt; T &gt; &amp;, const cache_aligned_allocator&lt; U &gt; &amp;)" -->
+template&lt;typename T, typename U&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>operator!=</b> (const <a class="el" href="a00155.html">cache_aligned_allocator</a>&lt; T &gt; &amp;, const <a class="el" href="a00155.html">cache_aligned_allocator</a>&lt; U &gt; &amp;)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="f3609fc523a99c101572fdc68f918d66"></a><!-- doxytag: member="tbb::operator==" ref="f3609fc523a99c101572fdc68f918d66" args="(const concurrent_hash_map&lt; Key, T, HashCompare, A1 &gt; &amp;a, const concurrent_hash_map&lt; Key, T, HashCompare, A2 &gt; &amp;b)" -->
+template&lt;typename Key, typename T, typename HashCompare, typename A1, typename A2&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>operator==</b> (const <a class="el" href="a00160.html">concurrent_hash_map</a>&lt; Key, T, HashCompare, A1 &gt; &amp;a, const <a class="el" href="a00160.html">concurrent_hash_map</a>&lt; Key, T, HashCompare, A2 &gt; &amp;b)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="2da30ff86b9a39722f45bc35e1c6934d"></a><!-- doxytag: member="tbb::operator!=" ref="2da30ff86b9a39722f45bc35e1c6934d" args="(const concurrent_hash_map&lt; Key, T, HashCompare, A1 &gt; &amp;a, const concurrent_hash_map&lt; Key, T, HashCompare, A2 &gt; &amp;b)" -->
+template&lt;typename Key, typename T, typename HashCompare, typename A1, typename A2&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>operator!=</b> (const <a class="el" href="a00160.html">concurrent_hash_map</a>&lt; Key, T, HashCompare, A1 &gt; &amp;a, const <a class="el" href="a00160.html">concurrent_hash_map</a>&lt; Key, T, HashCompare, A2 &gt; &amp;b)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="817c20df80fe1a933a8557eb76113e24"></a><!-- doxytag: member="tbb::swap" ref="817c20df80fe1a933a8557eb76113e24" args="(concurrent_hash_map&lt; Key, T, HashCompare, A &gt; &amp;a, concurrent_hash_map&lt; Key, T, HashCompare, A &gt; &amp;b)" -->
+template&lt;typename Key, typename T, typename HashCompare, typename A&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>swap</b> (<a class="el" href="a00160.html">concurrent_hash_map</a>&lt; Key, T, HashCompare, A &gt; &amp;a, <a class="el" href="a00160.html">concurrent_hash_map</a>&lt; Key, T, HashCompare, A &gt; &amp;b)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="11419db87ac98110907dda08a24f0949"></a><!-- doxytag: member="tbb::operator==" ref="11419db87ac98110907dda08a24f0949" args="(const concurrent_vector&lt; T, A1 &gt; &amp;a, const concurrent_vector&lt; T, A2 &gt; &amp;b)" -->
+template&lt;typename T, class A1, class A2&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>operator==</b> (const <a class="el" href="a00166.html">concurrent_vector</a>&lt; T, A1 &gt; &amp;a, const <a class="el" href="a00166.html">concurrent_vector</a>&lt; T, A2 &gt; &amp;b)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="aa9e09f2e9154ffd6658fad8355fb491"></a><!-- doxytag: member="tbb::operator!=" ref="aa9e09f2e9154ffd6658fad8355fb491" args="(const concurrent_vector&lt; T, A1 &gt; &amp;a, const concurrent_vector&lt; T, A2 &gt; &amp;b)" -->
+template&lt;typename T, class A1, class A2&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>operator!=</b> (const <a class="el" href="a00166.html">concurrent_vector</a>&lt; T, A1 &gt; &amp;a, const <a class="el" href="a00166.html">concurrent_vector</a>&lt; T, A2 &gt; &amp;b)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="33267dd805415351956d45b4b5347190"></a><!-- doxytag: member="tbb::operator&lt;" ref="33267dd805415351956d45b4b5347190" args="(const concurrent_vector&lt; T, A1 &gt; &amp;a, const concurrent_vector&lt; T, A2 &gt; &amp;b)" -->
+template&lt;typename T, class A1, class A2&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>operator&lt;</b> (const <a class="el" href="a00166.html">concurrent_vector</a>&lt; T, A1 &gt; &amp;a, const <a class="el" href="a00166.html">concurrent_vector</a>&lt; T, A2 &gt; &amp;b)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="14e2968ab20cb714bef1f0352fc152f0"></a><!-- doxytag: member="tbb::operator&gt;" ref="14e2968ab20cb714bef1f0352fc152f0" args="(const concurrent_vector&lt; T, A1 &gt; &amp;a, const concurrent_vector&lt; T, A2 &gt; &amp;b)" -->
+template&lt;typename T, class A1, class A2&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>operator&gt;</b> (const <a class="el" href="a00166.html">concurrent_vector</a>&lt; T, A1 &gt; &amp;a, const <a class="el" href="a00166.html">concurrent_vector</a>&lt; T, A2 &gt; &amp;b)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="0042a36a670a397fb52e713edbaecd0e"></a><!-- doxytag: member="tbb::operator&lt;=" ref="0042a36a670a397fb52e713edbaecd0e" args="(const concurrent_vector&lt; T, A1 &gt; &amp;a, const concurrent_vector&lt; T, A2 &gt; &amp;b)" -->
+template&lt;typename T, class A1, class A2&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>operator&lt;=</b> (const <a class="el" href="a00166.html">concurrent_vector</a>&lt; T, A1 &gt; &amp;a, const <a class="el" href="a00166.html">concurrent_vector</a>&lt; T, A2 &gt; &amp;b)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="f70eb1d931473b69ba4bcf93af8baa33"></a><!-- doxytag: member="tbb::operator&gt;=" ref="f70eb1d931473b69ba4bcf93af8baa33" args="(const concurrent_vector&lt; T, A1 &gt; &amp;a, const concurrent_vector&lt; T, A2 &gt; &amp;b)" -->
+template&lt;typename T, class A1, class A2&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>operator&gt;=</b> (const <a class="el" href="a00166.html">concurrent_vector</a>&lt; T, A1 &gt; &amp;a, const <a class="el" href="a00166.html">concurrent_vector</a>&lt; T, A2 &gt; &amp;b)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="4a3897ea8d8a48e885d764bd7d370d50"></a><!-- doxytag: member="tbb::swap" ref="4a3897ea8d8a48e885d764bd7d370d50" args="(concurrent_vector&lt; T, A &gt; &amp;a, concurrent_vector&lt; T, A &gt; &amp;b)" -->
+template&lt;typename T, class A&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>swap</b> (<a class="el" href="a00166.html">concurrent_vector</a>&lt; T, A &gt; &amp;a, <a class="el" href="a00166.html">concurrent_vector</a>&lt; T, A &gt; &amp;b)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="906ebb461ecb0446989739fd0399e4b8"></a><!-- doxytag: member="tbb::operator==" ref="906ebb461ecb0446989739fd0399e4b8" args="(const scalable_allocator&lt; T &gt; &amp;, const scalable_allocator&lt; U &gt; &amp;)" -->
+template&lt;typename T, typename U&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>operator==</b> (const <a class="el" href="a00196.html">scalable_allocator</a>&lt; T &gt; &amp;, const <a class="el" href="a00196.html">scalable_allocator</a>&lt; U &gt; &amp;)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="a92757aca0a69082eb8dc223eb257433"></a><!-- doxytag: member="tbb::operator!=" ref="a92757aca0a69082eb8dc223eb257433" args="(const scalable_allocator&lt; T &gt; &amp;, const scalable_allocator&lt; U &gt; &amp;)" -->
+template&lt;typename T, typename U&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>operator!=</b> (const <a class="el" href="a00196.html">scalable_allocator</a>&lt; T &gt; &amp;, const <a class="el" href="a00196.html">scalable_allocator</a>&lt; U &gt; &amp;)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="7e2f5baabe173fbf48e94fb9058f1b41"></a><!-- doxytag: member="tbb::is_current_task_group_canceling" ref="7e2f5baabe173fbf48e94fb9058f1b41" args="()" -->
+bool&nbsp;</td><td class="memItemRight" valign="bottom"><b>is_current_task_group_canceling</b> ()</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="3bb9a8b837a9b7c4b5107caf01e81329"></a><!-- doxytag: member="tbb::make_task" ref="3bb9a8b837a9b7c4b5107caf01e81329" args="(const F &amp;f)" -->
+template&lt;class F&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">task_handle&lt; F &gt;&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>make_task</b> (const F &amp;f)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="c8a46d6c0fe474eb399d0d09c27a4685"></a><!-- doxytag: member="tbb::operator==" ref="c8a46d6c0fe474eb399d0d09c27a4685" args="(const tbb_allocator&lt; T &gt; &amp;, const tbb_allocator&lt; U &gt; &amp;)" -->
+template&lt;typename T, typename U&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>operator==</b> (const <a class="el" href="a00209.html">tbb_allocator</a>&lt; T &gt; &amp;, const <a class="el" href="a00209.html">tbb_allocator</a>&lt; U &gt; &amp;)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="3d9665c21c894f06f1614947103dc9d6"></a><!-- doxytag: member="tbb::operator!=" ref="3d9665c21c894f06f1614947103dc9d6" args="(const tbb_allocator&lt; T &gt; &amp;, const tbb_allocator&lt; U &gt; &amp;)" -->
+template&lt;typename T, typename U&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>operator!=</b> (const <a class="el" href="a00209.html">tbb_allocator</a>&lt; T &gt; &amp;, const <a class="el" href="a00209.html">tbb_allocator</a>&lt; U &gt; &amp;)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="b64fb7e52f0049b5a103cbb6fb5814f0"></a><!-- doxytag: member="tbb::operator==" ref="b64fb7e52f0049b5a103cbb6fb5814f0" args="(const zero_allocator&lt; T1, B1 &gt; &amp;a, const zero_allocator&lt; T2, B2 &gt; &amp;b)" -->
+template&lt;typename T1, template&lt; typename X1 &gt; class B1, typename T2, template&lt; typename X2 &gt; class B2&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>operator==</b> (const <a class="el" href="a00218.html">zero_allocator</a>&lt; T1, B1 &gt; &amp;a, const <a class="el" href="a00218.html">zero_allocator</a>&lt; T2, B2 &gt; &amp;b)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="ae43b5151d0220fe3ade0b447cd64f0d"></a><!-- doxytag: member="tbb::operator!=" ref="ae43b5151d0220fe3ade0b447cd64f0d" args="(const zero_allocator&lt; T1, B1 &gt; &amp;a, const zero_allocator&lt; T2, B2 &gt; &amp;b)" -->
+template&lt;typename T1, template&lt; typename X1 &gt; class B1, typename T2, template&lt; typename X2 &gt; class B2&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">bool&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>operator!=</b> (const <a class="el" href="a00218.html">zero_allocator</a>&lt; T1, B1 &gt; &amp;a, const <a class="el" href="a00218.html">zero_allocator</a>&lt; T2, B2 &gt; &amp;b)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="823fa1c15dd829d1d9167157450ddcd9"></a><!-- doxytag: member="tbb::set_assertion_handler" ref="823fa1c15dd829d1d9167157450ddcd9" args="(assertion_handler_type new_handler)" -->
+<a class="el" href="a00272.html#ed375248ff6019a70ca0f9da528e5d0b">assertion_handler_type</a> __TBB_EXPORTED_FUNC&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00272.html#823fa1c15dd829d1d9167157450ddcd9">set_assertion_handler</a> (<a class="el" href="a00272.html#ed375248ff6019a70ca0f9da528e5d0b">assertion_handler_type</a> new_handler)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Set assertion handler and return previous value of it. <br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void __TBB_EXPORTED_FUNC&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00272.html#3d1252787be39b4aef311f1cadaff9e8">assertion_failure</a> (const char *filename, int line, const char *expression, const char *comment)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Process an assertion failure.  <a href="#3d1252787be39b4aef311f1cadaff9e8"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">int __TBB_EXPORTED_FUNC&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00272.html#a6858b22e90041c9c4669674ff39b056">TBB_runtime_interface_version</a> ()</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">The function returns the interface version of the TBB shared library being used.  <a href="#a6858b22e90041c9c4669674ff39b056"></a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top"><a class="anchor" name="6a42aa5dbcf06ba86c71efb73d8311c3"></a><!-- doxytag: member="tbb::operator-" ref="6a42aa5dbcf06ba86c71efb73d8311c3" args="(const tick_count &amp;t1, const tick_count &amp;t0)" -->
+<a class="el" href="a00216.html">tick_count::interval_t</a>&nbsp;</td><td class="memItemRight" valign="bottom"><b>operator-</b> (const <a class="el" href="a00215.html">tick_count</a> &amp;t1, const <a class="el" href="a00215.html">tick_count</a> &amp;t0)</td></tr>
+
+</table>
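+<p>
+A minimal usage sketch for the <code>parallel_sort</code> overloads listed in the
+table above; the header name and the sample data are only illustrative assumptions.
+</p>
+<pre>
+#include "tbb/parallel_sort.h"
+#include &lt;vector&gt;
+
+int main() {
+    // Iterator overload: sorts [begin,end) with the default comparator.
+    std::vector&lt;int&gt; v(1000);
+    for (int i = 0; i &lt; 1000; ++i)
+        v[i] = 1000 - i;
+    tbb::parallel_sort(v.begin(), v.end());
+
+    // Pointer overload, e.g. for a plain array.
+    int a[4] = { 3, 1, 4, 1 };
+    tbb::parallel_sort(a, a + 4);
+    return 0;
+}
+</pre>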
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+The namespace tbb contains all components of the library. <hr><h2>Enumeration Type Documentation</h2>
+<a class="anchor" name="a8686246bb5d3664bd07563749970fef"></a><!-- doxytag: member="tbb::memory_semantics" ref="a8686246bb5d3664bd07563749970fef" args="" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="a00272.html#a8686246bb5d3664bd07563749970fef">tbb::memory_semantics</a>          </td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Specifies memory fencing. 
+<p>
+<dl compact><dt><b>Enumerator: </b></dt><dd>
+<table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" name="a8686246bb5d3664bd07563749970fefc6db01678b1749dff7554688d079520c"></a><!-- doxytag: member="__TBB_full_fence" ref="a8686246bb5d3664bd07563749970fefc6db01678b1749dff7554688d079520c" args="" -->__TBB_full_fence</em>&nbsp;</td><td>
+For internal use only. </td></tr>
+<tr><td valign="top"><em><a class="anchor" name="a8686246bb5d3664bd07563749970fef5f1fafe8d229d348ff91d937f64e79c7"></a><!-- doxytag: member="acquire" ref="a8686246bb5d3664bd07563749970fef5f1fafe8d229d348ff91d937f64e79c7" args="" -->acquire</em>&nbsp;</td><td>
+Acquire fence. </td></tr>
+<tr><td valign="top"><em><a class="anchor" name="a8686246bb5d3664bd07563749970fefaa1fa107db0245c41fb109d976ae8d70"></a><!-- doxytag: member="release" ref="a8686246bb5d3664bd07563749970fefaa1fa107db0245c41fb109d976ae8d70" args="" -->release</em>&nbsp;</td><td>
+Release fence. </td></tr>
+</table>
+</dl>
+
+</div>
+</div><p>
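+<p>
+A hedged sketch of how these enumerators are typically used: they are passed as
+template arguments to the read-modify-write operations of <code>tbb::atomic</code>
+(<code>fetch_and_add</code>, <code>fetch_and_store</code>, <code>compare_and_swap</code>).
+Those member templates and the <code>tbb/atomic.h</code> header are assumptions here;
+they are documented on the <code>tbb::atomic</code> page, not on this one.
+</p>
+<pre>
+#include "tbb/atomic.h"
+
+// Namespace-scope tbb::atomic objects are zero-initialized.
+tbb::atomic&lt;int&gt; flag;
+tbb::atomic&lt;int&gt; counter;
+
+void example() {
+    // The plain form uses the default memory semantics (a full fence).
+    counter.fetch_and_add(1);
+
+    // Weaker ordering is selected via the memory_semantics template argument.
+    if (flag.compare_and_swap&lt;tbb::acquire&gt;(1, 0) == 0) {   // acquire: flag 0 -&gt; 1
+        counter.fetch_and_add(1);
+        flag.fetch_and_store&lt;tbb::release&gt;(0);              // release: flag back to 0
+    }
+}
+</pre>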
+<hr><h2>Function Documentation</h2>
+<a class="anchor" name="3d1252787be39b4aef311f1cadaff9e8"></a><!-- doxytag: member="tbb::assertion_failure" ref="3d1252787be39b4aef311f1cadaff9e8" args="(const char *filename, int line, const char *expression, const char *comment)" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void __TBB_EXPORTED_FUNC tbb::assertion_failure           </td>
+          <td>(</td>
+          <td class="paramtype">const char *&nbsp;</td>
+          <td class="paramname"> <em>filename</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int&nbsp;</td>
+          <td class="paramname"> <em>line</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const char *&nbsp;</td>
+          <td class="paramname"> <em>expression</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const char *&nbsp;</td>
+          <td class="paramname"> <em>comment</em></td><td>&nbsp;</td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td><td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+Process an assertion failure. 
+<p>
+Normally called from the __TBB_ASSERT macro. If the assertion handler is null, prints a message describing the assertion failure and aborts. Otherwise, calls the assertion handler. 
+</div>
+</div><p>
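+<p>
+A minimal sketch of installing a custom handler with <code>set_assertion_handler</code>;
+the handler signature follows <code>assertion_handler_type</code> above. The
+<code>tbb/tbb_stddef.h</code> header and the requirement that assertions be enabled
+(<code>TBB_USE_ASSERT</code>) are assumptions, and the logging body is only illustrative.
+</p>
+<pre>
+#include "tbb/tbb_stddef.h"   // assumed location of the assertion-handler declarations
+#include &lt;cstdio&gt;
+#include &lt;cstdlib&gt;
+
+// Signature matches tbb::assertion_handler_type.
+void my_assertion_handler(const char* filename, int line,
+                          const char* expression, const char* comment) {
+    std::fprintf(stderr, "TBB assertion failed at %s:%d: %s (%s)\n",
+                 filename, line, expression, comment ? comment : "");
+    std::abort();
+}
+
+void install_handler() {
+    // Installs the new handler and returns the previously installed one,
+    // which could be restored later with another call.
+    tbb::assertion_handler_type previous =
+        tbb::set_assertion_handler(my_assertion_handler);
+    (void)previous;
+}
+</pre>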
+<a class="anchor" name="a6858b22e90041c9c4669674ff39b056"></a><!-- doxytag: member="tbb::TBB_runtime_interface_version" ref="a6858b22e90041c9c4669674ff39b056" args="()" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int __TBB_EXPORTED_FUNC tbb::TBB_runtime_interface_version           </td>
+          <td>(</td>
+          <td class="paramname">          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+The function returns the interface version of the TBB shared library being used. 
+<p>
+The version it returns is determined at run time, not at compile/link time, so it can differ from the value of TBB_INTERFACE_VERSION obtained at compile time. 
+</div>
+</div><p>
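+<p>
+A short sketch of the run-time versus compile-time check described above. It assumes the
+usual <code>tbb/tbb_stddef.h</code> header, which defines <code>TBB_INTERFACE_VERSION</code>
+and declares this function.
+</p>
+<pre>
+#include "tbb/tbb_stddef.h"
+#include &lt;cstdio&gt;
+
+void check_tbb_version() {
+    int runtime_version = tbb::TBB_runtime_interface_version();
+    // The macro comes from the headers used at compile time; the function
+    // queries the shared library loaded at run time, so the two can differ.
+    if (runtime_version != TBB_INTERFACE_VERSION)
+        std::printf("TBB headers (%d) and library (%d) do not match\n",
+                    TBB_INTERFACE_VERSION, runtime_version);
+}
+</pre>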
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00279.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00279.html
new file mode 100644 (file)
index 0000000..bda5b58
--- /dev/null
@@ -0,0 +1,61 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb::strict_ppl Namespace Reference</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li id="current"><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="namespaces.html"><span>Namespace List</span></a></li>
+    <li><a href="namespacemembers.html"><span>Namespace&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="a00272.html">tbb</a>::<a class="el" href="a00279.html">strict_ppl</a></div>
+<h1>tbb::strict_ppl Namespace Reference</h1>For internal use only.  
+<a href="#_details">More...</a>
+<p>
+<table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00164.html">concurrent_queue</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A high-performance thread-safe non-blocking concurrent queue.  <a href="a00164.html#_details">More...</a><br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="15c764c70c8a32e7a4b8c291d0cc8dde"></a><!-- doxytag: member="tbb::strict_ppl::parallel_for" ref="15c764c70c8a32e7a4b8c291d0cc8dde" args="(Index first, Index last, Index step, const Function &amp;f)" -->
+template&lt;typename Index, typename Function&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00279.html#15c764c70c8a32e7a4b8c291d0cc8dde">parallel_for</a> (Index first, Index last, Index step, const Function &amp;f)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration over a range of integers with a step provided. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="fb5925ad98ec9608139661cfd2b9b88f"></a><!-- doxytag: member="tbb::strict_ppl::parallel_for" ref="fb5925ad98ec9608139661cfd2b9b88f" args="(Index first, Index last, Index step, const Function &amp;f, tbb::task_group_context &amp;context)" -->
+template&lt;typename Index, typename Function&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>parallel_for</b> (Index first, Index last, Index step, const Function &amp;f, <a class="el" href="a00206.html">tbb::task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="490399525b1e690ec31d6db964c6b272"></a><!-- doxytag: member="tbb::strict_ppl::parallel_for" ref="490399525b1e690ec31d6db964c6b272" args="(Index first, Index last, const Function &amp;f)" -->
+template&lt;typename Index, typename Function&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00279.html#490399525b1e690ec31d6db964c6b272">parallel_for</a> (Index first, Index last, const Function &amp;f)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration over a range of integers with a default step value. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="bda0b331c88a836cb756fff0f661d609"></a><!-- doxytag: member="tbb::strict_ppl::parallel_for" ref="bda0b331c88a836cb756fff0f661d609" args="(Index first, Index last, const Function &amp;f, tbb::task_group_context &amp;context)" -->
+template&lt;typename Index, typename Function&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>parallel_for</b> (Index first, Index last, const Function &amp;f, <a class="el" href="a00206.html">tbb::task_group_context</a> &amp;context)</td></tr>
+
+</table>
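+<p>
+A usage sketch for the compact <code>parallel_for</code> overloads listed in the table
+above. In user code they are reached as <code>tbb::parallel_for</code> (this namespace is
+internal); the <code>tbb/parallel_for.h</code> header and the <code>ScaleBody</code> functor
+are illustrative assumptions.
+</p>
+<pre>
+#include "tbb/parallel_for.h"
+
+// Function object applied to each index; a C++0x lambda works as well
+// with compilers that support it.
+struct ScaleBody {
+    float* a;
+    explicit ScaleBody(float* p) : a(p) {}
+    void operator()(int i) const { a[i] *= 2.0f; }
+};
+
+void scale(float* a, int n) {
+    // Compact form with an explicit step; tbb::parallel_for(0, n, ScaleBody(a))
+    // uses the default step of 1.
+    tbb::parallel_for(0, n, 1, ScaleBody(a));
+}
+</pre>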
+<hr><a name="_details"></a><h2>Detailed Description</h2>
+For internal use only. <hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00280.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00280.html
new file mode 100644 (file)
index 0000000..ae74120
--- /dev/null
@@ -0,0 +1,344 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Algorithms</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<h1>Algorithms</h1><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00152.html">tbb::blocked_range&lt; Value &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A range over which to iterate.  <a href="a00152.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00153.html">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A 2-dimensional range that models the Range concept.  <a href="a00153.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00154.html">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A 3-dimensional range that models the Range concept.  <a href="a00154.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">struct &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00186.html">tbb::pre_scan_tag</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Used to indicate that the initial scan is being performed.  <a href="a00186.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">struct &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00171.html">tbb::final_scan_tag</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Used to indicate that the final scan is being performed.  <a href="a00171.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00184.html">tbb::parallel_while&lt; Body &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration over a stream, with optional addition of more work.  <a href="a00184.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00198.html">tbb::simple_partitioner</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A simple partitioner.  <a href="a00198.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00150.html">tbb::auto_partitioner</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">An auto partitioner.  <a href="a00150.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00169.html">tbb::filter</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A stage in a pipeline.  <a href="a00169.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00214.html">tbb::thread_bound_filter</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A stage in a pipeline served by a user thread.  <a href="a00214.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00185.html">tbb::pipeline</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A processing pipeline that applies filters to items.  <a href="a00185.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00203.html">tbb::split</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Dummy type that distinguishes splitting constructor from copy constructor.  <a href="a00203.html#_details">More...</a><br></td></tr>
+<tr><td colspan="2"><br><h2>parallel_do</h2></td></tr>
+<tr><td colspan="2">See also requirements on <a class="el" href="parallel_do_body_req.html">parallel_do Body</a>. <br><br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g3383e2703977012b6f384d673410f1f7"></a><!-- doxytag: member="algorithms::parallel_do" ref="g3383e2703977012b6f384d673410f1f7" args="(Iterator first, Iterator last, const Body &amp;body)" -->
+template&lt;typename Iterator, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g3383e2703977012b6f384d673410f1f7">tbb::parallel_do</a> (Iterator first, Iterator last, const Body &amp;body)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration over a range, with optional addition of more work. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g2617dc9b88b3285a7212599d49f74228"></a><!-- doxytag: member="algorithms::parallel_do" ref="g2617dc9b88b3285a7212599d49f74228" args="(Iterator first, Iterator last, const Body &amp;body, task_group_context &amp;context)" -->
+template&lt;typename Iterator, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g2617dc9b88b3285a7212599d49f74228">tbb::parallel_do</a> (Iterator first, Iterator last, const Body &amp;body, task_group_context &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration over a range, with optional addition of more work and user-supplied context. <br></td></tr>
+<tr><td colspan="2"><br><h2>parallel_for</h2></td></tr>
+<tr><td colspan="2">See also requirements on <a class="el" href="range_req.html">Range</a> and <a class="el" href="parallel_for_body_req.html">parallel_for Body</a>. <br><br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g68cc046ef72c42ce205fccbc435a0d81"></a><!-- doxytag: member="algorithms::parallel_for" ref="g68cc046ef72c42ce205fccbc435a0d81" args="(const Range &amp;range, const Body &amp;body)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g68cc046ef72c42ce205fccbc435a0d81">tbb::parallel_for</a> (const Range &amp;range, const Body &amp;body)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration over range with default partitioner. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g13cac5dd55c7533bccea43a51c33d0e5"></a><!-- doxytag: member="algorithms::parallel_for" ref="g13cac5dd55c7533bccea43a51c33d0e5" args="(const Range &amp;range, const Body &amp;body, const simple_partitioner &amp;partitioner)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g13cac5dd55c7533bccea43a51c33d0e5">tbb::parallel_for</a> (const Range &amp;range, const Body &amp;body, const simple_partitioner &amp;partitioner)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration over range with simple partitioner. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="ga7ac75d532389b55b9247f3fdb0b00d1"></a><!-- doxytag: member="algorithms::parallel_for" ref="ga7ac75d532389b55b9247f3fdb0b00d1" args="(const Range &amp;range, const Body &amp;body, const auto_partitioner &amp;partitioner)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#ga7ac75d532389b55b9247f3fdb0b00d1">tbb::parallel_for</a> (const Range &amp;range, const Body &amp;body, const auto_partitioner &amp;partitioner)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration over range with <a class="el" href="a00150.html">auto_partitioner</a>. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g9cd1b210ceb1c040f30e390b4a21bde8"></a><!-- doxytag: member="algorithms::parallel_for" ref="g9cd1b210ceb1c040f30e390b4a21bde8" args="(const Range &amp;range, const Body &amp;body, affinity_partitioner &amp;partitioner)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g9cd1b210ceb1c040f30e390b4a21bde8">tbb::parallel_for</a> (const Range &amp;range, const Body &amp;body, affinity_partitioner &amp;partitioner)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration over range with <a class="el" href="a00145.html">affinity_partitioner</a>. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g2d317a5e0078cd193125439fed60dfdc"></a><!-- doxytag: member="algorithms::parallel_for" ref="g2d317a5e0078cd193125439fed60dfdc" args="(const Range &amp;range, const Body &amp;body, const simple_partitioner &amp;partitioner, task_group_context &amp;context)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g2d317a5e0078cd193125439fed60dfdc">tbb::parallel_for</a> (const Range &amp;range, const Body &amp;body, const simple_partitioner &amp;partitioner, task_group_context &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration over range with simple partitioner and user-supplied context. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g1c0700e3f85e83a788ff3ede88ebb7e9"></a><!-- doxytag: member="algorithms::parallel_for" ref="g1c0700e3f85e83a788ff3ede88ebb7e9" args="(const Range &amp;range, const Body &amp;body, const auto_partitioner &amp;partitioner, task_group_context &amp;context)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g1c0700e3f85e83a788ff3ede88ebb7e9">tbb::parallel_for</a> (const Range &amp;range, const Body &amp;body, const auto_partitioner &amp;partitioner, task_group_context &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration over range with <a class="el" href="a00150.html">auto_partitioner</a> and user-supplied context. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g04b4696b67370c01353ff5974c8f1196"></a><!-- doxytag: member="algorithms::parallel_for" ref="g04b4696b67370c01353ff5974c8f1196" args="(const Range &amp;range, const Body &amp;body, affinity_partitioner &amp;partitioner, task_group_context &amp;context)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g04b4696b67370c01353ff5974c8f1196">tbb::parallel_for</a> (const Range &amp;range, const Body &amp;body, affinity_partitioner &amp;partitioner, task_group_context &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration over range with <a class="el" href="a00145.html">affinity_partitioner</a> and user-supplied context. <br></td></tr>
+<tr><td colspan="2"><br><h2>parallel_for_each</h2></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gcd40c32f319747e61a8f73fcfc452001"></a><!-- doxytag: member="algorithms::parallel_for_each" ref="gcd40c32f319747e61a8f73fcfc452001" args="(InputIterator first, InputIterator last, const Function &amp;f, task_group_context &amp;context)" -->
+template&lt;typename InputIterator, typename Function&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gcd40c32f319747e61a8f73fcfc452001">tbb::parallel_for_each</a> (InputIterator first, InputIterator last, const Function &amp;f, task_group_context &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Calls function f for all items from [first, last) interval using user-supplied context. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gc2d710ca573f0a9bd94379cba3772def"></a><!-- doxytag: member="algorithms::parallel_for_each" ref="gc2d710ca573f0a9bd94379cba3772def" args="(InputIterator first, InputIterator last, const Function &amp;f)" -->
+template&lt;typename InputIterator, typename Function&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gc2d710ca573f0a9bd94379cba3772def">tbb::parallel_for_each</a> (InputIterator first, InputIterator last, const Function &amp;f)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Uses default context. <br></td></tr>
+<tr><td colspan="2"><br><h2>parallel_invoke</h2></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gd3e2998f171494f94c2103f4eb924084"></a><!-- doxytag: member="algorithms::parallel_invoke" ref="gd3e2998f171494f94c2103f4eb924084" args="(const F0 &amp;f0, const F1 &amp;f1, tbb::task_group_context &amp;context)" -->
+template&lt;typename F0, typename F1&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gd3e2998f171494f94c2103f4eb924084">tbb::parallel_invoke</a> (const F0 &amp;f0, const F1 &amp;f1, <a class="el" href="a00206.html">tbb::task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Executes a list of tasks in parallel and waits for all tasks to complete. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g4eb73562e4145c8343ed9f996267f039"></a><!-- doxytag: member="algorithms::parallel_invoke" ref="g4eb73562e4145c8343ed9f996267f039" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, tbb::task_group_context &amp;context)" -->
+template&lt;typename F0, typename F1, typename F2&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>tbb::parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, <a class="el" href="a00206.html">tbb::task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gd1335c4b54fbf8d3f5be6a5c255c8c60"></a><!-- doxytag: member="algorithms::parallel_invoke" ref="gd1335c4b54fbf8d3f5be6a5c255c8c60" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, tbb::task_group_context &amp;context)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>tbb::parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, <a class="el" href="a00206.html">tbb::task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g40b88e3db7ecb09cbfe0230ea1c24030"></a><!-- doxytag: member="algorithms::parallel_invoke" ref="g40b88e3db7ecb09cbfe0230ea1c24030" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, tbb::task_group_context &amp;context)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>tbb::parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, <a class="el" href="a00206.html">tbb::task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g98a8b388f7e0b7621a964a8c23752d1d"></a><!-- doxytag: member="algorithms::parallel_invoke" ref="g98a8b388f7e0b7621a964a8c23752d1d" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, tbb::task_group_context &amp;context)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4, typename F5&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>tbb::parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, <a class="el" href="a00206.html">tbb::task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gb95f31638bdde9d909361ad2e96a93eb"></a><!-- doxytag: member="algorithms::parallel_invoke" ref="gb95f31638bdde9d909361ad2e96a93eb" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, tbb::task_group_context &amp;context)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, typename F6&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>tbb::parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, <a class="el" href="a00206.html">tbb::task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g1655b24786f010ee0a008907a07bb61d"></a><!-- doxytag: member="algorithms::parallel_invoke" ref="g1655b24786f010ee0a008907a07bb61d" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7, tbb::task_group_context &amp;context)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, typename F6, typename F7&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>tbb::parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7, <a class="el" href="a00206.html">tbb::task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g105220f1c95f9888b696a3e47027527b"></a><!-- doxytag: member="algorithms::parallel_invoke" ref="g105220f1c95f9888b696a3e47027527b" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7, const F8 &amp;f8, tbb::task_group_context &amp;context)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, typename F6, typename F7, typename F8&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>tbb::parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7, const F8 &amp;f8, <a class="el" href="a00206.html">tbb::task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g1ef1774e0fcc4f632fe0af2591781c4c"></a><!-- doxytag: member="algorithms::parallel_invoke" ref="g1ef1774e0fcc4f632fe0af2591781c4c" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7, const F8 &amp;f8, const F9 &amp;f9, tbb::task_group_context &amp;context)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, typename F6, typename F7, typename F8, typename F9&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>tbb::parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7, const F8 &amp;f8, const F9 &amp;f9, <a class="el" href="a00206.html">tbb::task_group_context</a> &amp;context)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="ge832d8ad8b246c884e3c897ed63f8216"></a><!-- doxytag: member="algorithms::parallel_invoke" ref="ge832d8ad8b246c884e3c897ed63f8216" args="(const F0 &amp;f0, const F1 &amp;f1)" -->
+template&lt;typename F0, typename F1&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>tbb::parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gb91fec4e7ba49754ad583ccb127afc66"></a><!-- doxytag: member="algorithms::parallel_invoke" ref="gb91fec4e7ba49754ad583ccb127afc66" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2)" -->
+template&lt;typename F0, typename F1, typename F2&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>tbb::parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g421d0f66ee69eea134a35d1ae371d8d6"></a><!-- doxytag: member="algorithms::parallel_invoke" ref="g421d0f66ee69eea134a35d1ae371d8d6" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>tbb::parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g8aacce74d691b4f44a0f7becadd9578c"></a><!-- doxytag: member="algorithms::parallel_invoke" ref="g8aacce74d691b4f44a0f7becadd9578c" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>tbb::parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g8cc6994f7cd6eaf25feb5d7cc04a2e64"></a><!-- doxytag: member="algorithms::parallel_invoke" ref="g8cc6994f7cd6eaf25feb5d7cc04a2e64" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4, typename F5&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>tbb::parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g995ba9583ae24bddb8bd9a599cc8b4c7"></a><!-- doxytag: member="algorithms::parallel_invoke" ref="g995ba9583ae24bddb8bd9a599cc8b4c7" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, typename F6&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>tbb::parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g34341a4c24c6f548886cd14077374c5c"></a><!-- doxytag: member="algorithms::parallel_invoke" ref="g34341a4c24c6f548886cd14077374c5c" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, typename F6, typename F7&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>tbb::parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g2a11342753488d460866d48370a69517"></a><!-- doxytag: member="algorithms::parallel_invoke" ref="g2a11342753488d460866d48370a69517" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7, const F8 &amp;f8)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, typename F6, typename F7, typename F8&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>tbb::parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7, const F8 &amp;f8)</td></tr>
+
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gffcf6835ceee43455f310352a3b4faa5"></a><!-- doxytag: member="algorithms::parallel_invoke" ref="gffcf6835ceee43455f310352a3b4faa5" args="(const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7, const F8 &amp;f8, const F9 &amp;f9)" -->
+template&lt;typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, typename F6, typename F7, typename F8, typename F9&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><b>tbb::parallel_invoke</b> (const F0 &amp;f0, const F1 &amp;f1, const F2 &amp;f2, const F3 &amp;f3, const F4 &amp;f4, const F5 &amp;f5, const F6 &amp;f6, const F7 &amp;f7, const F8 &amp;f8, const F9 &amp;f9)</td></tr>
+
+<tr><td colspan="2"><br><h2>parallel_reduce</h2></td></tr>
+<tr><td colspan="2">See also requirements on <a class="el" href="range_req.html">Range</a> and <a class="el" href="parallel_reduce_body_req.html">parallel_reduce Body</a>. <br><br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g1b3d59c5eb62683c5754db6970392fa3"></a><!-- doxytag: member="algorithms::parallel_reduce" ref="g1b3d59c5eb62683c5754db6970392fa3" args="(const Range &amp;range, Body &amp;body)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g1b3d59c5eb62683c5754db6970392fa3">tbb::parallel_reduce</a> (const Range &amp;range, Body &amp;body)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction and default partitioner. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gec1b7c03f9da909bef5db12e3d41bed3"></a><!-- doxytag: member="algorithms::parallel_reduce" ref="gec1b7c03f9da909bef5db12e3d41bed3" args="(const Range &amp;range, Body &amp;body, const simple_partitioner &amp;partitioner)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gec1b7c03f9da909bef5db12e3d41bed3">tbb::parallel_reduce</a> (const Range &amp;range, Body &amp;body, const simple_partitioner &amp;partitioner)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction and <a class="el" href="a00198.html">simple_partitioner</a>. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g18a19157e6245992fc00ca0adeb7dd37"></a><!-- doxytag: member="algorithms::parallel_reduce" ref="g18a19157e6245992fc00ca0adeb7dd37" args="(const Range &amp;range, Body &amp;body, const auto_partitioner &amp;partitioner)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g18a19157e6245992fc00ca0adeb7dd37">tbb::parallel_reduce</a> (const Range &amp;range, Body &amp;body, const auto_partitioner &amp;partitioner)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction and <a class="el" href="a00150.html">auto_partitioner</a>. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gc61e73fcc36c92d79a217fc355ff4a6b"></a><!-- doxytag: member="algorithms::parallel_reduce" ref="gc61e73fcc36c92d79a217fc355ff4a6b" args="(const Range &amp;range, Body &amp;body, affinity_partitioner &amp;partitioner)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gc61e73fcc36c92d79a217fc355ff4a6b">tbb::parallel_reduce</a> (const Range &amp;range, Body &amp;body, affinity_partitioner &amp;partitioner)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction and <a class="el" href="a00145.html">affinity_partitioner</a>. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g45cb00c42a18e334bbde8b7535afe460"></a><!-- doxytag: member="algorithms::parallel_reduce" ref="g45cb00c42a18e334bbde8b7535afe460" args="(const Range &amp;range, Body &amp;body, const simple_partitioner &amp;partitioner, task_group_context &amp;context)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g45cb00c42a18e334bbde8b7535afe460">tbb::parallel_reduce</a> (const Range &amp;range, Body &amp;body, const simple_partitioner &amp;partitioner, task_group_context &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction, simple partitioner and user-supplied context. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g1c1ea1d7c61b3c225e92c70d669a53a5"></a><!-- doxytag: member="algorithms::parallel_reduce" ref="g1c1ea1d7c61b3c225e92c70d669a53a5" args="(const Range &amp;range, Body &amp;body, const auto_partitioner &amp;partitioner, task_group_context &amp;context)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g1c1ea1d7c61b3c225e92c70d669a53a5">tbb::parallel_reduce</a> (const Range &amp;range, Body &amp;body, const auto_partitioner &amp;partitioner, task_group_context &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction, <a class="el" href="a00150.html">auto_partitioner</a> and user-supplied context. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gd9ac3a3811060314695f33b703c6e11b"></a><!-- doxytag: member="algorithms::parallel_reduce" ref="gd9ac3a3811060314695f33b703c6e11b" args="(const Range &amp;range, Body &amp;body, affinity_partitioner &amp;partitioner, task_group_context &amp;context)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gd9ac3a3811060314695f33b703c6e11b">tbb::parallel_reduce</a> (const Range &amp;range, Body &amp;body, affinity_partitioner &amp;partitioner, task_group_context &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction, <a class="el" href="a00145.html">affinity_partitioner</a> and user-supplied context. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gc9412e09fb01fcad8c018ea9cffb28ef"></a><!-- doxytag: member="algorithms::parallel_reduce" ref="gc9412e09fb01fcad8c018ea9cffb28ef" args="(const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction)" -->
+template&lt;typename Range, typename Value, typename RealBody, typename Reduction&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">Value&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gc9412e09fb01fcad8c018ea9cffb28ef">tbb::parallel_reduce</a> (const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction and default partitioner. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gaddffeec0e892ac3d6fc7fc2053e1eca"></a><!-- doxytag: member="algorithms::parallel_reduce" ref="gaddffeec0e892ac3d6fc7fc2053e1eca" args="(const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, const simple_partitioner &amp;partitioner)" -->
+template&lt;typename Range, typename Value, typename RealBody, typename Reduction&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">Value&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gaddffeec0e892ac3d6fc7fc2053e1eca">tbb::parallel_reduce</a> (const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, const simple_partitioner &amp;partitioner)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction and <a class="el" href="a00198.html">simple_partitioner</a>. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gb175401f0729e40dd2c5860a17c14385"></a><!-- doxytag: member="algorithms::parallel_reduce" ref="gb175401f0729e40dd2c5860a17c14385" args="(const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, const auto_partitioner &amp;partitioner)" -->
+template&lt;typename Range, typename Value, typename RealBody, typename Reduction&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">Value&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gb175401f0729e40dd2c5860a17c14385">tbb::parallel_reduce</a> (const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, const auto_partitioner &amp;partitioner)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction and <a class="el" href="a00150.html">auto_partitioner</a>. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gb7f1f1828ae2b330ce05b8513a495154"></a><!-- doxytag: member="algorithms::parallel_reduce" ref="gb7f1f1828ae2b330ce05b8513a495154" args="(const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, affinity_partitioner &amp;partitioner)" -->
+template&lt;typename Range, typename Value, typename RealBody, typename Reduction&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">Value&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gb7f1f1828ae2b330ce05b8513a495154">tbb::parallel_reduce</a> (const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, affinity_partitioner &amp;partitioner)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction and <a class="el" href="a00145.html">affinity_partitioner</a>. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gfbc0cc2026d87f11a96bcd62788f5bb5"></a><!-- doxytag: member="algorithms::parallel_reduce" ref="gfbc0cc2026d87f11a96bcd62788f5bb5" args="(const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, const simple_partitioner &amp;partitioner, task_group_context &amp;context)" -->
+template&lt;typename Range, typename Value, typename RealBody, typename Reduction&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">Value&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gfbc0cc2026d87f11a96bcd62788f5bb5">tbb::parallel_reduce</a> (const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, const simple_partitioner &amp;partitioner, task_group_context &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction, simple partitioner and user-supplied context. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g630c90a399937d9d4ae70ff883186dfd"></a><!-- doxytag: member="algorithms::parallel_reduce" ref="g630c90a399937d9d4ae70ff883186dfd" args="(const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, const auto_partitioner &amp;partitioner, task_group_context &amp;context)" -->
+template&lt;typename Range, typename Value, typename RealBody, typename Reduction&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">Value&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g630c90a399937d9d4ae70ff883186dfd">tbb::parallel_reduce</a> (const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, const auto_partitioner &amp;partitioner, task_group_context &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction, <a class="el" href="a00150.html">auto_partitioner</a> and user-supplied context. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g496bd7eadb3b97495ccb5655ef90319e"></a><!-- doxytag: member="algorithms::parallel_reduce" ref="g496bd7eadb3b97495ccb5655ef90319e" args="(const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, affinity_partitioner &amp;partitioner, task_group_context &amp;context)" -->
+template&lt;typename Range, typename Value, typename RealBody, typename Reduction&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">Value&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g496bd7eadb3b97495ccb5655ef90319e">tbb::parallel_reduce</a> (const Range &amp;range, const Value &amp;identity, const RealBody &amp;real_body, const Reduction &amp;reduction, affinity_partitioner &amp;partitioner, task_group_context &amp;context)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel iteration with reduction, <a class="el" href="a00145.html">affinity_partitioner</a> and user-supplied context. <br></td></tr>
+<tr><td colspan="2"><br><h2>parallel_scan</h2></td></tr>
+<tr><td colspan="2">See also requirements on <a class="el" href="range_req.html">Range</a> and <a class="el" href="parallel_scan_body_req.html">parallel_scan Body</a>. <br><br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="ged143f31dd3d96ded02ab3db915b91c7"></a><!-- doxytag: member="algorithms::parallel_scan" ref="ged143f31dd3d96ded02ab3db915b91c7" args="(const Range &amp;range, Body &amp;body)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#ged143f31dd3d96ded02ab3db915b91c7">tbb::parallel_scan</a> (const Range &amp;range, Body &amp;body)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel prefix with default partitioner. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gc9fac8870b2e6365fb337014404529df"></a><!-- doxytag: member="algorithms::parallel_scan" ref="gc9fac8870b2e6365fb337014404529df" args="(const Range &amp;range, Body &amp;body, const simple_partitioner &amp;partitioner)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gc9fac8870b2e6365fb337014404529df">tbb::parallel_scan</a> (const Range &amp;range, Body &amp;body, const simple_partitioner &amp;partitioner)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel prefix with <a class="el" href="a00198.html">simple_partitioner</a>. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g62fde400a37bbca1a2fddc8e3d22f556"></a><!-- doxytag: member="algorithms::parallel_scan" ref="g62fde400a37bbca1a2fddc8e3d22f556" args="(const Range &amp;range, Body &amp;body, const auto_partitioner &amp;partitioner)" -->
+template&lt;typename Range, typename Body&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g62fde400a37bbca1a2fddc8e3d22f556">tbb::parallel_scan</a> (const Range &amp;range, Body &amp;body, const auto_partitioner &amp;partitioner)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Parallel prefix with <a class="el" href="a00150.html">auto_partitioner</a>. <br></td></tr>
+<tr><td colspan="2"><br><h2>parallel_sort</h2></td></tr>
+<tr><td colspan="2">See also requirements on <a class="el" href="parallel_sort_iter_req.html">iterators for parallel_sort</a>. <br><br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2">template&lt;typename RandomAccessIterator, typename Compare&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g49edcf9447cd91a9527a3f8e8512b7aa">tbb::parallel_sort</a> (RandomAccessIterator begin, RandomAccessIterator end, const Compare &amp;comp)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Sorts the data in [begin,end) using the given comparator.  <a href="#g49edcf9447cd91a9527a3f8e8512b7aa"></a><br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="g16c3eb77d0e530834c51ce3857f01012"></a><!-- doxytag: member="algorithms::parallel_sort" ref="g16c3eb77d0e530834c51ce3857f01012" args="(RandomAccessIterator begin, RandomAccessIterator end)" -->
+template&lt;typename RandomAccessIterator&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#g16c3eb77d0e530834c51ce3857f01012">tbb::parallel_sort</a> (RandomAccessIterator begin, RandomAccessIterator end)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Sorts the data in [begin,end) with a default comparator <code>std::less&lt;RandomAccessIterator&gt;</code>. <br></td></tr>
+<tr><td class="memTemplParams" nowrap colspan="2"><a class="anchor" name="gc7576f82fdedc8a701a6c17ad9415926"></a><!-- doxytag: member="algorithms::parallel_sort" ref="gc7576f82fdedc8a701a6c17ad9415926" args="(T *begin, T *end)" -->
+template&lt;typename T&gt; </td></tr>
+<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">void&nbsp;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="a00280.html#gc7576f82fdedc8a701a6c17ad9415926">tbb::parallel_sort</a> (T *begin, T *end)</td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Sorts the data in the range <code></code>[begin,end) with a default comparator <code>std::less&lt;T&gt;</code>. <br></td></tr>
+</table>
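
[Editor's note — illustrative sketch, not part of the committed TBB files.] The overload table above lists tbb::parallel_invoke for two to ten callables; a minimal use looks like the following, where f, g and h are placeholder functions. parallel_invoke returns only after all of them have completed.

    #include "tbb/parallel_invoke.h"

    void f() { /* first independent piece of work */ }
    void g() { /* second independent piece of work */ }
    void h() { /* third independent piece of work */ }

    int main()
    {
        // The three calls may execute concurrently on worker threads;
        // parallel_invoke returns once every one of them has finished.
        tbb::parallel_invoke( f, g, h );
        return 0;
    }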
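
[Editor's note — illustrative sketch, not part of the committed TBB files.] The functional form of tbb::parallel_reduce listed above takes an identity value, a RealBody and a Reduction. The sketch below sums a vector and assumes a compiler with C++0x/C++11 lambda support; with older compilers the two lambdas become hand-written function objects.

    #include <cstddef>
    #include <vector>
    #include "tbb/blocked_range.h"
    #include "tbb/parallel_reduce.h"

    // Sums the elements of v. The first lambda folds a sub-range into a
    // running partial sum (RealBody); the second combines two partial
    // sums (Reduction); 0.0f is the identity value.
    float parallel_sum( const std::vector<float>& v )
    {
        return tbb::parallel_reduce(
            tbb::blocked_range<std::size_t>( 0, v.size() ),
            0.0f,
            [&]( const tbb::blocked_range<std::size_t>& r, float partial ) -> float {
                for( std::size_t i = r.begin(); i != r.end(); ++i )
                    partial += v[i];
                return partial;
            },
            []( float x, float y ) { return x + y; } );
    }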
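
[Editor's note — illustrative sketch, not part of the committed TBB files.] tbb::parallel_scan, also listed above, needs a Body that supports both a pre-scan and a final-scan pass; the prefix-sum body below follows the documented Body requirements, with the container names chosen for illustration only.

    #include <cstddef>
    #include <vector>
    #include "tbb/blocked_range.h"
    #include "tbb/parallel_scan.h"

    // Running-sum body: operator()(range, pre_scan_tag) only accumulates,
    // operator()(range, final_scan_tag) also writes the prefix sums,
    // the splitting constructor starts a look-ahead body with sum 0,
    // reverse_join folds the left body's sum in, assign copies state back.
    struct running_sum {
        const std::vector<float>& in;
        std::vector<float>&       out;
        float                     sum;

        running_sum( const std::vector<float>& i, std::vector<float>& o )
            : in(i), out(o), sum(0) {}
        running_sum( running_sum& b, tbb::split )
            : in(b.in), out(b.out), sum(0) {}

        template<typename Tag>
        void operator()( const tbb::blocked_range<std::size_t>& r, Tag ) {
            float s = sum;
            for( std::size_t i = r.begin(); i != r.end(); ++i ) {
                s += in[i];
                if( Tag::is_final_scan() )
                    out[i] = s;
            }
            sum = s;
        }
        void reverse_join( running_sum& left ) { sum = left.sum + sum; }
        void assign( running_sum& b )          { sum = b.sum; }
    };

    void prefix_sums( const std::vector<float>& in, std::vector<float>& out )
    {
        running_sum body( in, out );
        tbb::parallel_scan( tbb::blocked_range<std::size_t>( 0, in.size() ), body );
    }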
+<hr><h2>Function Documentation</h2>
+<a class="anchor" name="g49edcf9447cd91a9527a3f8e8512b7aa"></a><!-- doxytag: member="tbb::parallel_sort" ref="g49edcf9447cd91a9527a3f8e8512b7aa" args="(RandomAccessIterator begin, RandomAccessIterator end, const Compare &amp;comp)" -->
+<div class="memitem">
+<div class="memproto">
+<div class="memtemplate">
+template&lt;typename RandomAccessIterator, typename Compare&gt; </div>
+      <table class="memname">
+        <tr>
+          <td class="memname">void tbb::parallel_sort           </td>
+          <td>(</td>
+          <td class="paramtype">RandomAccessIterator&nbsp;</td>
+          <td class="paramname"> <em>begin</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">RandomAccessIterator&nbsp;</td>
+          <td class="paramname"> <em>end</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const Compare &amp;&nbsp;</td>
+          <td class="paramname"> <em>comp</em></td><td>&nbsp;</td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td><td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
Sorts the data in [begin,end) using the given comparator. 
<p>
The compare function object is used for all comparisons between elements during sorting. The compare object must define a <code>bool operator()</code> taking two elements and returning true when the first should precede the second. 
+</div>
+</div><p>
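
[Editor's note — illustrative sketch, not part of the committed TBB files.] A typical call to the comparator overload documented just above; std::greater&lt;int&gt; is only one possible Compare and is not implied by the TBB sources.

    #include <functional>
    #include <vector>
    #include "tbb/parallel_sort.h"

    int main()
    {
        std::vector<int> data;
        for( int i = 0; i < 1000; ++i )
            data.push_back( (i * 37) % 257 );

        // Sort in descending order; omitting the third argument would use
        // the default std::less comparison and sort ascending instead.
        tbb::parallel_sort( data.begin(), data.end(), std::greater<int>() );
        return 0;
    }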
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00281.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00281.html
new file mode 100644 (file)
index 0000000..2416144
--- /dev/null
@@ -0,0 +1,50 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Containers</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<h1>Containers</h1><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00158.html">tbb::combinable&lt; T &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Thread-local storage with optional reduction.  <a href="a00158.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Unordered map from Key to T.  <a href="a00160.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A high-performance thread-safe non-blocking concurrent queue.  <a href="a00164.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A high-performance thread-safe blocking concurrent bounded queue.  <a href="a00159.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00165.html">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A high-performance thread-safe blocking concurrent bounded queue.  <a href="a00165.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Concurrent vector container.  <a href="a00166.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">The <a class="el" href="a00168.html">enumerable_thread_specific</a> container.  <a href="a00168.html#_details">More...</a><br></td></tr>
+</table>
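
[Editor's note — illustrative sketch, not part of the committed TBB files.] A hedged usage example for one of the containers listed above, tbb::concurrent_hash_map; the word-count scenario and the helper name are illustrative. The default hash compare handles std::string keys.

    #include <string>
    #include "tbb/concurrent_hash_map.h"

    typedef tbb::concurrent_hash_map<std::string, int> word_count_table;

    // Safe to call concurrently from many threads: the accessor holds a
    // write lock on the element for as long as it stays in scope.
    void count_word( word_count_table& table, const std::string& word )
    {
        word_count_table::accessor a;
        if( table.insert( a, word ) )   // true if the entry was just created
            a->second = 0;
        ++a->second;
    }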
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00282.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00282.html
new file mode 100644 (file)
index 0000000..fe14cbe
--- /dev/null
@@ -0,0 +1,313 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Memory Allocation</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<h1>Memory Allocation</h1><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00146.html">tbb::aligned_space&lt; T, N &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Block of space aligned sufficiently to construct an array T with N elements.  <a href="a00146.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5.  <a href="a00155.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00156.html">tbb::cache_aligned_allocator&lt; void &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Analogous to std::allocator&lt;void&gt;, as defined in ISO C++ Standard, Section 20.4.1.  <a href="a00156.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5.  <a href="a00196.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00197.html">tbb::scalable_allocator&lt; void &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Analogous to std::allocator&lt;void&gt;, as defined in ISO C++ Standard, Section 20.4.1.  <a href="a00197.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5.  <a href="a00209.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00210.html">tbb::tbb_allocator&lt; void &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Analogous to std::allocator&lt;void&gt;, as defined in ISO C++ Standard, Section 20.4.1.  <a href="a00210.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5.  <a href="a00218.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00219.html">tbb::zero_allocator&lt; void, Allocator &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Analogous to std::allocator&lt;void&gt;, as defined in ISO C++ Standard, Section 20.4.1.  <a href="a00219.html#_details">More...</a><br></td></tr>
+<tr><td colspan="2"><br><h2>Functions</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void *__TBB_EXPORTED_FUNC&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gc25b8e6c76db0b346a8249796a7a2475">scalable_malloc</a> (size_t size)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void __TBB_EXPORTED_FUNC&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gca3579c21244dba9f0c351e5984d4565">scalable_free</a> (void *ptr)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void *__TBB_EXPORTED_FUNC&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#g951bbbbd2d041acb59ba5fa910b52543">scalable_realloc</a> (void *ptr, size_t size)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void *__TBB_EXPORTED_FUNC&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#g3f5a2fde0bcaa3eda35be32c8658f444">scalable_calloc</a> (size_t nobj, size_t size)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">int __TBB_EXPORTED_FUNC&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#g05dcec987480bb2c82ecdead6a085899">scalable_posix_memalign</a> (void **memptr, size_t alignment, size_t size)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void *__TBB_EXPORTED_FUNC&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gc1c7aaa1fe85c17ba5a3a96f7e8d89e7">scalable_aligned_malloc</a> (size_t size, size_t alignment)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void *__TBB_EXPORTED_FUNC&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#gbaea91376be80dfabd7c93eaffd9abaa">scalable_aligned_realloc</a> (void *ptr, size_t size, size_t alignment)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">void __TBB_EXPORTED_FUNC&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#g65a20e812012f15ec7442d5b45d0cba5">scalable_aligned_free</a> (void *ptr)</td></tr>
+
+<tr><td class="memItemLeft" nowrap align="right" valign="top">size_t __TBB_EXPORTED_FUNC&nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00282.html#g0965ce1b4b7835f92869c7fd867265f7">scalable_msize</a> (void *ptr)</td></tr>
+
+</table>
+<hr><h2>Function Documentation</h2>
+<a class="anchor" name="g65a20e812012f15ec7442d5b45d0cba5"></a><!-- doxytag: member="scalable_allocator.h::scalable_aligned_free" ref="g65a20e812012f15ec7442d5b45d0cba5" args="(void *ptr)" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void __TBB_EXPORTED_FUNC scalable_aligned_free           </td>
+          <td>(</td>
+          <td class="paramtype">void *&nbsp;</td>
+          <td class="paramname"> <em>ptr</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+The "_aligned_free" analogue. 
+</div>
+</div><p>
+<a class="anchor" name="gc1c7aaa1fe85c17ba5a3a96f7e8d89e7"></a><!-- doxytag: member="scalable_allocator.h::scalable_aligned_malloc" ref="gc1c7aaa1fe85c17ba5a3a96f7e8d89e7" args="(size_t size, size_t alignment)" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void* __TBB_EXPORTED_FUNC scalable_aligned_malloc           </td>
+          <td>(</td>
+          <td class="paramtype">size_t&nbsp;</td>
+          <td class="paramname"> <em>size</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">size_t&nbsp;</td>
+          <td class="paramname"> <em>alignment</em></td><td>&nbsp;</td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td><td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+The "_aligned_malloc" analogue. 
+</div>
+</div><p>
+<a class="anchor" name="gbaea91376be80dfabd7c93eaffd9abaa"></a><!-- doxytag: member="scalable_allocator.h::scalable_aligned_realloc" ref="gbaea91376be80dfabd7c93eaffd9abaa" args="(void *ptr, size_t size, size_t alignment)" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void* __TBB_EXPORTED_FUNC scalable_aligned_realloc           </td>
+          <td>(</td>
+          <td class="paramtype">void *&nbsp;</td>
+          <td class="paramname"> <em>ptr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">size_t&nbsp;</td>
+          <td class="paramname"> <em>size</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">size_t&nbsp;</td>
+          <td class="paramname"> <em>alignment</em></td><td>&nbsp;</td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td><td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+The "_aligned_realloc" analogue. 
+</div>
+</div><p>
+<a class="anchor" name="g3f5a2fde0bcaa3eda35be32c8658f444"></a><!-- doxytag: member="scalable_allocator.h::scalable_calloc" ref="g3f5a2fde0bcaa3eda35be32c8658f444" args="(size_t nobj, size_t size)" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void* __TBB_EXPORTED_FUNC scalable_calloc           </td>
+          <td>(</td>
+          <td class="paramtype">size_t&nbsp;</td>
+          <td class="paramname"> <em>nobj</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">size_t&nbsp;</td>
+          <td class="paramname"> <em>size</em></td><td>&nbsp;</td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td><td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+The "calloc" analogue complementing scalable_malloc. 
+</div>
+</div><p>
+<a class="anchor" name="gca3579c21244dba9f0c351e5984d4565"></a><!-- doxytag: member="scalable_allocator.h::scalable_free" ref="gca3579c21244dba9f0c351e5984d4565" args="(void *ptr)" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void __TBB_EXPORTED_FUNC scalable_free           </td>
+          <td>(</td>
+          <td class="paramtype">void *&nbsp;</td>
+          <td class="paramname"> <em>ptr</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+The "free" analogue to discard a previously allocated piece of memory. 
+</div>
+</div><p>
+<a class="anchor" name="gc25b8e6c76db0b346a8249796a7a2475"></a><!-- doxytag: member="scalable_allocator.h::scalable_malloc" ref="gc25b8e6c76db0b346a8249796a7a2475" args="(size_t size)" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void* __TBB_EXPORTED_FUNC scalable_malloc           </td>
+          <td>(</td>
+          <td class="paramtype">size_t&nbsp;</td>
+          <td class="paramname"> <em>size</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+The "malloc" analogue to allocate block of memory of size bytes. 
+</div>
+</div><p>
+<a class="anchor" name="g0965ce1b4b7835f92869c7fd867265f7"></a><!-- doxytag: member="scalable_allocator.h::scalable_msize" ref="g0965ce1b4b7835f92869c7fd867265f7" args="(void *ptr)" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">size_t __TBB_EXPORTED_FUNC scalable_msize           </td>
+          <td>(</td>
+          <td class="paramtype">void *&nbsp;</td>
+          <td class="paramname"> <em>ptr</em>          </td>
+          <td>&nbsp;)&nbsp;</td>
+          <td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+The analogue of _msize/malloc_size/malloc_usable_size. Returns the usable size of a memory block previously allocated by scalable_*, or 0 (zero) if ptr does not point to such a block. 
+</div>
+</div><p>
+<a class="anchor" name="g05dcec987480bb2c82ecdead6a085899"></a><!-- doxytag: member="scalable_allocator.h::scalable_posix_memalign" ref="g05dcec987480bb2c82ecdead6a085899" args="(void **memptr, size_t alignment, size_t size)" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int __TBB_EXPORTED_FUNC scalable_posix_memalign           </td>
+          <td>(</td>
+          <td class="paramtype">void **&nbsp;</td>
+          <td class="paramname"> <em>memptr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">size_t&nbsp;</td>
+          <td class="paramname"> <em>alignment</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">size_t&nbsp;</td>
+          <td class="paramname"> <em>size</em></td><td>&nbsp;</td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td><td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+The "posix_memalign" analogue. 
+</div>
+</div><p>
+<a class="anchor" name="g951bbbbd2d041acb59ba5fa910b52543"></a><!-- doxytag: member="scalable_allocator.h::scalable_realloc" ref="g951bbbbd2d041acb59ba5fa910b52543" args="(void *ptr, size_t size)" -->
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void* __TBB_EXPORTED_FUNC scalable_realloc           </td>
+          <td>(</td>
+          <td class="paramtype">void *&nbsp;</td>
+          <td class="paramname"> <em>ptr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">size_t&nbsp;</td>
+          <td class="paramname"> <em>size</em></td><td>&nbsp;</td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td><td width="100%"></td>
+        </tr>
+      </table>
+</div>
+<div class="memdoc">
+
+<p>
+The "realloc" analogue complementing scalable_malloc. 
+</div>
+</div><p>
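
[Editor's note — illustrative sketch, not part of the committed TBB files.] A small program exercising the C-level entry points documented above; it assumes the executable is linked against the tbbmalloc library that provides them.

    #include <cstring>
    #include "tbb/scalable_allocator.h"

    int main()
    {
        // malloc/realloc/free analogues served by the scalable allocator.
        void* p = scalable_malloc( 1024 );
        if( !p ) return 1;
        std::memset( p, 0, scalable_msize( p ) );   // usable size, >= 1024
        void* bigger = scalable_realloc( p, 4096 );
        if( bigger ) p = bigger;

        // Aligned variants mirroring _aligned_malloc/_aligned_free.
        void* q = scalable_aligned_malloc( 256, 64 );
        if( q ) scalable_aligned_free( q );

        scalable_free( p );
        return 0;
    }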
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00283.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00283.html
new file mode 100644 (file)
index 0000000..545fd45
--- /dev/null
@@ -0,0 +1,59 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Synchronization</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<h1>Synchronization</h1><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">struct &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00147.html">tbb::atomic&lt; T &gt;</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Primary template for atomic.  <a href="a00147.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00177.html">tbb::mutex</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Wrapper around the platform's native reader-writer lock.  <a href="a00177.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00179.html">tbb::null_mutex</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A mutex which does nothing.  <a href="a00179.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00181.html">tbb::null_rw_mutex</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A rw mutex which does nothing.  <a href="a00181.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00187.html">tbb::queuing_mutex</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Queuing lock with local-only spinning.  <a href="a00187.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00189.html">tbb::queuing_rw_mutex</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Reader-writer lock with local-only spinning.  <a href="a00189.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Writer-preference reader-writer lock with local-only spinning on readers.  <a href="a00191.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00194.html">tbb::recursive_mutex</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Mutex that allows recursive mutex acquisition.  <a href="a00194.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00199.html">tbb::spin_mutex</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A lock that occupies a single byte.  <a href="a00199.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00201.html">tbb::spin_rw_mutex_v3</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Fast, unfair, spinning reader-writer lock with backoff and writer-preference.  <a href="a00201.html#_details">More...</a><br></td></tr>
+</table>
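
[Editor's note — illustrative sketch, not part of the committed TBB files.] Two of the primitives listed above used together, tbb::atomic and tbb::spin_mutex; the counter and registry names are placeholders.

    #include <vector>
    #include "tbb/atomic.h"
    #include "tbb/spin_mutex.h"

    tbb::atomic<long> hits;            // namespace-scope atomics start at zero
    tbb::spin_mutex   registry_mutex;
    std::vector<int>  registry;

    void record( int value )
    {
        ++hits;                        // lock-free increment
        // scoped_lock releases the mutex when it goes out of scope,
        // including if push_back throws.
        tbb::spin_mutex::scoped_lock lock( registry_mutex );
        registry.push_back( value );
    }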
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00284.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00284.html
new file mode 100644 (file)
index 0000000..ae00fc4
--- /dev/null
@@ -0,0 +1,32 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Timing</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<h1>Timing</h1><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00215.html">tbb::tick_count</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Absolute timestamp.  <a href="a00215.html#_details">More...</a><br></td></tr>
+</table>
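
[Editor's note — illustrative sketch, not part of the committed TBB files.] Minimal timing with tbb::tick_count; do_work() stands in for whatever is being measured.

    #include <cstdio>
    #include "tbb/tick_count.h"

    void do_work() { /* code being timed */ }

    void time_do_work()
    {
        tbb::tick_count t0 = tbb::tick_count::now();
        do_work();
        tbb::tick_count t1 = tbb::tick_count::now();
        // Subtracting two tick_counts yields an interval; seconds()
        // converts it to wall-clock seconds as a double.
        std::printf( "do_work took %g seconds\n", ( t1 - t0 ).seconds() );
    }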
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00285.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00285.html
new file mode 100644 (file)
index 0000000..283409b
--- /dev/null
@@ -0,0 +1,44 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Task Scheduling</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<h1>Task Scheduling</h1><table border="0" cellpadding="0" cellspacing="0">
+<tr><td></td></tr>
+<tr><td colspan="2"><br><h2>Classes</h2></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00206.html">tbb::task_group_context</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Used to form groups of tasks.  <a href="a00206.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00204.html">tbb::task</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Base class for user-defined tasks.  <a href="a00204.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00167.html">tbb::empty_task</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">task that does nothing. Useful for synchronization.  <a href="a00167.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00207.html">tbb::task_list</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">A list of children.  <a href="a00207.html#_details">More...</a><br></td></tr>
+<tr><td class="memItemLeft" nowrap align="right" valign="top">class &nbsp;</td><td class="memItemRight" valign="bottom"><a class="el" href="a00208.html">tbb::task_scheduler_init</a></td></tr>
+
+<tr><td class="mdescLeft">&nbsp;</td><td class="mdescRight">Class representing reference to tbb scheduler.  <a href="a00208.html#_details">More...</a><br></td></tr>
+</table>
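
[Editor's note — illustrative sketch, not part of the committed TBB files.] The most common interaction with the task-scheduling classes above is simply constructing a tbb::task_scheduler_init; the explicit thread count of 4 below is an arbitrary illustration.

    #include "tbb/task_scheduler_init.h"

    int main()
    {
        // Worker threads exist while this object is alive. Passing
        // task_scheduler_init::automatic (the default) lets TBB pick one
        // worker per hardware thread; an explicit count overrides that.
        tbb::task_scheduler_init init( 4 );

        // ... run parallel_for / parallel_reduce / tasks here ...
        return 0;
    }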
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00286.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00286.html
new file mode 100644 (file)
index 0000000..6a92437
--- /dev/null
@@ -0,0 +1,939 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>_concurrent_queue_internal.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>_concurrent_queue_internal.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_concurrent_queue_internal_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_concurrent_queue_internal_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "tbb_stddef.h"</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#include "tbb_machine.h"</span>
+<a name="l00026"></a>00026 <span class="preprocessor">#include "atomic.h"</span>
+<a name="l00027"></a>00027 <span class="preprocessor">#include "spin_mutex.h"</span>
+<a name="l00028"></a>00028 <span class="preprocessor">#include "cache_aligned_allocator.h"</span>
+<a name="l00029"></a>00029 <span class="preprocessor">#include "tbb_exception.h"</span>
+<a name="l00030"></a>00030 <span class="preprocessor">#include &lt;new&gt;</span>
+<a name="l00031"></a>00031 
+<a name="l00032"></a>00032 <span class="preprocessor">#if !TBB_USE_EXCEPTIONS &amp;&amp; _MSC_VER</span>
+<a name="l00033"></a>00033 <span class="preprocessor"></span>    <span class="comment">// Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers</span>
+<a name="l00034"></a>00034 <span class="preprocessor">    #pragma warning (push)</span>
+<a name="l00035"></a>00035 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (disable: 4530)</span>
+<a name="l00036"></a>00036 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00037"></a>00037 <span class="preprocessor"></span>
+<a name="l00038"></a>00038 <span class="preprocessor">#include &lt;iterator&gt;</span>
+<a name="l00039"></a>00039 
+<a name="l00040"></a>00040 <span class="preprocessor">#if !TBB_USE_EXCEPTIONS &amp;&amp; _MSC_VER</span>
+<a name="l00041"></a>00041 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (pop)</span>
+<a name="l00042"></a>00042 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00043"></a>00043 <span class="preprocessor"></span>
+<a name="l00044"></a>00044 
+<a name="l00045"></a><a class="code" href="a00272.html">00045</a> <span class="keyword">namespace </span>tbb {
+<a name="l00046"></a>00046 
+<a name="l00047"></a>00047 <span class="preprocessor">#if !__TBB_TEMPLATE_FRIENDS_BROKEN</span>
+<a name="l00048"></a>00048 <span class="preprocessor"></span>
+<a name="l00049"></a>00049 <span class="comment">// forward declaration</span>
+<a name="l00050"></a><a class="code" href="a00279.html">00050</a> <span class="keyword">namespace </span>strict_ppl {
+<a name="l00051"></a>00051 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> A&gt; <span class="keyword">class </span><a class="code" href="a00164.html">concurrent_queue</a>;
+<a name="l00052"></a>00052 }
+<a name="l00053"></a>00053 
+<a name="l00054"></a>00054 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> A&gt; <span class="keyword">class </span><a class="code" href="a00159.html">concurrent_bounded_queue</a>;
+<a name="l00055"></a>00055 
+<a name="l00056"></a>00056 <span class="keyword">namespace </span>deprecated {
+<a name="l00057"></a>00057 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> A&gt; <span class="keyword">class </span><a class="code" href="a00164.html">concurrent_queue</a>;
+<a name="l00058"></a>00058 }
+<a name="l00059"></a>00059 <span class="preprocessor">#endif</span>
+<a name="l00060"></a>00060 <span class="preprocessor"></span>
+<a name="l00062"></a>00062 <span class="keyword">namespace </span>strict_ppl {
+<a name="l00063"></a>00063 
+<a name="l00065"></a>00065 <span class="keyword">namespace </span>internal {
+<a name="l00066"></a>00066 
+<a name="l00067"></a>00067 <span class="keyword">using namespace </span>tbb::internal;
+<a name="l00068"></a>00068 
+<a name="l00069"></a>00069 <span class="keyword">typedef</span> size_t ticket;
+<a name="l00070"></a>00070 
+<a name="l00071"></a>00071 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt; <span class="keyword">class </span>micro_queue ;
+<a name="l00072"></a>00072 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt; <span class="keyword">class </span>micro_queue_pop_finalizer ;
+<a name="l00073"></a>00073 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt; <span class="keyword">class </span>concurrent_queue_base_v3;
+<a name="l00074"></a>00074 
+<a name="l00076"></a>00076 
+<a name="l00079"></a>00079 <span class="keyword">struct </span>concurrent_queue_rep_base : no_copy {
+<a name="l00080"></a>00080     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt; <span class="keyword">friend</span> <span class="keyword">class </span>micro_queue;
+<a name="l00081"></a>00081     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt; <span class="keyword">friend</span> <span class="keyword">class </span>concurrent_queue_base_v3;
+<a name="l00082"></a>00082 
+<a name="l00083"></a>00083 <span class="keyword">protected</span>:
+<a name="l00085"></a>00085     <span class="keyword">static</span> <span class="keyword">const</span> size_t phi = 3;
+<a name="l00086"></a>00086 
+<a name="l00087"></a>00087 <span class="keyword">public</span>:
+<a name="l00088"></a>00088     <span class="comment">// must be power of 2</span>
+<a name="l00089"></a>00089     <span class="keyword">static</span> <span class="keyword">const</span> size_t n_queue = 8;
+<a name="l00090"></a>00090 
+<a name="l00092"></a>00092     <span class="keyword">struct </span>page {
+<a name="l00093"></a>00093         page* next;
+<a name="l00094"></a>00094         uintptr_t mask; 
+<a name="l00095"></a>00095     };
+<a name="l00096"></a>00096 
+<a name="l00097"></a>00097     atomic&lt;ticket&gt; head_counter;
+<a name="l00098"></a>00098     <span class="keywordtype">char</span> pad1[NFS_MaxLineSize-<span class="keyword">sizeof</span>(atomic&lt;ticket&gt;)];
+<a name="l00099"></a>00099     atomic&lt;ticket&gt; tail_counter;
+<a name="l00100"></a>00100     <span class="keywordtype">char</span> pad2[NFS_MaxLineSize-<span class="keyword">sizeof</span>(atomic&lt;ticket&gt;)];
+<a name="l00101"></a>00101 
+<a name="l00103"></a>00103     size_t items_per_page;
+<a name="l00104"></a>00104 
+<a name="l00106"></a>00106     size_t item_size;
+<a name="l00107"></a>00107 
+<a name="l00109"></a>00109     atomic&lt;size_t&gt; n_invalid_entries;
+<a name="l00110"></a>00110 
+<a name="l00111"></a>00111     <span class="keywordtype">char</span> pad3[NFS_MaxLineSize-<span class="keyword">sizeof</span>(size_t)-<span class="keyword">sizeof</span>(size_t)-<span class="keyword">sizeof</span>(atomic&lt;size_t&gt;)];
+<a name="l00112"></a>00112 } ;
+<a name="l00113"></a>00113 
+<a name="l00114"></a>00114 <span class="keyword">inline</span> <span class="keywordtype">bool</span> is_valid_page(<span class="keyword">const</span> concurrent_queue_rep_base::page* p) {
+<a name="l00115"></a>00115     <span class="keywordflow">return</span> uintptr_t(p)&gt;1;
+<a name="l00116"></a>00116 }
+<a name="l00117"></a>00117 
+<a name="l00119"></a>00119 
+<a name="l00122"></a>00122 <span class="keyword">class </span>concurrent_queue_page_allocator
+<a name="l00123"></a>00123 {
+<a name="l00124"></a>00124     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt; <span class="keyword">friend</span> <span class="keyword">class </span>micro_queue ;
+<a name="l00125"></a>00125     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt; <span class="keyword">friend</span> <span class="keyword">class </span>micro_queue_pop_finalizer ;
+<a name="l00126"></a>00126 <span class="keyword">protected</span>:
+<a name="l00127"></a>00127     <span class="keyword">virtual</span> ~concurrent_queue_page_allocator() {}
+<a name="l00128"></a>00128 <span class="keyword">private</span>:
+<a name="l00129"></a>00129     <span class="keyword">virtual</span> concurrent_queue_rep_base::page* allocate_page() = 0;
+<a name="l00130"></a>00130     <span class="keyword">virtual</span> <span class="keywordtype">void</span> deallocate_page( concurrent_queue_rep_base::page* p ) = 0;
+<a name="l00131"></a>00131 } ;
+<a name="l00132"></a>00132 
+<a name="l00133"></a>00133 <span class="preprocessor">#if _MSC_VER &amp;&amp; !defined(__INTEL_COMPILER)</span>
+<a name="l00134"></a>00134 <span class="preprocessor"></span><span class="comment">// unary minus operator applied to unsigned type, result still unsigned</span>
+<a name="l00135"></a>00135 <span class="preprocessor">#pragma warning( push )</span>
+<a name="l00136"></a>00136 <span class="preprocessor"></span><span class="preprocessor">#pragma warning( disable: 4146 )</span>
+<a name="l00137"></a>00137 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00138"></a>00138 <span class="preprocessor"></span>
+<a name="l00140"></a>00140 
+<a name="l00142"></a>00142 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00143"></a>00143 <span class="keyword">class </span>micro_queue : no_copy {
+<a name="l00144"></a>00144     <span class="keyword">typedef</span> concurrent_queue_rep_base::page page;
+<a name="l00145"></a>00145 
+<a name="l00147"></a>00147     <span class="keyword">class </span>destroyer: no_copy {
+<a name="l00148"></a>00148         T&amp; my_value;
+<a name="l00149"></a>00149     <span class="keyword">public</span>:
+<a name="l00150"></a>00150         destroyer( T&amp; value ) : my_value(value) {}
+<a name="l00151"></a>00151         ~destroyer() {my_value.~T();}          
+<a name="l00152"></a>00152     };
+<a name="l00153"></a>00153 
+<a name="l00154"></a>00154     <span class="keywordtype">void</span> copy_item( page&amp; dst, size_t index, <span class="keyword">const</span> <span class="keywordtype">void</span>* src ) {
+<a name="l00155"></a>00155         <span class="keyword">new</span>( &amp;get_ref(dst,index) ) T(*static_cast&lt;const T*&gt;(src)); 
+<a name="l00156"></a>00156     }
+<a name="l00157"></a>00157 
+<a name="l00158"></a>00158     <span class="keywordtype">void</span> copy_item( page&amp; dst, size_t dindex, <span class="keyword">const</span> page&amp; src, size_t sindex ) {
+<a name="l00159"></a>00159         <span class="keyword">new</span>( &amp;get_ref(dst,dindex) ) T( get_ref(const_cast&lt;page&amp;&gt;(src),sindex) );
+<a name="l00160"></a>00160     }
+<a name="l00161"></a>00161 
+<a name="l00162"></a>00162     <span class="keywordtype">void</span> assign_and_destroy_item( <span class="keywordtype">void</span>* dst, page&amp; src, size_t index ) {
+<a name="l00163"></a>00163         T&amp; from = get_ref(src,index);
+<a name="l00164"></a>00164         destroyer d(from);
+<a name="l00165"></a>00165         *static_cast&lt;T*&gt;(dst) = from;
+<a name="l00166"></a>00166     }
+<a name="l00167"></a>00167 
+<a name="l00168"></a>00168     <span class="keywordtype">void</span> spin_wait_until_my_turn( atomic&lt;ticket&gt;&amp; counter, ticket k, concurrent_queue_rep_base&amp; rb ) <span class="keyword">const </span>;
+<a name="l00169"></a>00169 
+<a name="l00170"></a>00170 <span class="keyword">public</span>:
+<a name="l00171"></a>00171     <span class="keyword">friend</span> <span class="keyword">class </span>micro_queue_pop_finalizer&lt;T&gt;;
+<a name="l00172"></a>00172 
+<a name="l00173"></a>00173     <span class="keyword">struct </span>padded_page: page {
+<a name="l00175"></a>00175         padded_page(); 
+<a name="l00177"></a>00177         <span class="keywordtype">void</span> operator=( <span class="keyword">const</span> padded_page&amp; );
+<a name="l00179"></a>00179         T last;
+<a name="l00180"></a>00180     };
+<a name="l00181"></a>00181 
+<a name="l00182"></a>00182     <span class="keyword">static</span> T&amp; get_ref( page&amp; p, size_t index ) {
+<a name="l00183"></a>00183         <span class="keywordflow">return</span> (&amp;static_cast&lt;padded_page*&gt;(static_cast&lt;void*&gt;(&amp;p))-&gt;last)[index];
+<a name="l00184"></a>00184     }
+<a name="l00185"></a>00185 
+<a name="l00186"></a>00186     atomic&lt;page*&gt; head_page;
+<a name="l00187"></a>00187     atomic&lt;ticket&gt; head_counter;
+<a name="l00188"></a>00188 
+<a name="l00189"></a>00189     atomic&lt;page*&gt; tail_page;
+<a name="l00190"></a>00190     atomic&lt;ticket&gt; tail_counter;
+<a name="l00191"></a>00191 
+<a name="l00192"></a>00192     spin_mutex page_mutex;
+<a name="l00193"></a>00193     
+<a name="l00194"></a>00194     <span class="keywordtype">void</span> push( <span class="keyword">const</span> <span class="keywordtype">void</span>* item, ticket k, concurrent_queue_base_v3&lt;T&gt;&amp; base ) ;
+<a name="l00195"></a>00195 
+<a name="l00196"></a>00196     <span class="keywordtype">bool</span> pop( <span class="keywordtype">void</span>* dst, ticket k, concurrent_queue_base_v3&lt;T&gt;&amp; base ) ;
+<a name="l00197"></a>00197 
+<a name="l00198"></a>00198     micro_queue&amp; assign( <span class="keyword">const</span> micro_queue&amp; src, concurrent_queue_base_v3&lt;T&gt;&amp; base ) ;
+<a name="l00199"></a>00199 
+<a name="l00200"></a>00200     page* make_copy( concurrent_queue_base_v3&lt;T&gt;&amp; base, <span class="keyword">const</span> page* src_page, size_t begin_in_page, size_t end_in_page, ticket&amp; g_index ) ;
+<a name="l00201"></a>00201 
+<a name="l00202"></a>00202     <span class="keywordtype">void</span> invalidate_page_and_rethrow( ticket k ) ;
+<a name="l00203"></a>00203 };
+<a name="l00204"></a>00204 
+<a name="l00205"></a>00205 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00206"></a>00206 <span class="keywordtype">void</span> micro_queue&lt;T&gt;::spin_wait_until_my_turn( atomic&lt;ticket&gt;&amp; counter, ticket k, concurrent_queue_rep_base&amp; rb )<span class="keyword"> const </span>{
+<a name="l00207"></a>00207     atomic_backoff backoff;
+<a name="l00208"></a>00208     <span class="keywordflow">do</span> {
+<a name="l00209"></a>00209         backoff.pause();
+<a name="l00210"></a>00210         <span class="keywordflow">if</span>( counter&amp;1 ) {
+<a name="l00211"></a>00211             ++rb.n_invalid_entries;
+<a name="l00212"></a>00212             throw_exception( eid_bad_last_alloc );
+<a name="l00213"></a>00213         }
+<a name="l00214"></a>00214     } <span class="keywordflow">while</span>( counter!=k ) ;
+<a name="l00215"></a>00215 }
+<a name="l00216"></a>00216 
+<a name="l00217"></a>00217 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00218"></a>00218 <span class="keywordtype">void</span> micro_queue&lt;T&gt;::push( <span class="keyword">const</span> <span class="keywordtype">void</span>* item, ticket k, concurrent_queue_base_v3&lt;T&gt;&amp; base ) {
+<a name="l00219"></a>00219     k &amp;= -concurrent_queue_rep_base::n_queue;
+<a name="l00220"></a>00220     page* p = NULL;
+<a name="l00221"></a>00221     size_t index = k/concurrent_queue_rep_base::n_queue &amp; (base.my_rep-&gt;items_per_page-1);
+<a name="l00222"></a>00222     <span class="keywordflow">if</span>( !index ) {
+<a name="l00223"></a>00223         __TBB_TRY {
+<a name="l00224"></a>00224             concurrent_queue_page_allocator&amp; pa = base;
+<a name="l00225"></a>00225             p = pa.allocate_page();
+<a name="l00226"></a>00226         } __TBB_CATCH (...) {
+<a name="l00227"></a>00227             ++base.my_rep-&gt;n_invalid_entries;
+<a name="l00228"></a>00228             invalidate_page_and_rethrow( k );
+<a name="l00229"></a>00229         }
+<a name="l00230"></a>00230         p-&gt;mask = 0;
+<a name="l00231"></a>00231         p-&gt;next = NULL;
+<a name="l00232"></a>00232     }
+<a name="l00233"></a>00233     
+<a name="l00234"></a>00234     <span class="keywordflow">if</span>( tail_counter!=k ) spin_wait_until_my_turn( tail_counter, k, *base.my_rep );
+<a name="l00235"></a>00235         
+<a name="l00236"></a>00236     <span class="keywordflow">if</span>( p ) {
+<a name="l00237"></a>00237         spin_mutex::scoped_lock lock( page_mutex );
+<a name="l00238"></a>00238         page* q = tail_page;
+<a name="l00239"></a>00239         <span class="keywordflow">if</span>( is_valid_page(q) )
+<a name="l00240"></a>00240             q-&gt;next = p;
+<a name="l00241"></a>00241         <span class="keywordflow">else</span>
+<a name="l00242"></a>00242             head_page = p; 
+<a name="l00243"></a>00243         tail_page = p;
+<a name="l00244"></a>00244     } <span class="keywordflow">else</span> {
+<a name="l00245"></a>00245         p = tail_page;
+<a name="l00246"></a>00246     }
+<a name="l00247"></a>00247    
+<a name="l00248"></a>00248     __TBB_TRY {
+<a name="l00249"></a>00249         copy_item( *p, index, item );
+<a name="l00250"></a>00250         <span class="comment">// If no exception was thrown, mark item as present.</span>
+<a name="l00251"></a>00251         p-&gt;mask |= uintptr_t(1)&lt;&lt;index;
+<a name="l00252"></a>00252         tail_counter += concurrent_queue_rep_base::n_queue; 
+<a name="l00253"></a>00253     } __TBB_CATCH (...) {
+<a name="l00254"></a>00254         ++base.my_rep-&gt;n_invalid_entries;
+<a name="l00255"></a>00255         tail_counter += concurrent_queue_rep_base::n_queue; 
+<a name="l00256"></a>00256         __TBB_RETHROW();
+<a name="l00257"></a>00257     }
+<a name="l00258"></a>00258 }
+<a name="l00259"></a>00259 
+<a name="l00260"></a>00260 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00261"></a>00261 <span class="keywordtype">bool</span> micro_queue&lt;T&gt;::pop( <span class="keywordtype">void</span>* dst, ticket k, concurrent_queue_base_v3&lt;T&gt;&amp; base ) {
+<a name="l00262"></a>00262     k &amp;= -concurrent_queue_rep_base::n_queue;
+<a name="l00263"></a>00263     <span class="keywordflow">if</span>( head_counter!=k ) spin_wait_until_eq( head_counter, k );
+<a name="l00264"></a>00264     <span class="keywordflow">if</span>( tail_counter==k ) spin_wait_while_eq( tail_counter, k );
+<a name="l00265"></a>00265     page&amp; p = *head_page;
+<a name="l00266"></a>00266     __TBB_ASSERT( &amp;p, NULL );
+<a name="l00267"></a>00267     size_t index = k/concurrent_queue_rep_base::n_queue &amp; (base.my_rep-&gt;items_per_page-1);
+<a name="l00268"></a>00268     <span class="keywordtype">bool</span> success = <span class="keyword">false</span>; 
+<a name="l00269"></a>00269     {
+<a name="l00270"></a>00270         micro_queue_pop_finalizer&lt;T&gt; finalizer( *<span class="keyword">this</span>, base, k+concurrent_queue_rep_base::n_queue, index==base.my_rep-&gt;items_per_page-1 ? &amp;p : NULL ); 
+<a name="l00271"></a>00271         <span class="keywordflow">if</span>( p.mask &amp; uintptr_t(1)&lt;&lt;index ) {
+<a name="l00272"></a>00272             success = <span class="keyword">true</span>;
+<a name="l00273"></a>00273             assign_and_destroy_item( dst, p, index );
+<a name="l00274"></a>00274         } <span class="keywordflow">else</span> {
+<a name="l00275"></a>00275             --base.my_rep-&gt;n_invalid_entries;
+<a name="l00276"></a>00276         }
+<a name="l00277"></a>00277     }
+<a name="l00278"></a>00278     <span class="keywordflow">return</span> success;
+<a name="l00279"></a>00279 }
+<a name="l00280"></a>00280 
+<a name="l00281"></a>00281 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00282"></a>00282 micro_queue&lt;T&gt;&amp; micro_queue&lt;T&gt;::assign( <span class="keyword">const</span> micro_queue&lt;T&gt;&amp; src, concurrent_queue_base_v3&lt;T&gt;&amp; base ) {
+<a name="l00283"></a>00283     head_counter = src.head_counter;
+<a name="l00284"></a>00284     tail_counter = src.tail_counter;
+<a name="l00285"></a>00285     page_mutex   = src.page_mutex;
+<a name="l00286"></a>00286 
+<a name="l00287"></a>00287     <span class="keyword">const</span> page* srcp = src.head_page;
+<a name="l00288"></a>00288     <span class="keywordflow">if</span>( is_valid_page(srcp) ) {
+<a name="l00289"></a>00289         ticket g_index = head_counter;
+<a name="l00290"></a>00290         __TBB_TRY {
+<a name="l00291"></a>00291             size_t n_items  = (tail_counter-head_counter)/concurrent_queue_rep_base::n_queue;
+<a name="l00292"></a>00292             size_t index = head_counter/concurrent_queue_rep_base::n_queue &amp; (base.my_rep-&gt;items_per_page-1);
+<a name="l00293"></a>00293             size_t end_in_first_page = (index+n_items&lt;base.my_rep-&gt;items_per_page)?(index+n_items):base.my_rep-&gt;items_per_page;
+<a name="l00294"></a>00294 
+<a name="l00295"></a>00295             head_page = make_copy( base, srcp, index, end_in_first_page, g_index );
+<a name="l00296"></a>00296             page* cur_page = head_page;
+<a name="l00297"></a>00297 
+<a name="l00298"></a>00298             <span class="keywordflow">if</span>( srcp != src.tail_page ) {
+<a name="l00299"></a>00299                 <span class="keywordflow">for</span>( srcp = srcp-&gt;next; srcp!=src.tail_page; srcp=srcp-&gt;next ) {
+<a name="l00300"></a>00300                     cur_page-&gt;next = make_copy( base, srcp, 0, base.my_rep-&gt;items_per_page, g_index );
+<a name="l00301"></a>00301                     cur_page = cur_page-&gt;next;
+<a name="l00302"></a>00302                 }
+<a name="l00303"></a>00303 
+<a name="l00304"></a>00304                 __TBB_ASSERT( srcp==src.tail_page, NULL );
+<a name="l00305"></a>00305                 size_t last_index = tail_counter/concurrent_queue_rep_base::n_queue &amp; (base.my_rep-&gt;items_per_page-1);
+<a name="l00306"></a>00306                 <span class="keywordflow">if</span>( last_index==0 ) last_index = base.my_rep-&gt;items_per_page;
+<a name="l00307"></a>00307 
+<a name="l00308"></a>00308                 cur_page-&gt;next = make_copy( base, srcp, 0, last_index, g_index );
+<a name="l00309"></a>00309                 cur_page = cur_page-&gt;next;
+<a name="l00310"></a>00310             }
+<a name="l00311"></a>00311             tail_page = cur_page;
+<a name="l00312"></a>00312         } __TBB_CATCH (...) {
+<a name="l00313"></a>00313             invalidate_page_and_rethrow( g_index );
+<a name="l00314"></a>00314         }
+<a name="l00315"></a>00315     } <span class="keywordflow">else</span> {
+<a name="l00316"></a>00316         head_page = tail_page = NULL;
+<a name="l00317"></a>00317     }
+<a name="l00318"></a>00318     <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00319"></a>00319 }
+<a name="l00320"></a>00320 
+<a name="l00321"></a>00321 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00322"></a>00322 <span class="keywordtype">void</span> micro_queue&lt;T&gt;::invalidate_page_and_rethrow( ticket k ) {
+<a name="l00323"></a>00323     <span class="comment">// Append an invalid page at address 1 so that no more pushes are allowed.</span>
+<a name="l00324"></a>00324     page* invalid_page = (page*)uintptr_t(1);
+<a name="l00325"></a>00325     {
+<a name="l00326"></a>00326         spin_mutex::scoped_lock lock( page_mutex );
+<a name="l00327"></a>00327         tail_counter = k+concurrent_queue_rep_base::n_queue+1;
+<a name="l00328"></a>00328         page* q = tail_page;
+<a name="l00329"></a>00329         <span class="keywordflow">if</span>( is_valid_page(q) )
+<a name="l00330"></a>00330             q-&gt;next = invalid_page;
+<a name="l00331"></a>00331         <span class="keywordflow">else</span>
+<a name="l00332"></a>00332             head_page = invalid_page;
+<a name="l00333"></a>00333         tail_page = invalid_page;
+<a name="l00334"></a>00334     }
+<a name="l00335"></a>00335     __TBB_RETHROW();
+<a name="l00336"></a>00336 }
+<a name="l00337"></a>00337 
+<a name="l00338"></a>00338 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00339"></a>00339 concurrent_queue_rep_base::page* micro_queue&lt;T&gt;::make_copy( concurrent_queue_base_v3&lt;T&gt;&amp; base, <span class="keyword">const</span> concurrent_queue_rep_base::page* src_page, size_t begin_in_page, size_t end_in_page, ticket&amp; g_index ) {
+<a name="l00340"></a>00340     concurrent_queue_page_allocator&amp; pa = base;
+<a name="l00341"></a>00341     page* new_page = pa.allocate_page();
+<a name="l00342"></a>00342     new_page-&gt;next = NULL;
+<a name="l00343"></a>00343     new_page-&gt;mask = src_page-&gt;mask;
+<a name="l00344"></a>00344     <span class="keywordflow">for</span>( ; begin_in_page!=end_in_page; ++begin_in_page, ++g_index )
+<a name="l00345"></a>00345         <span class="keywordflow">if</span>( new_page-&gt;mask &amp; uintptr_t(1)&lt;&lt;begin_in_page )
+<a name="l00346"></a>00346             copy_item( *new_page, begin_in_page, *src_page, begin_in_page );
+<a name="l00347"></a>00347     <span class="keywordflow">return</span> new_page;
+<a name="l00348"></a>00348 }
+<a name="l00349"></a>00349 
+<a name="l00350"></a>00350 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00351"></a>00351 <span class="keyword">class </span>micro_queue_pop_finalizer: no_copy {
+<a name="l00352"></a>00352     <span class="keyword">typedef</span> concurrent_queue_rep_base::page page;
+<a name="l00353"></a>00353     ticket my_ticket;
+<a name="l00354"></a>00354     micro_queue&lt;T&gt;&amp; my_queue;
+<a name="l00355"></a>00355     page* my_page; 
+<a name="l00356"></a>00356     concurrent_queue_page_allocator&amp; allocator;
+<a name="l00357"></a>00357 <span class="keyword">public</span>:
+<a name="l00358"></a>00358     micro_queue_pop_finalizer( micro_queue&lt;T&gt;&amp; queue, concurrent_queue_base_v3&lt;T&gt;&amp; b, ticket k, page* p ) :
+<a name="l00359"></a>00359         my_ticket(k), my_queue(queue), my_page(p), allocator(b)
+<a name="l00360"></a>00360     {}
+<a name="l00361"></a>00361     ~micro_queue_pop_finalizer() ;
+<a name="l00362"></a>00362 };
+<a name="l00363"></a>00363 
+<a name="l00364"></a>00364 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00365"></a>00365 micro_queue_pop_finalizer&lt;T&gt;::~micro_queue_pop_finalizer() {
+<a name="l00366"></a>00366     page* p = my_page;
+<a name="l00367"></a>00367     <span class="keywordflow">if</span>( is_valid_page(p) ) {
+<a name="l00368"></a>00368         spin_mutex::scoped_lock lock( my_queue.page_mutex );
+<a name="l00369"></a>00369         page* q = p-&gt;next;
+<a name="l00370"></a>00370         my_queue.head_page = q;
+<a name="l00371"></a>00371         <span class="keywordflow">if</span>( !is_valid_page(q) ) {
+<a name="l00372"></a>00372             my_queue.tail_page = NULL;
+<a name="l00373"></a>00373         }
+<a name="l00374"></a>00374     }
+<a name="l00375"></a>00375     my_queue.head_counter = my_ticket;
+<a name="l00376"></a>00376     <span class="keywordflow">if</span>( is_valid_page(p) ) {
+<a name="l00377"></a>00377         allocator.deallocate_page( p );
+<a name="l00378"></a>00378     }
+<a name="l00379"></a>00379 }
+<a name="l00380"></a>00380 
+<a name="l00381"></a>00381 <span class="preprocessor">#if _MSC_VER &amp;&amp; !defined(__INTEL_COMPILER)</span>
+<a name="l00382"></a>00382 <span class="preprocessor"></span><span class="preprocessor">#pragma warning( pop )</span>
+<a name="l00383"></a>00383 <span class="preprocessor"></span><span class="preprocessor">#endif // warning 4146 is back</span>
+<a name="l00384"></a>00384 <span class="preprocessor"></span>
+<a name="l00385"></a>00385 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt; <span class="keyword">class </span>concurrent_queue_iterator_rep ;
+<a name="l00386"></a>00386 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt; <span class="keyword">class </span>concurrent_queue_iterator_base_v3;
+<a name="l00387"></a>00387 
+<a name="l00389"></a>00389 
+<a name="l00392"></a>00392 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00393"></a>00393 <span class="keyword">struct </span>concurrent_queue_rep : <span class="keyword">public</span> concurrent_queue_rep_base {
+<a name="l00394"></a>00394     micro_queue&lt;T&gt; array[n_queue];
+<a name="l00395"></a>00395 
+<a name="l00397"></a>00397     <span class="keyword">static</span> size_t index( ticket k ) {
+<a name="l00398"></a>00398         <span class="keywordflow">return</span> k*phi%n_queue;
+<a name="l00399"></a>00399     }
+<a name="l00400"></a>00400 
+<a name="l00401"></a>00401     micro_queue&lt;T&gt;&amp; choose( ticket k ) {
+<a name="l00402"></a>00402         <span class="comment">// The formula here approximates LRU in a cache-oblivious way.</span>
+<a name="l00403"></a>00403         <span class="keywordflow">return</span> array[index(k)];
+<a name="l00404"></a>00404     }
+<a name="l00405"></a>00405 };
+<a name="l00406"></a>00406 
+<a name="l00408"></a>00408 
+<a name="l00412"></a>00412 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00413"></a>00413 <span class="keyword">class </span>concurrent_queue_base_v3: <span class="keyword">public</span> concurrent_queue_page_allocator {
+<a name="l00415"></a>00415     concurrent_queue_rep&lt;T&gt;* my_rep;
+<a name="l00416"></a>00416 
+<a name="l00417"></a>00417     <span class="keyword">friend</span> <span class="keyword">struct </span>concurrent_queue_rep&lt;T&gt;;
+<a name="l00418"></a>00418     <span class="keyword">friend</span> <span class="keyword">class </span>micro_queue&lt;T&gt;;
+<a name="l00419"></a>00419     <span class="keyword">friend</span> <span class="keyword">class </span>concurrent_queue_iterator_rep&lt;T&gt;;
+<a name="l00420"></a>00420     <span class="keyword">friend</span> <span class="keyword">class </span>concurrent_queue_iterator_base_v3&lt;T&gt;;
+<a name="l00421"></a>00421 
+<a name="l00422"></a>00422 <span class="keyword">protected</span>:
+<a name="l00423"></a>00423     <span class="keyword">typedef</span> <span class="keyword">typename</span> concurrent_queue_rep&lt;T&gt;::page page;
+<a name="l00424"></a>00424 
+<a name="l00425"></a>00425 <span class="keyword">private</span>:
+<a name="l00426"></a>00426     <span class="keyword">typedef</span> <span class="keyword">typename</span> micro_queue&lt;T&gt;::padded_page padded_page;
+<a name="l00427"></a>00427 
+<a name="l00428"></a>00428     <span class="comment">/* override */</span> <span class="keyword">virtual</span> page *allocate_page() {
+<a name="l00429"></a>00429         concurrent_queue_rep&lt;T&gt;&amp; r = *my_rep;
+<a name="l00430"></a>00430         size_t n = <span class="keyword">sizeof</span>(padded_page) + (r.items_per_page-1)*<span class="keyword">sizeof</span>(T);
+<a name="l00431"></a>00431         <span class="keywordflow">return</span> reinterpret_cast&lt;page*&gt;(allocate_block ( n ));
+<a name="l00432"></a>00432     }
+<a name="l00433"></a>00433 
+<a name="l00434"></a>00434     <span class="comment">/* override */</span> <span class="keyword">virtual</span> <span class="keywordtype">void</span> deallocate_page( concurrent_queue_rep_base::page *p ) {
+<a name="l00435"></a>00435         concurrent_queue_rep&lt;T&gt;&amp; r = *my_rep;
+<a name="l00436"></a>00436         size_t n = <span class="keyword">sizeof</span>(padded_page) + (r.items_per_page-1)*<span class="keyword">sizeof</span>(T);
+<a name="l00437"></a>00437         deallocate_block( reinterpret_cast&lt;void*&gt;(p), n );
+<a name="l00438"></a>00438     }
+<a name="l00439"></a>00439 
+<a name="l00441"></a>00441     <span class="keyword">virtual</span> <span class="keywordtype">void</span> *allocate_block( size_t n ) = 0;
+<a name="l00442"></a>00442 
+<a name="l00444"></a>00444     <span class="keyword">virtual</span> <span class="keywordtype">void</span> deallocate_block( <span class="keywordtype">void</span> *p, size_t n ) = 0;
+<a name="l00445"></a>00445 
+<a name="l00446"></a>00446 <span class="keyword">protected</span>:
+<a name="l00447"></a>00447     concurrent_queue_base_v3();
+<a name="l00448"></a>00448 
+<a name="l00449"></a>00449     <span class="comment">/* override */</span> <span class="keyword">virtual</span> ~concurrent_queue_base_v3() {
+<a name="l00450"></a>00450 <span class="preprocessor">#if __TBB_USE_ASSERT</span>
+<a name="l00451"></a>00451 <span class="preprocessor"></span>        size_t nq = my_rep-&gt;n_queue;
+<a name="l00452"></a>00452         <span class="keywordflow">for</span>( size_t i=0; i&lt;nq; i++ )
+<a name="l00453"></a>00453             __TBB_ASSERT( my_rep-&gt;array[i].tail_page==NULL, <span class="stringliteral">"pages were not freed properly"</span> );
+<a name="l00454"></a>00454 #endif <span class="comment">/* __TBB_USE_ASSERT */</span>
+<a name="l00455"></a>00455         cache_aligned_allocator&lt;concurrent_queue_rep&lt;T&gt; &gt;().deallocate(my_rep,1);
+<a name="l00456"></a>00456     }
+<a name="l00457"></a>00457 
+<a name="l00459"></a>00459     <span class="keywordtype">void</span> internal_push( <span class="keyword">const</span> <span class="keywordtype">void</span>* src ) {
+<a name="l00460"></a>00460         concurrent_queue_rep&lt;T&gt;&amp; r = *my_rep;
+<a name="l00461"></a>00461         ticket k = r.tail_counter++;
+<a name="l00462"></a>00462         r.choose(k).push( src, k, *<span class="keyword">this</span> );
+<a name="l00463"></a>00463     }
+<a name="l00464"></a>00464 
+<a name="l00466"></a>00466 
+<a name="l00467"></a>00467     <span class="keywordtype">bool</span> internal_try_pop( <span class="keywordtype">void</span>* dst ) ;
+<a name="l00468"></a>00468 
+<a name="l00470"></a>00470     size_t internal_size() <span class="keyword">const </span>;
+<a name="l00471"></a>00471 
+<a name="l00473"></a>00473     <span class="keywordtype">bool</span> internal_empty() <span class="keyword">const </span>;
+<a name="l00474"></a>00474 
+<a name="l00476"></a>00476     <span class="comment">/* note that the name may be misleading, but it remains so due to a historical accident. */</span>
+<a name="l00477"></a>00477     <span class="keywordtype">void</span> internal_finish_clear() ;
+<a name="l00478"></a>00478 
+<a name="l00480"></a>00480     <span class="keywordtype">void</span> internal_throw_exception()<span class="keyword"> const </span>{
+<a name="l00481"></a>00481         throw_exception( eid_bad_alloc );
+<a name="l00482"></a>00482     }
+<a name="l00483"></a>00483 
+<a name="l00485"></a>00485     <span class="keywordtype">void</span> assign( <span class="keyword">const</span> concurrent_queue_base_v3&amp; src ) ;
+<a name="l00486"></a>00486 };
+<a name="l00487"></a>00487 
+<a name="l00488"></a>00488 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00489"></a>00489 concurrent_queue_base_v3&lt;T&gt;::concurrent_queue_base_v3() {
+<a name="l00490"></a>00490     <span class="keyword">const</span> size_t item_size = <span class="keyword">sizeof</span>(T);
+<a name="l00491"></a>00491     my_rep = cache_aligned_allocator&lt;concurrent_queue_rep&lt;T&gt; &gt;().allocate(1);
+<a name="l00492"></a>00492     __TBB_ASSERT( (size_t)my_rep % NFS_GetLineSize()==0, <span class="stringliteral">"alignment error"</span> );
+<a name="l00493"></a>00493     __TBB_ASSERT( (size_t)&amp;my_rep-&gt;head_counter % NFS_GetLineSize()==0, <span class="stringliteral">"alignment error"</span> );
+<a name="l00494"></a>00494     __TBB_ASSERT( (size_t)&amp;my_rep-&gt;tail_counter % NFS_GetLineSize()==0, <span class="stringliteral">"alignment error"</span> );
+<a name="l00495"></a>00495     __TBB_ASSERT( (size_t)&amp;my_rep-&gt;array % NFS_GetLineSize()==0, <span class="stringliteral">"alignment error"</span> );
+<a name="l00496"></a>00496     memset(my_rep,0,<span class="keyword">sizeof</span>(concurrent_queue_rep&lt;T&gt;));
+<a name="l00497"></a>00497     my_rep-&gt;item_size = item_size;
+<a name="l00498"></a>00498     my_rep-&gt;items_per_page = item_size&lt;=8 ? 32 :
+<a name="l00499"></a>00499                              item_size&lt;=16 ? 16 : 
+<a name="l00500"></a>00500                              item_size&lt;=32 ? 8 :
+<a name="l00501"></a>00501                              item_size&lt;=64 ? 4 :
+<a name="l00502"></a>00502                              item_size&lt;=128 ? 2 :
+<a name="l00503"></a>00503                              1;
+<a name="l00504"></a>00504 }
+<a name="l00505"></a>00505 
+<a name="l00506"></a>00506 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00507"></a>00507 <span class="keywordtype">bool</span> concurrent_queue_base_v3&lt;T&gt;::internal_try_pop( <span class="keywordtype">void</span>* dst ) {
+<a name="l00508"></a>00508     concurrent_queue_rep&lt;T&gt;&amp; r = *my_rep;
+<a name="l00509"></a>00509     ticket k;
+<a name="l00510"></a>00510     <span class="keywordflow">do</span> {
+<a name="l00511"></a>00511         k = r.head_counter;
+<a name="l00512"></a>00512         <span class="keywordflow">for</span>(;;) {
+<a name="l00513"></a>00513             <span class="keywordflow">if</span>( r.tail_counter&lt;=k ) {
+<a name="l00514"></a>00514                 <span class="comment">// Queue is empty </span>
+<a name="l00515"></a>00515                 <span class="keywordflow">return</span> <span class="keyword">false</span>;
+<a name="l00516"></a>00516             }
+<a name="l00517"></a>00517             <span class="comment">// Queue had item with ticket k when we looked.  Attempt to get that item.</span>
+<a name="l00518"></a>00518             ticket tk=k;
+<a name="l00519"></a>00519 <span class="preprocessor">#if defined(_MSC_VER) &amp;&amp; defined(_Wp64)</span>
+<a name="l00520"></a>00520 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (push)</span>
+<a name="l00521"></a>00521 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (disable: 4267)</span>
+<a name="l00522"></a>00522 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00523"></a>00523 <span class="preprocessor"></span>            k = r.head_counter.compare_and_swap( tk+1, tk );
+<a name="l00524"></a>00524 <span class="preprocessor">#if defined(_MSC_VER) &amp;&amp; defined(_Wp64)</span>
+<a name="l00525"></a>00525 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (pop)</span>
+<a name="l00526"></a>00526 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00527"></a>00527 <span class="preprocessor"></span>            <span class="keywordflow">if</span>( k==tk )
+<a name="l00528"></a>00528                 <span class="keywordflow">break</span>;
+<a name="l00529"></a>00529             <span class="comment">// Another thread snatched the item, retry.</span>
+<a name="l00530"></a>00530         }
+<a name="l00531"></a>00531     } <span class="keywordflow">while</span>( !r.choose( k ).pop( dst, k, *<span class="keyword">this</span> ) );
+<a name="l00532"></a>00532     <span class="keywordflow">return</span> <span class="keyword">true</span>;
+<a name="l00533"></a>00533 }
+<a name="l00534"></a>00534 
+<a name="l00535"></a>00535 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00536"></a>00536 size_t concurrent_queue_base_v3&lt;T&gt;::internal_size()<span class="keyword"> const </span>{
+<a name="l00537"></a>00537     concurrent_queue_rep&lt;T&gt;&amp; r = *my_rep;
+<a name="l00538"></a>00538     __TBB_ASSERT( <span class="keyword">sizeof</span>(ptrdiff_t)&lt;=<span class="keyword">sizeof</span>(size_t), NULL );
+<a name="l00539"></a>00539     ticket hc = r.head_counter;
+<a name="l00540"></a>00540     size_t nie = r.n_invalid_entries;
+<a name="l00541"></a>00541     ticket tc = r.tail_counter;
+<a name="l00542"></a>00542     __TBB_ASSERT( hc!=tc || !nie, NULL );
+<a name="l00543"></a>00543     ptrdiff_t sz = tc-hc-nie;
+<a name="l00544"></a>00544     <span class="keywordflow">return</span> sz&lt;0 ? 0 :  size_t(sz);
+<a name="l00545"></a>00545 }
+<a name="l00546"></a>00546 
+<a name="l00547"></a>00547 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00548"></a>00548 <span class="keywordtype">bool</span> concurrent_queue_base_v3&lt;T&gt;::internal_empty()<span class="keyword"> const </span>{
+<a name="l00549"></a>00549     concurrent_queue_rep&lt;T&gt;&amp; r = *my_rep;
+<a name="l00550"></a>00550     ticket tc = r.tail_counter;
+<a name="l00551"></a>00551     ticket hc = r.head_counter;
+<a name="l00552"></a>00552     <span class="comment">// if tc!=r.tail_counter, the queue was not empty at some point between the two reads.</span>
+<a name="l00553"></a>00553     <span class="keywordflow">return</span> tc==r.tail_counter &amp;&amp; tc==hc+r.n_invalid_entries ;
+<a name="l00554"></a>00554 }
+<a name="l00555"></a>00555 
+<a name="l00556"></a>00556 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00557"></a>00557 <span class="keywordtype">void</span> concurrent_queue_base_v3&lt;T&gt;::internal_finish_clear() {
+<a name="l00558"></a>00558     concurrent_queue_rep&lt;T&gt;&amp; r = *my_rep;
+<a name="l00559"></a>00559     size_t nq = r.n_queue;
+<a name="l00560"></a>00560     <span class="keywordflow">for</span>( size_t i=0; i&lt;nq; ++i ) {
+<a name="l00561"></a>00561         page* tp = r.array[i].tail_page;
+<a name="l00562"></a>00562         <span class="keywordflow">if</span>( is_valid_page(tp) ) {
+<a name="l00563"></a>00563             __TBB_ASSERT( r.array[i].head_page==tp, <span class="stringliteral">"at most one page should remain"</span> );
+<a name="l00564"></a>00564             deallocate_page( tp );
+<a name="l00565"></a>00565             r.array[i].tail_page = NULL;
+<a name="l00566"></a>00566         } <span class="keywordflow">else</span> 
+<a name="l00567"></a>00567             __TBB_ASSERT( !is_valid_page(r.array[i].head_page), <span class="stringliteral">"head page pointer corrupt?"</span> );
+<a name="l00568"></a>00568     }
+<a name="l00569"></a>00569 }
+<a name="l00570"></a>00570 
+<a name="l00571"></a>00571 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00572"></a>00572 <span class="keywordtype">void</span> concurrent_queue_base_v3&lt;T&gt;::assign( <span class="keyword">const</span> concurrent_queue_base_v3&amp; src ) {
+<a name="l00573"></a>00573     concurrent_queue_rep&lt;T&gt;&amp; r = *my_rep;
+<a name="l00574"></a>00574     r.items_per_page = src.my_rep-&gt;items_per_page;
+<a name="l00575"></a>00575 
+<a name="l00576"></a>00576     <span class="comment">// copy concurrent_queue_rep.</span>
+<a name="l00577"></a>00577     r.head_counter = src.my_rep-&gt;head_counter;
+<a name="l00578"></a>00578     r.tail_counter = src.my_rep-&gt;tail_counter;
+<a name="l00579"></a>00579     r.n_invalid_entries = src.my_rep-&gt;n_invalid_entries;
+<a name="l00580"></a>00580 
+<a name="l00581"></a>00581     <span class="comment">// copy micro_queues</span>
+<a name="l00582"></a>00582     <span class="keywordflow">for</span>( size_t i = 0; i&lt;r.n_queue; ++i )
+<a name="l00583"></a>00583         r.array[i].assign( src.my_rep-&gt;array[i], *<span class="keyword">this</span>);
+<a name="l00584"></a>00584 
+<a name="l00585"></a>00585     __TBB_ASSERT( r.head_counter==src.my_rep-&gt;head_counter &amp;&amp; r.tail_counter==src.my_rep-&gt;tail_counter, 
+<a name="l00586"></a>00586             <span class="stringliteral">"the source concurrent queue should not be concurrently modified."</span> );
+<a name="l00587"></a>00587 }
+<a name="l00588"></a>00588 
+<a name="l00589"></a>00589 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> Value&gt; <span class="keyword">class </span>concurrent_queue_iterator;
+<a name="l00590"></a>00590 
+<a name="l00591"></a>00591 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00592"></a>00592 <span class="keyword">class </span>concurrent_queue_iterator_rep: no_assign {
+<a name="l00593"></a>00593     <span class="keyword">typedef</span> <span class="keyword">typename</span> micro_queue&lt;T&gt;::padded_page padded_page;
+<a name="l00594"></a>00594 <span class="keyword">public</span>:
+<a name="l00595"></a>00595     ticket head_counter;
+<a name="l00596"></a>00596     <span class="keyword">const</span> concurrent_queue_base_v3&lt;T&gt;&amp; my_queue;
+<a name="l00597"></a>00597     <span class="keyword">typename</span> concurrent_queue_base_v3&lt;T&gt;::page* array[concurrent_queue_rep&lt;T&gt;::n_queue];
+<a name="l00598"></a>00598     concurrent_queue_iterator_rep( <span class="keyword">const</span> concurrent_queue_base_v3&lt;T&gt;&amp; queue ) :
+<a name="l00599"></a>00599         head_counter(queue.my_rep-&gt;head_counter),
+<a name="l00600"></a>00600         my_queue(queue)
+<a name="l00601"></a>00601     {
+<a name="l00602"></a>00602         <span class="keywordflow">for</span>( size_t k=0; k&lt;concurrent_queue_rep&lt;T&gt;::n_queue; ++k )
+<a name="l00603"></a>00603             array[k] = queue.my_rep-&gt;array[k].head_page;
+<a name="l00604"></a>00604     }
+<a name="l00605"></a>00605 
+<a name="l00607"></a>00607     <span class="keywordtype">bool</span> get_item( T*&amp; item, size_t k ) ;
+<a name="l00608"></a>00608 };
+<a name="l00609"></a>00609 
+<a name="l00610"></a>00610 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00611"></a>00611 <span class="keywordtype">bool</span> concurrent_queue_iterator_rep&lt;T&gt;::get_item( T*&amp; item, size_t k ) {
+<a name="l00612"></a>00612     <span class="keywordflow">if</span>( k==my_queue.my_rep-&gt;tail_counter ) {
+<a name="l00613"></a>00613         item = NULL;
+<a name="l00614"></a>00614         <span class="keywordflow">return</span> <span class="keyword">true</span>;
+<a name="l00615"></a>00615     } <span class="keywordflow">else</span> {
+<a name="l00616"></a>00616         <span class="keyword">typename</span> concurrent_queue_base_v3&lt;T&gt;::page* p = array[concurrent_queue_rep&lt;T&gt;::index(k)];
+<a name="l00617"></a>00617         __TBB_ASSERT(p,NULL);
+<a name="l00618"></a>00618         size_t i = k/concurrent_queue_rep&lt;T&gt;::n_queue &amp; (my_queue.my_rep-&gt;items_per_page-1);
+<a name="l00619"></a>00619         item = &amp;micro_queue&lt;T&gt;::get_ref(*p,i);
+<a name="l00620"></a>00620         <span class="keywordflow">return</span> (p-&gt;mask &amp; uintptr_t(1)&lt;&lt;i)!=0;
+<a name="l00621"></a>00621     }
+<a name="l00622"></a>00622 }
+<a name="l00623"></a>00623 
+<a name="l00625"></a>00625 
+<a name="l00626"></a>00626 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Value&gt;
+<a name="l00627"></a>00627 <span class="keyword">class </span>concurrent_queue_iterator_base_v3 : no_assign {
+<a name="l00629"></a>00629 
+<a name="l00630"></a>00630     concurrent_queue_iterator_rep&lt;Value&gt;* my_rep;
+<a name="l00631"></a>00631 
+<a name="l00632"></a>00632     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00633"></a>00633     <span class="keyword">friend</span> <span class="keywordtype">bool</span> operator==( <span class="keyword">const</span> concurrent_queue_iterator&lt;C,T&gt;&amp; i, <span class="keyword">const</span> concurrent_queue_iterator&lt;C,U&gt;&amp; j );
+<a name="l00634"></a>00634 
+<a name="l00635"></a>00635     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00636"></a>00636     <span class="keyword">friend</span> <span class="keywordtype">bool</span> operator!=( <span class="keyword">const</span> concurrent_queue_iterator&lt;C,T&gt;&amp; i, <span class="keyword">const</span> concurrent_queue_iterator&lt;C,U&gt;&amp; j );
+<a name="l00637"></a>00637 <span class="keyword">protected</span>:
+<a name="l00639"></a>00639     Value* my_item;
+<a name="l00640"></a>00640 
+<a name="l00642"></a>00642     concurrent_queue_iterator_base_v3() : my_rep(NULL), my_item(NULL) {
+<a name="l00643"></a>00643 <span class="preprocessor">#if __GNUC__==4&amp;&amp;__GNUC_MINOR__==3</span>
+<a name="l00644"></a>00644 <span class="preprocessor"></span>        <span class="comment">// to get around a possible gcc 4.3 bug</span>
+<a name="l00645"></a>00645         __TBB_release_consistency_helper();
+<a name="l00646"></a>00646 <span class="preprocessor">#endif</span>
+<a name="l00647"></a>00647 <span class="preprocessor"></span>    }
+<a name="l00648"></a>00648 
+<a name="l00650"></a>00650     concurrent_queue_iterator_base_v3( <span class="keyword">const</span> concurrent_queue_iterator_base_v3&amp; i )
+<a name="l00651"></a>00651     : no_assign(), my_rep(NULL), my_item(NULL) {
+<a name="l00652"></a>00652         assign(i);
+<a name="l00653"></a>00653     }
+<a name="l00654"></a>00654 
+<a name="l00656"></a>00656     concurrent_queue_iterator_base_v3( <span class="keyword">const</span> concurrent_queue_base_v3&lt;Value&gt;&amp; queue ) ;
+<a name="l00657"></a>00657 
+<a name="l00659"></a>00659     <span class="keywordtype">void</span> assign( <span class="keyword">const</span> concurrent_queue_iterator_base_v3&lt;Value&gt;&amp; other ) ;
+<a name="l00660"></a>00660 
+<a name="l00662"></a>00662     <span class="keywordtype">void</span> advance() ;
+<a name="l00663"></a>00663 
+<a name="l00665"></a>00665     ~concurrent_queue_iterator_base_v3() {
+<a name="l00666"></a>00666         cache_aligned_allocator&lt;concurrent_queue_iterator_rep&lt;Value&gt; &gt;().deallocate(my_rep, 1);
+<a name="l00667"></a>00667         my_rep = NULL;
+<a name="l00668"></a>00668     }
+<a name="l00669"></a>00669 };
+<a name="l00670"></a>00670 
+<a name="l00671"></a>00671 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Value&gt;
+<a name="l00672"></a>00672 concurrent_queue_iterator_base_v3&lt;Value&gt;::concurrent_queue_iterator_base_v3( <span class="keyword">const</span> concurrent_queue_base_v3&lt;Value&gt;&amp; queue ) {
+<a name="l00673"></a>00673     my_rep = cache_aligned_allocator&lt;concurrent_queue_iterator_rep&lt;Value&gt; &gt;().allocate(1);
+<a name="l00674"></a>00674     <span class="keyword">new</span>( my_rep ) concurrent_queue_iterator_rep&lt;Value&gt;(queue);
+<a name="l00675"></a>00675     size_t k = my_rep-&gt;head_counter;
+<a name="l00676"></a>00676     <span class="keywordflow">if</span>( !my_rep-&gt;get_item(my_item, k) ) advance();
+<a name="l00677"></a>00677 }
+<a name="l00678"></a>00678 
+<a name="l00679"></a>00679 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Value&gt;
+<a name="l00680"></a>00680 <span class="keywordtype">void</span> concurrent_queue_iterator_base_v3&lt;Value&gt;::assign( <span class="keyword">const</span> concurrent_queue_iterator_base_v3&lt;Value&gt;&amp; other ) {
+<a name="l00681"></a>00681     <span class="keywordflow">if</span>( my_rep!=other.my_rep ) {
+<a name="l00682"></a>00682         <span class="keywordflow">if</span>( my_rep ) {
+<a name="l00683"></a>00683             cache_aligned_allocator&lt;concurrent_queue_iterator_rep&lt;Value&gt; &gt;().deallocate(my_rep, 1);
+<a name="l00684"></a>00684             my_rep = NULL;
+<a name="l00685"></a>00685         }
+<a name="l00686"></a>00686         <span class="keywordflow">if</span>( other.my_rep ) {
+<a name="l00687"></a>00687             my_rep = cache_aligned_allocator&lt;concurrent_queue_iterator_rep&lt;Value&gt; &gt;().allocate(1);
+<a name="l00688"></a>00688             <span class="keyword">new</span>( my_rep ) concurrent_queue_iterator_rep&lt;Value&gt;( *other.my_rep );
+<a name="l00689"></a>00689         }
+<a name="l00690"></a>00690     }
+<a name="l00691"></a>00691     my_item = other.my_item;
+<a name="l00692"></a>00692 }
+<a name="l00693"></a>00693 
+<a name="l00694"></a>00694 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Value&gt;
+<a name="l00695"></a>00695 <span class="keywordtype">void</span> concurrent_queue_iterator_base_v3&lt;Value&gt;::advance() {
+<a name="l00696"></a>00696     __TBB_ASSERT( my_item, <span class="stringliteral">"attempt to increment iterator past end of queue"</span> );  
+<a name="l00697"></a>00697     size_t k = my_rep-&gt;head_counter;
+<a name="l00698"></a>00698     <span class="keyword">const</span> concurrent_queue_base_v3&lt;Value&gt;&amp; queue = my_rep-&gt;my_queue;
+<a name="l00699"></a>00699 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00700"></a>00700 <span class="preprocessor"></span>    Value* tmp;
+<a name="l00701"></a>00701     my_rep-&gt;get_item(tmp,k);
+<a name="l00702"></a>00702     __TBB_ASSERT( my_item==tmp, NULL );
+<a name="l00703"></a>00703 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00704"></a>00704     size_t i = k/concurrent_queue_rep&lt;Value&gt;::n_queue &amp; (queue.my_rep-&gt;items_per_page-1);
+<a name="l00705"></a>00705     <span class="keywordflow">if</span>( i==queue.my_rep-&gt;items_per_page-1 ) {
+<a name="l00706"></a>00706         <span class="keyword">typename</span> concurrent_queue_base_v3&lt;Value&gt;::page*&amp; root = my_rep-&gt;array[concurrent_queue_rep&lt;Value&gt;::index(k)];
+<a name="l00707"></a>00707         root = root-&gt;next;
+<a name="l00708"></a>00708     }
+<a name="l00709"></a>00709     <span class="comment">// advance k</span>
+<a name="l00710"></a>00710     my_rep-&gt;head_counter = ++k;
+<a name="l00711"></a>00711     <span class="keywordflow">if</span>( !my_rep-&gt;get_item(my_item, k) ) advance();
+<a name="l00712"></a>00712 }
+<a name="l00713"></a>00713 
+<a name="l00715"></a>00715 
+<a name="l00716"></a>00716 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt; <span class="keyword">struct </span>tbb_remove_cv {<span class="keyword">typedef</span> T type;};
+<a name="l00717"></a>00717 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt; <span class="keyword">struct </span>tbb_remove_cv&lt;const T&gt; {<span class="keyword">typedef</span> T type;};
+<a name="l00718"></a>00718 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt; <span class="keyword">struct </span>tbb_remove_cv&lt;volatile T&gt; {<span class="keyword">typedef</span> T type;};
+<a name="l00719"></a>00719 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt; <span class="keyword">struct </span>tbb_remove_cv&lt;const volatile T&gt; {<span class="keyword">typedef</span> T type;};
+<a name="l00720"></a>00720 
+<a name="l00722"></a>00722 
+<a name="l00724"></a>00724 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> Value&gt;
+<a name="l00725"></a>00725 <span class="keyword">class </span>concurrent_queue_iterator: <span class="keyword">public</span> concurrent_queue_iterator_base_v3&lt;typename tbb_remove_cv&lt;Value&gt;::type&gt;,
+<a name="l00726"></a>00726         <span class="keyword">public</span> std::iterator&lt;std::forward_iterator_tag,Value&gt; {
+<a name="l00727"></a>00727 <span class="preprocessor">#if !__TBB_TEMPLATE_FRIENDS_BROKEN</span>
+<a name="l00728"></a>00728 <span class="preprocessor"></span>    <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A&gt;
+<a name="l00729"></a>00729     <span class="keyword">friend</span> class ::tbb::strict_ppl::concurrent_queue;
+<a name="l00730"></a>00730 <span class="preprocessor">#else</span>
+<a name="l00731"></a>00731 <span class="preprocessor"></span><span class="keyword">public</span>: <span class="comment">// workaround for MSVC</span>
+<a name="l00732"></a>00732 <span class="preprocessor">#endif </span>
+<a name="l00734"></a>00734 <span class="preprocessor">    concurrent_queue_iterator( const concurrent_queue_base_v3&lt;Value&gt;&amp; queue ) :</span>
+<a name="l00735"></a>00735 <span class="preprocessor"></span>        concurrent_queue_iterator_base_v3&lt;typename tbb_remove_cv&lt;Value&gt;::type&gt;(queue)
+<a name="l00736"></a>00736     {
+<a name="l00737"></a>00737     }
+<a name="l00738"></a>00738 
+<a name="l00739"></a>00739 <span class="keyword">public</span>:
+<a name="l00740"></a>00740     concurrent_queue_iterator() {}
+<a name="l00741"></a>00741 
+<a name="l00742"></a>00742     concurrent_queue_iterator( <span class="keyword">const</span> concurrent_queue_iterator&lt;Container,typename Container::value_type&gt;&amp; other ) :
+<a name="l00743"></a>00743         concurrent_queue_iterator_base_v3&lt;typename tbb_remove_cv&lt;Value&gt;::type&gt;(other)
+<a name="l00744"></a>00744     {}
+<a name="l00745"></a>00745 
+<a name="l00747"></a>00747     concurrent_queue_iterator&amp; operator=( <span class="keyword">const</span> concurrent_queue_iterator&amp; other ) {
+<a name="l00748"></a>00748         this-&gt;assign(other);
+<a name="l00749"></a>00749         <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00750"></a>00750     }
+<a name="l00751"></a>00751 
+<a name="l00753"></a>00753     Value&amp; operator*()<span class="keyword"> const </span>{
+<a name="l00754"></a>00754         <span class="keywordflow">return</span> *static_cast&lt;Value*&gt;(this-&gt;my_item);
+<a name="l00755"></a>00755     }
+<a name="l00756"></a>00756 
+<a name="l00757"></a>00757     Value* operator-&gt;()<span class="keyword"> const </span>{<span class="keywordflow">return</span> &amp;operator*();}
+<a name="l00758"></a>00758 
+<a name="l00760"></a>00760     concurrent_queue_iterator&amp; operator++() {
+<a name="l00761"></a>00761         this-&gt;advance();
+<a name="l00762"></a>00762         <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00763"></a>00763     }
+<a name="l00764"></a>00764 
+<a name="l00766"></a>00766     Value* operator++(<span class="keywordtype">int</span>) {
+<a name="l00767"></a>00767         Value* result = &amp;operator*();
+<a name="l00768"></a>00768         operator++();
+<a name="l00769"></a>00769         <span class="keywordflow">return</span> result;
+<a name="l00770"></a>00770     }
+<a name="l00771"></a>00771 }; <span class="comment">// concurrent_queue_iterator</span>
+<a name="l00772"></a>00772 
+<a name="l00773"></a>00773 
+<a name="l00774"></a>00774 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00775"></a>00775 <span class="keywordtype">bool</span> operator==( <span class="keyword">const</span> concurrent_queue_iterator&lt;C,T&gt;&amp; i, <span class="keyword">const</span> concurrent_queue_iterator&lt;C,U&gt;&amp; j ) {
+<a name="l00776"></a>00776     <span class="keywordflow">return</span> i.my_item==j.my_item;
+<a name="l00777"></a>00777 }
+<a name="l00778"></a>00778 
+<a name="l00779"></a>00779 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00780"></a>00780 <span class="keywordtype">bool</span> operator!=( <span class="keyword">const</span> concurrent_queue_iterator&lt;C,T&gt;&amp; i, <span class="keyword">const</span> concurrent_queue_iterator&lt;C,U&gt;&amp; j ) {
+<a name="l00781"></a>00781     <span class="keywordflow">return</span> i.my_item!=j.my_item;
+<a name="l00782"></a>00782 }
+<a name="l00783"></a>00783 
+<a name="l00784"></a>00784 } <span class="comment">// namespace internal</span>
+<a name="l00785"></a>00785 
+<a name="l00787"></a>00787 
+<a name="l00788"></a>00788 } <span class="comment">// namespace strict_ppl</span>
+<a name="l00789"></a>00789 
+<a name="l00791"></a>00791 <span class="keyword">namespace </span>internal {
+<a name="l00792"></a>00792 
+<a name="l00793"></a>00793 <span class="keyword">class </span>concurrent_queue_rep;
+<a name="l00794"></a>00794 <span class="keyword">class </span>concurrent_queue_iterator_rep;
+<a name="l00795"></a>00795 <span class="keyword">class </span>concurrent_queue_iterator_base_v3;
+<a name="l00796"></a>00796 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> Value&gt; <span class="keyword">class </span>concurrent_queue_iterator;
+<a name="l00797"></a>00797 
+<a name="l00799"></a>00799 
+<a name="l00801"></a>00801 <span class="keyword">class </span>concurrent_queue_base_v3: no_copy {
+<a name="l00803"></a>00803     concurrent_queue_rep* my_rep;
+<a name="l00804"></a>00804 
+<a name="l00805"></a>00805     <span class="keyword">friend</span> <span class="keyword">class </span>concurrent_queue_rep;
+<a name="l00806"></a>00806     <span class="keyword">friend</span> <span class="keyword">struct </span>micro_queue;
+<a name="l00807"></a>00807     <span class="keyword">friend</span> <span class="keyword">class </span>micro_queue_pop_finalizer;
+<a name="l00808"></a>00808     <span class="keyword">friend</span> <span class="keyword">class </span>concurrent_queue_iterator_rep;
+<a name="l00809"></a>00809     <span class="keyword">friend</span> <span class="keyword">class </span>concurrent_queue_iterator_base_v3;
+<a name="l00810"></a>00810 <span class="keyword">protected</span>:
+<a name="l00812"></a>00812     <span class="keyword">struct </span>page {
+<a name="l00813"></a>00813         page* next;
+<a name="l00814"></a>00814         uintptr_t mask; 
+<a name="l00815"></a>00815     };
+<a name="l00816"></a>00816 
+<a name="l00818"></a>00818     ptrdiff_t my_capacity;
+<a name="l00819"></a>00819    
+<a name="l00821"></a>00821     size_t items_per_page;
+<a name="l00822"></a>00822 
+<a name="l00824"></a>00824     size_t item_size;
+<a name="l00825"></a>00825 
+<a name="l00826"></a>00826 <span class="preprocessor">#if __TBB_GCC_3_3_PROTECTED_BROKEN</span>
+<a name="l00827"></a>00827 <span class="preprocessor"></span><span class="keyword">public</span>:
+<a name="l00828"></a>00828 <span class="preprocessor">#endif</span>
+<a name="l00829"></a>00829 <span class="preprocessor"></span>    <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00830"></a>00830     <span class="keyword">struct </span>padded_page: page {
+<a name="l00832"></a>00832         padded_page(); 
+<a name="l00834"></a>00834         <span class="keywordtype">void</span> operator=( <span class="keyword">const</span> padded_page&amp; );
+<a name="l00836"></a>00836         T last;
+<a name="l00837"></a>00837     };
+<a name="l00838"></a>00838 
+<a name="l00839"></a>00839 <span class="keyword">private</span>:
+<a name="l00840"></a>00840     <span class="keyword">virtual</span> <span class="keywordtype">void</span> copy_item( page&amp; dst, size_t index, <span class="keyword">const</span> <span class="keywordtype">void</span>* src ) = 0;
+<a name="l00841"></a>00841     <span class="keyword">virtual</span> <span class="keywordtype">void</span> assign_and_destroy_item( <span class="keywordtype">void</span>* dst, page&amp; src, size_t index ) = 0;
+<a name="l00842"></a>00842 <span class="keyword">protected</span>:
+<a name="l00843"></a>00843     __TBB_EXPORTED_METHOD concurrent_queue_base_v3( size_t item_size );
+<a name="l00844"></a>00844     <span class="keyword">virtual</span> __TBB_EXPORTED_METHOD ~concurrent_queue_base_v3();
+<a name="l00845"></a>00845 
+<a name="l00847"></a>00847     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_push( <span class="keyword">const</span> <span class="keywordtype">void</span>* src );
+<a name="l00848"></a>00848 
+<a name="l00850"></a>00850     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_pop( <span class="keywordtype">void</span>* dst );
+<a name="l00851"></a>00851 
+<a name="l00853"></a>00853     <span class="keywordtype">bool</span> __TBB_EXPORTED_METHOD internal_push_if_not_full( <span class="keyword">const</span> <span class="keywordtype">void</span>* src );
+<a name="l00854"></a>00854 
+<a name="l00856"></a>00856 
+<a name="l00857"></a>00857     <span class="keywordtype">bool</span> __TBB_EXPORTED_METHOD internal_pop_if_present( <span class="keywordtype">void</span>* dst );
+<a name="l00858"></a>00858 
+<a name="l00860"></a>00860     ptrdiff_t __TBB_EXPORTED_METHOD internal_size() <span class="keyword">const</span>;
+<a name="l00861"></a>00861 
+<a name="l00863"></a>00863     <span class="keywordtype">bool</span> __TBB_EXPORTED_METHOD internal_empty() <span class="keyword">const</span>;
+<a name="l00864"></a>00864 
+<a name="l00866"></a>00866     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_set_capacity( ptrdiff_t capacity, size_t element_size );
+<a name="l00867"></a>00867 
+<a name="l00869"></a>00869     <span class="keyword">virtual</span> page *allocate_page() = 0;
+<a name="l00870"></a>00870 
+<a name="l00872"></a>00872     <span class="keyword">virtual</span> <span class="keywordtype">void</span> deallocate_page( page *p ) = 0;
+<a name="l00873"></a>00873 
+<a name="l00875"></a>00875     <span class="comment">/* note that the name may be misleading, but it remains so due to a historical accident. */</span>
+<a name="l00876"></a>00876     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_finish_clear() ;
+<a name="l00877"></a>00877 
+<a name="l00879"></a>00879     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_throw_exception() <span class="keyword">const</span>;
+<a name="l00880"></a>00880 
+<a name="l00882"></a>00882     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD assign( <span class="keyword">const</span> concurrent_queue_base_v3&amp; src ) ;
+<a name="l00883"></a>00883 
+<a name="l00884"></a>00884 <span class="keyword">private</span>:
+<a name="l00885"></a>00885     <span class="keyword">virtual</span> <span class="keywordtype">void</span> copy_page_item( page&amp; dst, size_t dindex, <span class="keyword">const</span> page&amp; src, size_t sindex ) = 0;
+<a name="l00886"></a>00886 };
+<a name="l00887"></a>00887 
+<a name="l00889"></a>00889 
+<a name="l00890"></a>00890 <span class="keyword">class </span>concurrent_queue_iterator_base_v3 {
+<a name="l00892"></a>00892 
+<a name="l00893"></a>00893     concurrent_queue_iterator_rep* my_rep;
+<a name="l00894"></a>00894 
+<a name="l00895"></a>00895     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00896"></a>00896     <span class="keyword">friend</span> <span class="keywordtype">bool</span> operator==( <span class="keyword">const</span> concurrent_queue_iterator&lt;C,T&gt;&amp; i, <span class="keyword">const</span> concurrent_queue_iterator&lt;C,U&gt;&amp; j );
+<a name="l00897"></a>00897 
+<a name="l00898"></a>00898     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00899"></a>00899     <span class="keyword">friend</span> <span class="keywordtype">bool</span> operator!=( <span class="keyword">const</span> concurrent_queue_iterator&lt;C,T&gt;&amp; i, <span class="keyword">const</span> concurrent_queue_iterator&lt;C,U&gt;&amp; j );
+<a name="l00900"></a>00900 
+<a name="l00901"></a>00901     <span class="keywordtype">void</span> initialize( <span class="keyword">const</span> concurrent_queue_base_v3&amp; queue, size_t offset_of_data );
+<a name="l00902"></a>00902 <span class="keyword">protected</span>:
+<a name="l00904"></a>00904     <span class="keywordtype">void</span>* my_item;
+<a name="l00905"></a>00905 
+<a name="l00907"></a>00907     concurrent_queue_iterator_base_v3() : my_rep(NULL), my_item(NULL) {}
+<a name="l00908"></a>00908 
+<a name="l00910"></a>00910     concurrent_queue_iterator_base_v3( <span class="keyword">const</span> concurrent_queue_iterator_base_v3&amp; i ) : my_rep(NULL), my_item(NULL) {
+<a name="l00911"></a>00911         assign(i);
+<a name="l00912"></a>00912     }
+<a name="l00913"></a>00913 
+<a name="l00915"></a>00915 
+<a name="l00916"></a>00916     __TBB_EXPORTED_METHOD concurrent_queue_iterator_base_v3( <span class="keyword">const</span> concurrent_queue_base_v3&amp; queue );
+<a name="l00917"></a>00917 
+<a name="l00919"></a>00919     __TBB_EXPORTED_METHOD concurrent_queue_iterator_base_v3( <span class="keyword">const</span> concurrent_queue_base_v3&amp; queue, size_t offset_of_data );
+<a name="l00920"></a>00920 
+<a name="l00922"></a>00922     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD assign( <span class="keyword">const</span> concurrent_queue_iterator_base_v3&amp; i );
+<a name="l00923"></a>00923 
+<a name="l00925"></a>00925     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD advance();
+<a name="l00926"></a>00926 
+<a name="l00928"></a>00928     __TBB_EXPORTED_METHOD ~concurrent_queue_iterator_base_v3();
+<a name="l00929"></a>00929 };
+<a name="l00930"></a>00930 
+<a name="l00931"></a>00931 <span class="keyword">typedef</span> concurrent_queue_iterator_base_v3 concurrent_queue_iterator_base;
+<a name="l00932"></a>00932 
+<a name="l00934"></a>00934 
+<a name="l00936"></a>00936 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> Value&gt;
+<a name="l00937"></a>00937 <span class="keyword">class </span>concurrent_queue_iterator: <span class="keyword">public</span> concurrent_queue_iterator_base,
+<a name="l00938"></a>00938         <span class="keyword">public</span> std::iterator&lt;std::forward_iterator_tag,Value&gt; {
+<a name="l00939"></a>00939 
+<a name="l00940"></a>00940 <span class="preprocessor">#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)</span>
+<a name="l00941"></a>00941 <span class="preprocessor"></span>    <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A&gt;
+<a name="l00942"></a>00942     <span class="keyword">friend</span> class ::tbb::concurrent_bounded_queue;
+<a name="l00943"></a>00943 
+<a name="l00944"></a>00944     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A&gt;
+<a name="l00945"></a>00945     <span class="keyword">friend</span> class ::tbb::deprecated::concurrent_queue;
+<a name="l00946"></a>00946 <span class="preprocessor">#else</span>
+<a name="l00947"></a>00947 <span class="preprocessor"></span><span class="keyword">public</span>: <span class="comment">// workaround for MSVC</span>
+<a name="l00948"></a>00948 <span class="preprocessor">#endif </span>
+<a name="l00950"></a>00950 <span class="preprocessor">    concurrent_queue_iterator( const concurrent_queue_base_v3&amp; queue ) :</span>
+<a name="l00951"></a>00951 <span class="preprocessor"></span>        concurrent_queue_iterator_base_v3(queue,__TBB_offsetof(concurrent_queue_base_v3::padded_page&lt;Value&gt;,last))
+<a name="l00952"></a>00952     {
+<a name="l00953"></a>00953     }
+<a name="l00954"></a>00954 
+<a name="l00955"></a>00955 <span class="keyword">public</span>:
+<a name="l00956"></a>00956     concurrent_queue_iterator() {}
+<a name="l00957"></a>00957 
+<a name="l00960"></a>00960     concurrent_queue_iterator( <span class="keyword">const</span> concurrent_queue_iterator&lt;Container,typename Container::value_type&gt;&amp; other ) :
+<a name="l00961"></a>00961         concurrent_queue_iterator_base_v3(other)
+<a name="l00962"></a>00962     {}
+<a name="l00963"></a>00963 
+<a name="l00965"></a>00965     concurrent_queue_iterator&amp; operator=( <span class="keyword">const</span> concurrent_queue_iterator&amp; other ) {
+<a name="l00966"></a>00966         assign(other);
+<a name="l00967"></a>00967         <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00968"></a>00968     }
+<a name="l00969"></a>00969 
+<a name="l00971"></a>00971     Value&amp; operator*()<span class="keyword"> const </span>{
+<a name="l00972"></a>00972         <span class="keywordflow">return</span> *static_cast&lt;Value*&gt;(my_item);
+<a name="l00973"></a>00973     }
+<a name="l00974"></a>00974 
+<a name="l00975"></a>00975     Value* operator-&gt;()<span class="keyword"> const </span>{<span class="keywordflow">return</span> &amp;operator*();}
+<a name="l00976"></a>00976 
+<a name="l00978"></a>00978     concurrent_queue_iterator&amp; operator++() {
+<a name="l00979"></a>00979         advance();
+<a name="l00980"></a>00980         <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00981"></a>00981     }
+<a name="l00982"></a>00982 
+<a name="l00984"></a>00984     Value* operator++(<span class="keywordtype">int</span>) {
+<a name="l00985"></a>00985         Value* result = &amp;operator*();
+<a name="l00986"></a>00986         operator++();
+<a name="l00987"></a>00987         <span class="keywordflow">return</span> result;
+<a name="l00988"></a>00988     }
+<a name="l00989"></a>00989 }; <span class="comment">// concurrent_queue_iterator</span>
+<a name="l00990"></a>00990 
+<a name="l00991"></a>00991 
+<a name="l00992"></a>00992 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00993"></a>00993 <span class="keywordtype">bool</span> operator==( <span class="keyword">const</span> concurrent_queue_iterator&lt;C,T&gt;&amp; i, <span class="keyword">const</span> concurrent_queue_iterator&lt;C,U&gt;&amp; j ) {
+<a name="l00994"></a>00994     <span class="keywordflow">return</span> i.my_item==j.my_item;
+<a name="l00995"></a>00995 }
+<a name="l00996"></a>00996 
+<a name="l00997"></a>00997 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00998"></a>00998 <span class="keywordtype">bool</span> operator!=( <span class="keyword">const</span> concurrent_queue_iterator&lt;C,T&gt;&amp; i, <span class="keyword">const</span> concurrent_queue_iterator&lt;C,U&gt;&amp; j ) {
+<a name="l00999"></a>00999     <span class="keywordflow">return</span> i.my_item!=j.my_item;
+<a name="l01000"></a>01000 }
+<a name="l01001"></a>01001 
+<a name="l01002"></a>01002 } <span class="comment">// namespace internal;</span>
+<a name="l01003"></a>01003 
+<a name="l01005"></a>01005 
+<a name="l01006"></a>01006 } <span class="comment">// namespace tbb</span>
+<a name="l01007"></a>01007 
+<a name="l01008"></a>01008 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_concurrent_queue_internal_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
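
Editorial note: the header listed above only defines the iterator machinery; the public queue classes expose it through their debug accessors. A minimal usage sketch, assuming the unsafe_begin()/unsafe_end() iterators provided by the public tbb/concurrent_queue.h in this TBB release (they return the concurrent_queue_iterator defined here and are only safe to call while no other thread is modifying the queue, hence the "unsafe" prefix):

    #include "tbb/concurrent_queue.h"
    #include <cstdio>

    int main() {
        tbb::concurrent_queue<int> q;
        for (int i = 0; i < 4; ++i)
            q.push(i);
        // Debug-only traversal: unsafe_begin()/unsafe_end() hand back the
        // concurrent_queue_iterator declared in _concurrent_queue_internal.h.
        for (tbb::concurrent_queue<int>::iterator it = q.unsafe_begin();
             it != q.unsafe_end(); ++it)
            std::printf("%d\n", *it);
        return 0;
    }
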
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00304.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00304.html
new file mode 100644 (file)
index 0000000..3deb9c5
--- /dev/null
@@ -0,0 +1,1414 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>_concurrent_unordered_internal.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>_concurrent_unordered_internal.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="comment">/* Container implementations in this header are based on PPL implementations </span>
+<a name="l00022"></a>00022 <span class="comment">   provided by Microsoft. */</span>
+<a name="l00023"></a>00023 
+<a name="l00024"></a>00024 <span class="preprocessor">#ifndef __TBB_concurrent_unordered_internal_H</span>
+<a name="l00025"></a>00025 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_concurrent_unordered_internal_H</span>
+<a name="l00026"></a>00026 <span class="preprocessor"></span>
+<a name="l00027"></a>00027 <span class="preprocessor">#include "tbb_stddef.h"</span>
+<a name="l00028"></a>00028 
+<a name="l00029"></a>00029 <span class="preprocessor">#if !TBB_USE_EXCEPTIONS &amp;&amp; _MSC_VER</span>
+<a name="l00030"></a>00030 <span class="preprocessor"></span>    <span class="comment">// Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers</span>
+<a name="l00031"></a>00031 <span class="preprocessor">    #pragma warning (push)</span>
+<a name="l00032"></a>00032 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (disable: 4530)</span>
+<a name="l00033"></a>00033 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00034"></a>00034 <span class="preprocessor"></span>
+<a name="l00035"></a>00035 <span class="preprocessor">#include &lt;iterator&gt;</span>
+<a name="l00036"></a>00036 <span class="preprocessor">#include &lt;utility&gt;</span>      <span class="comment">// Need std::pair</span>
+<a name="l00037"></a>00037 <span class="preprocessor">#include &lt;functional&gt;</span>
+<a name="l00038"></a>00038 <span class="preprocessor">#include &lt;string&gt;</span>       <span class="comment">// For tbb_hasher</span>
+<a name="l00039"></a>00039 <span class="preprocessor">#include &lt;cstring&gt;</span>      <span class="comment">// Need std::memset</span>
+<a name="l00040"></a>00040 
+<a name="l00041"></a>00041 <span class="preprocessor">#if !TBB_USE_EXCEPTIONS &amp;&amp; _MSC_VER</span>
+<a name="l00042"></a>00042 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (pop)</span>
+<a name="l00043"></a>00043 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00044"></a>00044 <span class="preprocessor"></span>
+<a name="l00045"></a>00045 <span class="preprocessor">#include "atomic.h"</span>
+<a name="l00046"></a>00046 <span class="preprocessor">#include "tbb_exception.h"</span>
+<a name="l00047"></a>00047 <span class="preprocessor">#include "tbb_allocator.h"</span>
+<a name="l00048"></a>00048 
+<a name="l00049"></a>00049 <span class="keyword">namespace </span>tbb {
+<a name="l00050"></a>00050 <span class="keyword">namespace </span>interface5 {
+<a name="l00052"></a>00052 <span class="keyword">namespace </span>internal {
+<a name="l00053"></a>00053 
+<a name="l00054"></a>00054 <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> Allocator&gt;
+<a name="l00055"></a>00055 <span class="keyword">class </span>split_ordered_list;
+<a name="l00056"></a>00056 <span class="keyword">template</span> &lt;<span class="keyword">typename</span> Traits&gt;
+<a name="l00057"></a>00057 <span class="keyword">class </span>concurrent_unordered_base;
+<a name="l00058"></a>00058 
+<a name="l00059"></a>00059 <span class="comment">// Forward list iterators (without skipping dummy elements)</span>
+<a name="l00060"></a>00060 <span class="keyword">template</span>&lt;<span class="keyword">class</span> Solist, <span class="keyword">typename</span> Value&gt;
+<a name="l00061"></a>00061 <span class="keyword">class </span>flist_iterator : <span class="keyword">public</span> std::iterator&lt;std::forward_iterator_tag, Value&gt;
+<a name="l00062"></a>00062 {
+<a name="l00063"></a>00063     <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> Allocator&gt;
+<a name="l00064"></a>00064     <span class="keyword">friend</span> <span class="keyword">class </span>split_ordered_list;
+<a name="l00065"></a>00065     <span class="keyword">template</span> &lt;<span class="keyword">typename</span> Traits&gt;
+<a name="l00066"></a>00066     <span class="keyword">friend</span> <span class="keyword">class </span>concurrent_unordered_base;
+<a name="l00067"></a>00067     <span class="keyword">template</span>&lt;<span class="keyword">class</span> M, <span class="keyword">typename</span> V&gt;
+<a name="l00068"></a>00068     <span class="keyword">friend</span> <span class="keyword">class </span>flist_iterator;
+<a name="l00069"></a>00069 
+<a name="l00070"></a>00070     <span class="keyword">typedef</span> <span class="keyword">typename</span> Solist::nodeptr_t nodeptr_t;
+<a name="l00071"></a>00071 <span class="keyword">public</span>:
+<a name="l00072"></a>00072     <span class="keyword">typedef</span> <span class="keyword">typename</span> Solist::value_type value_type;
+<a name="l00073"></a>00073     <span class="keyword">typedef</span> <span class="keyword">typename</span> Solist::difference_type difference_type;
+<a name="l00074"></a>00074     <span class="keyword">typedef</span> <span class="keyword">typename</span> Solist::pointer pointer;
+<a name="l00075"></a>00075     <span class="keyword">typedef</span> <span class="keyword">typename</span> Solist::reference reference;
+<a name="l00076"></a>00076 
+<a name="l00077"></a>00077     flist_iterator() : my_node_ptr(0) {}
+<a name="l00078"></a>00078     flist_iterator( <span class="keyword">const</span> flist_iterator&lt;Solist, typename Solist::value_type&gt; &amp;other )
+<a name="l00079"></a>00079         : my_node_ptr(other.my_node_ptr) {}
+<a name="l00080"></a>00080 
+<a name="l00081"></a>00081     reference operator*()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> my_node_ptr-&gt;my_element; }
+<a name="l00082"></a>00082     pointer operator-&gt;()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> &amp;**<span class="keyword">this</span>; }
+<a name="l00083"></a>00083 
+<a name="l00084"></a>00084     flist_iterator&amp; operator++() {
+<a name="l00085"></a>00085         my_node_ptr = my_node_ptr-&gt;my_next;
+<a name="l00086"></a>00086         <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00087"></a>00087     }
+<a name="l00088"></a>00088 
+<a name="l00089"></a>00089     flist_iterator operator++(<span class="keywordtype">int</span>) {
+<a name="l00090"></a>00090         flist_iterator tmp = *<span class="keyword">this</span>;
+<a name="l00091"></a>00091         ++*<span class="keyword">this</span>;
+<a name="l00092"></a>00092         <span class="keywordflow">return</span> tmp;
+<a name="l00093"></a>00093     }
+<a name="l00094"></a>00094 
+<a name="l00095"></a>00095 <span class="keyword">protected</span>:
+<a name="l00096"></a>00096     flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {}
+<a name="l00097"></a>00097     nodeptr_t get_node_ptr()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> my_node_ptr; }
+<a name="l00098"></a>00098 
+<a name="l00099"></a>00099     nodeptr_t my_node_ptr;
+<a name="l00100"></a>00100 
+<a name="l00101"></a>00101     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> M, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00102"></a>00102     <span class="keyword">friend</span> <span class="keywordtype">bool</span> operator==( <span class="keyword">const</span> flist_iterator&lt;M,T&gt; &amp;i, <span class="keyword">const</span> flist_iterator&lt;M,U&gt; &amp;j );
+<a name="l00103"></a>00103     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> M, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00104"></a>00104     <span class="keyword">friend</span> <span class="keywordtype">bool</span> operator!=( <span class="keyword">const</span> flist_iterator&lt;M,T&gt;&amp; i, <span class="keyword">const</span> flist_iterator&lt;M,U&gt;&amp; j );
+<a name="l00105"></a>00105 };
+<a name="l00106"></a>00106 
+<a name="l00107"></a>00107 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Solist, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00108"></a>00108 <span class="keywordtype">bool</span> operator==( <span class="keyword">const</span> flist_iterator&lt;Solist,T&gt; &amp;i, <span class="keyword">const</span> flist_iterator&lt;Solist,U&gt; &amp;j ) {
+<a name="l00109"></a>00109     <span class="keywordflow">return</span> i.my_node_ptr == j.my_node_ptr;
+<a name="l00110"></a>00110 }
+<a name="l00111"></a>00111 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Solist, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00112"></a>00112 <span class="keywordtype">bool</span> operator!=( <span class="keyword">const</span> flist_iterator&lt;Solist,T&gt;&amp; i, <span class="keyword">const</span> flist_iterator&lt;Solist,U&gt;&amp; j ) {
+<a name="l00113"></a>00113     <span class="keywordflow">return</span> i.my_node_ptr != j.my_node_ptr;
+<a name="l00114"></a>00114 }
+<a name="l00115"></a>00115 
+<a name="l00116"></a>00116 <span class="comment">// Split-order list iterators, needed to skip dummy elements</span>
+<a name="l00117"></a>00117 <span class="keyword">template</span>&lt;<span class="keyword">class</span> Solist, <span class="keyword">typename</span> Value&gt;
+<a name="l00118"></a>00118 <span class="keyword">class </span>solist_iterator : <span class="keyword">public</span> flist_iterator&lt;Solist, Value&gt;
+<a name="l00119"></a>00119 {
+<a name="l00120"></a>00120     <span class="keyword">typedef</span> flist_iterator&lt;Solist, Value&gt; base_type;
+<a name="l00121"></a>00121     <span class="keyword">typedef</span> <span class="keyword">typename</span> Solist::nodeptr_t nodeptr_t;
+<a name="l00122"></a>00122     <span class="keyword">using</span> base_type::get_node_ptr;
+<a name="l00123"></a>00123     <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> Allocator&gt;
+<a name="l00124"></a>00124     <span class="keyword">friend</span> <span class="keyword">class </span>split_ordered_list;
+<a name="l00125"></a>00125     <span class="keyword">template</span>&lt;<span class="keyword">class</span> M, <span class="keyword">typename</span> V&gt;
+<a name="l00126"></a>00126     <span class="keyword">friend</span> <span class="keyword">class </span>solist_iterator;
+<a name="l00127"></a>00127     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> M, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00128"></a>00128     <span class="keyword">friend</span> <span class="keywordtype">bool</span> operator==( <span class="keyword">const</span> solist_iterator&lt;M,T&gt; &amp;i, <span class="keyword">const</span> solist_iterator&lt;M,U&gt; &amp;j );
+<a name="l00129"></a>00129     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> M, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00130"></a>00130     <span class="keyword">friend</span> <span class="keywordtype">bool</span> operator!=( <span class="keyword">const</span> solist_iterator&lt;M,T&gt;&amp; i, <span class="keyword">const</span> solist_iterator&lt;M,U&gt;&amp; j );
+<a name="l00131"></a>00131 
+<a name="l00132"></a>00132     <span class="keyword">const</span> Solist *my_list_ptr;
+<a name="l00133"></a>00133     solist_iterator(nodeptr_t pnode, <span class="keyword">const</span> Solist *plist) : base_type(pnode), my_list_ptr(plist) {}
+<a name="l00134"></a>00134 
+<a name="l00135"></a>00135 <span class="keyword">public</span>:
+<a name="l00136"></a>00136     <span class="keyword">typedef</span> <span class="keyword">typename</span> Solist::value_type value_type;
+<a name="l00137"></a>00137     <span class="keyword">typedef</span> <span class="keyword">typename</span> Solist::difference_type difference_type;
+<a name="l00138"></a>00138     <span class="keyword">typedef</span> <span class="keyword">typename</span> Solist::pointer pointer;
+<a name="l00139"></a>00139     <span class="keyword">typedef</span> <span class="keyword">typename</span> Solist::reference reference;
+<a name="l00140"></a>00140 
+<a name="l00141"></a>00141     solist_iterator() {}
+<a name="l00142"></a>00142     solist_iterator(<span class="keyword">const</span> solist_iterator&lt;Solist, typename Solist::value_type&gt; &amp;other )
+<a name="l00143"></a>00143         : base_type(other), my_list_ptr(other.my_list_ptr) {}
+<a name="l00144"></a>00144 
+<a name="l00145"></a>00145     reference operator*()<span class="keyword"> const </span>{
+<a name="l00146"></a>00146         <span class="keywordflow">return</span> this-&gt;base_type::operator*();
+<a name="l00147"></a>00147     }
+<a name="l00148"></a>00148 
+<a name="l00149"></a>00149     pointer operator-&gt;()<span class="keyword"> const </span>{
+<a name="l00150"></a>00150         <span class="keywordflow">return</span> (&amp;**<span class="keyword">this</span>);
+<a name="l00151"></a>00151     }
+<a name="l00152"></a>00152 
+<a name="l00153"></a>00153     solist_iterator&amp; operator++() {
+<a name="l00154"></a>00154         <span class="keywordflow">do</span> ++(*(base_type *)<span class="keyword">this</span>);
+<a name="l00155"></a>00155         <span class="keywordflow">while</span> (get_node_ptr() != NULL &amp;&amp; get_node_ptr()-&gt;is_dummy());
+<a name="l00156"></a>00156 
+<a name="l00157"></a>00157         <span class="keywordflow">return</span> (*<span class="keyword">this</span>);
+<a name="l00158"></a>00158     }
+<a name="l00159"></a>00159 
+<a name="l00160"></a>00160     solist_iterator operator++(<span class="keywordtype">int</span>) {
+<a name="l00161"></a>00161         solist_iterator tmp = *<span class="keyword">this</span>;
+<a name="l00162"></a>00162         <span class="keywordflow">do</span> ++*<span class="keyword">this</span>;
+<a name="l00163"></a>00163         <span class="keywordflow">while</span> (get_node_ptr() != NULL &amp;&amp; get_node_ptr()-&gt;is_dummy());
+<a name="l00164"></a>00164 
+<a name="l00165"></a>00165         <span class="keywordflow">return</span> (tmp);
+<a name="l00166"></a>00166     }
+<a name="l00167"></a>00167 };
+<a name="l00168"></a>00168 
+<a name="l00169"></a>00169 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Solist, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00170"></a>00170 <span class="keywordtype">bool</span> operator==( <span class="keyword">const</span> solist_iterator&lt;Solist,T&gt; &amp;i, <span class="keyword">const</span> solist_iterator&lt;Solist,U&gt; &amp;j ) {
+<a name="l00171"></a>00171     <span class="keywordflow">return</span> i.my_node_ptr == j.my_node_ptr &amp;&amp; i.my_list_ptr == j.my_list_ptr;
+<a name="l00172"></a>00172 }
+<a name="l00173"></a>00173 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Solist, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00174"></a>00174 <span class="keywordtype">bool</span> operator!=( <span class="keyword">const</span> solist_iterator&lt;Solist,T&gt;&amp; i, <span class="keyword">const</span> solist_iterator&lt;Solist,U&gt;&amp; j ) {
+<a name="l00175"></a>00175     <span class="keywordflow">return</span> i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr;
+<a name="l00176"></a>00176 }
+<a name="l00177"></a>00177 
+<a name="l00178"></a>00178 <span class="comment">// Forward type and class definitions</span>
+<a name="l00179"></a>00179 <span class="keyword">typedef</span> size_t sokey_t;
+<a name="l00180"></a>00180 
+<a name="l00181"></a>00181 <span class="comment">// Forward list in which elements are sorted in a split-order</span>
+<a name="l00182"></a>00182 <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> Allocator&gt;
+<a name="l00183"></a>00183 <span class="keyword">class </span>split_ordered_list
+<a name="l00184"></a>00184 {
+<a name="l00185"></a>00185 <span class="keyword">public</span>:
+<a name="l00186"></a>00186     <span class="keyword">typedef</span> split_ordered_list&lt;T, Allocator&gt; self_type;
+<a name="l00187"></a>00187     <span class="keyword">typedef</span> <span class="keyword">typename</span> Allocator::template rebind&lt;T&gt;::other allocator_type;
+<a name="l00188"></a>00188     <span class="keyword">struct </span>node;
+<a name="l00189"></a>00189     <span class="keyword">typedef</span> node *nodeptr_t;
+<a name="l00190"></a>00190 
+<a name="l00191"></a>00191     <span class="keyword">typedef</span> <span class="keyword">typename</span> allocator_type::size_type size_type;
+<a name="l00192"></a>00192     <span class="keyword">typedef</span> <span class="keyword">typename</span> allocator_type::difference_type difference_type;
+<a name="l00193"></a>00193     <span class="keyword">typedef</span> <span class="keyword">typename</span> allocator_type::pointer pointer;
+<a name="l00194"></a>00194     <span class="keyword">typedef</span> <span class="keyword">typename</span> allocator_type::const_pointer const_pointer;
+<a name="l00195"></a>00195     <span class="keyword">typedef</span> <span class="keyword">typename</span> allocator_type::reference reference;
+<a name="l00196"></a>00196     <span class="keyword">typedef</span> <span class="keyword">typename</span> allocator_type::const_reference const_reference;
+<a name="l00197"></a>00197     <span class="keyword">typedef</span> <span class="keyword">typename</span> allocator_type::value_type value_type;
+<a name="l00198"></a>00198 
+<a name="l00199"></a>00199     <span class="keyword">typedef</span> solist_iterator&lt;self_type, const value_type&gt; const_iterator;
+<a name="l00200"></a>00200     <span class="keyword">typedef</span> solist_iterator&lt;self_type, value_type&gt; iterator;
+<a name="l00201"></a>00201     <span class="keyword">typedef</span> flist_iterator&lt;self_type, const value_type&gt; raw_const_iterator;
+<a name="l00202"></a>00202     <span class="keyword">typedef</span> flist_iterator&lt;self_type, value_type&gt; raw_iterator;
+<a name="l00203"></a>00203 
+<a name="l00204"></a>00204     <span class="comment">// Node that holds the element in a split-ordered list</span>
+<a name="l00205"></a>00205     <span class="keyword">struct </span>node : tbb::internal::no_assign
+<a name="l00206"></a>00206     {
+<a name="l00207"></a>00207         <span class="comment">// Initialize the node with the given order key</span>
+<a name="l00208"></a>00208         <span class="keywordtype">void</span> init(sokey_t order_key) {
+<a name="l00209"></a>00209             my_order_key = order_key;
+<a name="l00210"></a>00210             my_next = NULL;
+<a name="l00211"></a>00211         }
+<a name="l00212"></a>00212 
+<a name="l00213"></a>00213         <span class="comment">// Return the order key (needed for hashing)</span>
+<a name="l00214"></a>00214         sokey_t get_order_key()<span class="keyword"> const </span>{ <span class="comment">// TODO: remove</span>
+<a name="l00215"></a>00215             <span class="keywordflow">return</span> my_order_key;
+<a name="l00216"></a>00216         }
+<a name="l00217"></a>00217 
+<a name="l00218"></a>00218         <span class="comment">// Inserts the new element in the list in an atomic fashion</span>
+<a name="l00219"></a>00219         nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node)
+<a name="l00220"></a>00220         {
+<a name="l00221"></a>00221             <span class="comment">// Try to change the next pointer on the current element to a new element, only if it still points to the cached next</span>
+<a name="l00222"></a>00222             nodeptr_t exchange_node = (nodeptr_t) __TBB_CompareAndSwapW((<span class="keywordtype">void</span> *) &amp;my_next, (uintptr_t)new_node, (uintptr_t)current_node);
+<a name="l00223"></a>00223 
+<a name="l00224"></a>00224             <span class="keywordflow">if</span> (exchange_node == current_node) <span class="comment">// TODO: why this branch?</span>
+<a name="l00225"></a>00225             {
+<a name="l00226"></a>00226                 <span class="comment">// Operation succeeded, return the new node</span>
+<a name="l00227"></a>00227                 <span class="keywordflow">return</span> new_node;
+<a name="l00228"></a>00228             }
+<a name="l00229"></a>00229             <span class="keywordflow">else</span>
+<a name="l00230"></a>00230             {
+<a name="l00231"></a>00231                 <span class="comment">// Operation failed, return the "interfering" node</span>
+<a name="l00232"></a>00232                 <span class="keywordflow">return</span> exchange_node;
+<a name="l00233"></a>00233             }
+<a name="l00234"></a>00234         }
+<a name="l00235"></a>00235 
+<a name="l00236"></a>00236         <span class="comment">// Checks if this element in the list is a dummy, order enforcing node. Dummy nodes are used by buckets</span>
+<a name="l00237"></a>00237         <span class="comment">// in the hash table to quickly index into the right subsection of the split-ordered list.</span>
+<a name="l00238"></a>00238         <span class="keywordtype">bool</span> is_dummy()<span class="keyword"> const </span>{
+<a name="l00239"></a>00239             <span class="keywordflow">return</span> (my_order_key &amp; 0x1) == 0;
+<a name="l00240"></a>00240         }
+<a name="l00241"></a>00241 
+<a name="l00242"></a>00242 
+<a name="l00243"></a>00243         nodeptr_t  my_next;      <span class="comment">// Next element in the list</span>
+<a name="l00244"></a>00244         value_type my_element;   <span class="comment">// Element storage</span>
+<a name="l00245"></a>00245         sokey_t    my_order_key; <span class="comment">// Order key for this element</span>
+<a name="l00246"></a>00246     };
+<a name="l00247"></a>00247 
+<a name="l00248"></a>00248     <span class="comment">// Allocate a new node with the given order key and value</span>
+<a name="l00249"></a>00249     nodeptr_t create_node(sokey_t order_key, <span class="keyword">const</span> T &amp;value) {
+<a name="l00250"></a>00250         nodeptr_t pnode = my_node_allocator.allocate(1);
+<a name="l00251"></a>00251 
+<a name="l00252"></a>00252         __TBB_TRY {
+<a name="l00253"></a>00253             <span class="keyword">new</span>(static_cast&lt;void*&gt;(&amp;pnode-&gt;my_element)) T(value);
+<a name="l00254"></a>00254             pnode-&gt;init(order_key);
+<a name="l00255"></a>00255         } __TBB_CATCH(...) {
+<a name="l00256"></a>00256             my_node_allocator.deallocate(pnode, 1);
+<a name="l00257"></a>00257             __TBB_RETHROW();
+<a name="l00258"></a>00258         }
+<a name="l00259"></a>00259 
+<a name="l00260"></a>00260         <span class="keywordflow">return</span> (pnode);
+<a name="l00261"></a>00261     }
+<a name="l00262"></a>00262 
+<a name="l00263"></a>00263     <span class="comment">// Allocate a new node with the given order key; used to allocate dummy nodes</span>
+<a name="l00264"></a>00264     nodeptr_t create_node(sokey_t order_key) {
+<a name="l00265"></a>00265         nodeptr_t pnode = my_node_allocator.allocate(1);
+<a name="l00266"></a>00266 
+<a name="l00267"></a>00267         __TBB_TRY {
+<a name="l00268"></a>00268             <span class="keyword">new</span>(static_cast&lt;void*&gt;(&amp;pnode-&gt;my_element)) T();
+<a name="l00269"></a>00269             pnode-&gt;init(order_key);
+<a name="l00270"></a>00270         } __TBB_CATCH(...) {
+<a name="l00271"></a>00271             my_node_allocator.deallocate(pnode, 1);
+<a name="l00272"></a>00272             __TBB_RETHROW();
+<a name="l00273"></a>00273         }
+<a name="l00274"></a>00274 
+<a name="l00275"></a>00275         <span class="keywordflow">return</span> (pnode);
+<a name="l00276"></a>00276     }
+<a name="l00277"></a>00277 
+<a name="l00278"></a>00278    split_ordered_list(allocator_type a = allocator_type())
+<a name="l00279"></a>00279        : my_node_allocator(a), my_element_count(0)
+<a name="l00280"></a>00280     {
+<a name="l00281"></a>00281         <span class="comment">// Immediately allocate a dummy node with order key of 0. This node</span>
+<a name="l00282"></a>00282         <span class="comment">// will always be the head of the list.</span>
+<a name="l00283"></a>00283         my_head = create_node(0);
+<a name="l00284"></a>00284     }
+<a name="l00285"></a>00285 
+<a name="l00286"></a>00286     ~split_ordered_list()
+<a name="l00287"></a>00287     {
+<a name="l00288"></a>00288         <span class="comment">// Clear the list</span>
+<a name="l00289"></a>00289         clear();
+<a name="l00290"></a>00290 
+<a name="l00291"></a>00291         <span class="comment">// Remove the head element which is not cleared by clear()</span>
+<a name="l00292"></a>00292         nodeptr_t pnode = my_head;
+<a name="l00293"></a>00293         my_head = NULL;
+<a name="l00294"></a>00294 
+<a name="l00295"></a>00295         __TBB_ASSERT(pnode != NULL &amp;&amp; pnode-&gt;my_next == NULL, <span class="stringliteral">"Invalid head list node"</span>);
+<a name="l00296"></a>00296 
+<a name="l00297"></a>00297         destroy_node(pnode);
+<a name="l00298"></a>00298     }
+<a name="l00299"></a>00299 
+<a name="l00300"></a>00300     <span class="comment">// Common forward list functions</span>
+<a name="l00301"></a>00301 
+<a name="l00302"></a>00302     allocator_type get_allocator()<span class="keyword"> const </span>{
+<a name="l00303"></a>00303         <span class="keywordflow">return</span> (my_node_allocator);
+<a name="l00304"></a>00304     }
+<a name="l00305"></a>00305 
+<a name="l00306"></a>00306     <span class="keywordtype">void</span> clear() {
+<a name="l00307"></a>00307         nodeptr_t pnext;
+<a name="l00308"></a>00308         nodeptr_t pnode = my_head;
+<a name="l00309"></a>00309 
+<a name="l00310"></a>00310         __TBB_ASSERT(my_head != NULL, <span class="stringliteral">"Invalid head list node"</span>);
+<a name="l00311"></a>00311         pnext = pnode-&gt;my_next;
+<a name="l00312"></a>00312         pnode-&gt;my_next = NULL;
+<a name="l00313"></a>00313         pnode = pnext;
+<a name="l00314"></a>00314 
+<a name="l00315"></a>00315         <span class="keywordflow">while</span> (pnode != NULL)
+<a name="l00316"></a>00316         {
+<a name="l00317"></a>00317             pnext = pnode-&gt;my_next;
+<a name="l00318"></a>00318             destroy_node(pnode);
+<a name="l00319"></a>00319             pnode = pnext;
+<a name="l00320"></a>00320         }
+<a name="l00321"></a>00321 
+<a name="l00322"></a>00322         my_element_count = 0;
+<a name="l00323"></a>00323     }
+<a name="l00324"></a>00324 
+<a name="l00325"></a>00325     <span class="comment">// Returns a first non-dummy element in the SOL</span>
+<a name="l00326"></a>00326     iterator begin() {
+<a name="l00327"></a>00327         <span class="keywordflow">return</span> first_real_iterator(raw_begin());
+<a name="l00328"></a>00328     }
+<a name="l00329"></a>00329 
+<a name="l00330"></a>00330     <span class="comment">// Returns a first non-dummy element in the SOL</span>
+<a name="l00331"></a>00331     const_iterator begin()<span class="keyword"> const </span>{
+<a name="l00332"></a>00332         <span class="keywordflow">return</span> first_real_iterator(raw_begin());
+<a name="l00333"></a>00333     }
+<a name="l00334"></a>00334 
+<a name="l00335"></a>00335     iterator end() {
+<a name="l00336"></a>00336         <span class="keywordflow">return</span> (iterator(0, <span class="keyword">this</span>));
+<a name="l00337"></a>00337     }
+<a name="l00338"></a>00338 
+<a name="l00339"></a>00339     const_iterator end()<span class="keyword"> const </span>{
+<a name="l00340"></a>00340         <span class="keywordflow">return</span> (const_iterator(0, <span class="keyword">this</span>));
+<a name="l00341"></a>00341     }
+<a name="l00342"></a>00342 
+<a name="l00343"></a>00343     const_iterator cbegin()<span class="keyword"> const </span>{
+<a name="l00344"></a>00344         <span class="keywordflow">return</span> (((<span class="keyword">const</span> self_type *)<span class="keyword">this</span>)-&gt;begin());
+<a name="l00345"></a>00345     }
+<a name="l00346"></a>00346 
+<a name="l00347"></a>00347     const_iterator cend()<span class="keyword"> const </span>{
+<a name="l00348"></a>00348         <span class="keywordflow">return</span> (((<span class="keyword">const</span> self_type *)<span class="keyword">this</span>)-&gt;end());
+<a name="l00349"></a>00349     }
+<a name="l00350"></a>00350 
+<a name="l00351"></a>00351     <span class="comment">// Checks if the number of elements (non-dummy) is 0</span>
+<a name="l00352"></a>00352     <span class="keywordtype">bool</span> empty()<span class="keyword"> const </span>{
+<a name="l00353"></a>00353         <span class="keywordflow">return</span> (my_element_count == 0);
+<a name="l00354"></a>00354     }
+<a name="l00355"></a>00355 
+<a name="l00356"></a>00356     <span class="comment">// Returns the number of non-dummy elements in the list</span>
+<a name="l00357"></a>00357     size_type size()<span class="keyword"> const </span>{
+<a name="l00358"></a>00358         <span class="keywordflow">return</span> my_element_count;
+<a name="l00359"></a>00359     }
+<a name="l00360"></a>00360 
+<a name="l00361"></a>00361     <span class="comment">// Returns the maximum size of the list, determined by the allocator</span>
+<a name="l00362"></a>00362     size_type max_size()<span class="keyword"> const </span>{
+<a name="l00363"></a>00363         <span class="keywordflow">return</span> my_node_allocator.max_size();
+<a name="l00364"></a>00364     }
+<a name="l00365"></a>00365 
+<a name="l00366"></a>00366     <span class="comment">// Swaps 'this' list with the passed in one</span>
+<a name="l00367"></a>00367     <span class="keywordtype">void</span> swap(self_type&amp; other)
+<a name="l00368"></a>00368     {
+<a name="l00369"></a>00369         <span class="keywordflow">if</span> (<span class="keyword">this</span> == &amp;other)
+<a name="l00370"></a>00370         {
+<a name="l00371"></a>00371             <span class="comment">// Nothing to do</span>
+<a name="l00372"></a>00372             <span class="keywordflow">return</span>;
+<a name="l00373"></a>00373         }
+<a name="l00374"></a>00374 
+<a name="l00375"></a>00375         std::swap(my_element_count, other.my_element_count);
+<a name="l00376"></a>00376         std::swap(my_head, other.my_head);
+<a name="l00377"></a>00377     }
+<a name="l00378"></a>00378 
+<a name="l00379"></a>00379     <span class="comment">// Split-order list functions</span>
+<a name="l00380"></a>00380 
+<a name="l00381"></a>00381     <span class="comment">// Returns a first element in the SOL, which is always a dummy</span>
+<a name="l00382"></a>00382     raw_iterator raw_begin() {
+<a name="l00383"></a>00383         <span class="keywordflow">return</span> raw_iterator(my_head);
+<a name="l00384"></a>00384     }
+<a name="l00385"></a>00385 
+<a name="l00386"></a>00386     <span class="comment">// Returns a first element in the SOL, which is always a dummy</span>
+<a name="l00387"></a>00387     raw_const_iterator raw_begin()<span class="keyword"> const </span>{
+<a name="l00388"></a>00388         <span class="keywordflow">return</span> raw_const_iterator(my_head);
+<a name="l00389"></a>00389     }
+<a name="l00390"></a>00390 
+<a name="l00391"></a>00391     raw_iterator raw_end() {
+<a name="l00392"></a>00392         <span class="keywordflow">return</span> raw_iterator(0);
+<a name="l00393"></a>00393     }
+<a name="l00394"></a>00394 
+<a name="l00395"></a>00395     raw_const_iterator raw_end()<span class="keyword"> const </span>{
+<a name="l00396"></a>00396         <span class="keywordflow">return</span> raw_const_iterator(0);
+<a name="l00397"></a>00397     }
+<a name="l00398"></a>00398 
+<a name="l00399"></a>00399     <span class="keyword">static</span> sokey_t get_order_key(<span class="keyword">const</span> raw_const_iterator&amp; it) {
+<a name="l00400"></a>00400         <span class="keywordflow">return</span> it.get_node_ptr()-&gt;get_order_key();
+<a name="l00401"></a>00401     }
+<a name="l00402"></a>00402 
+<a name="l00403"></a>00403     <span class="keyword">static</span> sokey_t get_safe_order_key(<span class="keyword">const</span> raw_const_iterator&amp; it) {
+<a name="l00404"></a>00404         <span class="keywordflow">if</span>( !it.get_node_ptr() ) <span class="keywordflow">return</span> sokey_t(~0U);
+<a name="l00405"></a>00405         <span class="keywordflow">return</span> it.get_node_ptr()-&gt;get_order_key();
+<a name="l00406"></a>00406     }
+<a name="l00407"></a>00407 
+<a name="l00408"></a>00408     <span class="comment">// Returns a public iterator version of the internal iterator. Public iterator must not</span>
+<a name="l00409"></a>00409     <span class="comment">// be a dummy private iterator.</span>
+<a name="l00410"></a>00410     iterator get_iterator(raw_iterator it) {
+<a name="l00411"></a>00411         __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()-&gt;is_dummy(), <span class="stringliteral">"Invalid user node (dummy)"</span>);
+<a name="l00412"></a>00412         <span class="keywordflow">return</span> iterator(it.get_node_ptr(), <span class="keyword">this</span>);
+<a name="l00413"></a>00413     }
+<a name="l00414"></a>00414 
+<a name="l00415"></a>00415     <span class="comment">// Returns a public iterator version of the internal iterator. Public iterator must not</span>
+<a name="l00416"></a>00416     <span class="comment">// be a dummy private iterator.</span>
+<a name="l00417"></a>00417     const_iterator get_iterator(raw_const_iterator it)<span class="keyword"> const </span>{
+<a name="l00418"></a>00418         __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()-&gt;is_dummy(), <span class="stringliteral">"Invalid user node (dummy)"</span>);
+<a name="l00419"></a>00419         <span class="keywordflow">return</span> const_iterator(it.get_node_ptr(), <span class="keyword">this</span>);
+<a name="l00420"></a>00420     }
+<a name="l00421"></a>00421 
+<a name="l00422"></a>00422     <span class="comment">// Returns a non-const version of the raw_iterator</span>
+<a name="l00423"></a>00423     raw_iterator get_iterator(raw_const_iterator it) {
+<a name="l00424"></a>00424         <span class="keywordflow">return</span> raw_iterator(it.get_node_ptr());
+<a name="l00425"></a>00425     }
+<a name="l00426"></a>00426 
+<a name="l00427"></a>00427     <span class="comment">// Returns a non-const version of the iterator</span>
+<a name="l00428"></a>00428     <span class="keyword">static</span> iterator get_iterator(const_iterator it) {
+<a name="l00429"></a>00429         <span class="keywordflow">return</span> iterator(it.my_node_ptr, it.my_list_ptr);
+<a name="l00430"></a>00430     }
+<a name="l00431"></a>00431 
+<a name="l00432"></a>00432     <span class="comment">// Returns a public iterator version of a first non-dummy internal iterator at or after</span>
+<a name="l00433"></a>00433     <span class="comment">// the passed in internal iterator.</span>
+<a name="l00434"></a>00434     iterator first_real_iterator(raw_iterator it)
+<a name="l00435"></a>00435     {
+<a name="l00436"></a>00436         <span class="comment">// Skip all dummy, internal only iterators</span>
+<a name="l00437"></a>00437         <span class="keywordflow">while</span> (it != raw_end() &amp;&amp; it.get_node_ptr()-&gt;is_dummy())
+<a name="l00438"></a>00438             ++it;
+<a name="l00439"></a>00439 
+<a name="l00440"></a>00440         <span class="keywordflow">return</span> iterator(it.get_node_ptr(), <span class="keyword">this</span>);
+<a name="l00441"></a>00441     }
+<a name="l00442"></a>00442 
+<a name="l00443"></a>00443     <span class="comment">// Returns a public iterator version of a first non-dummy internal iterator at or after</span>
+<a name="l00444"></a>00444     <span class="comment">// the passed in internal iterator.</span>
+<a name="l00445"></a>00445     const_iterator first_real_iterator(raw_const_iterator it)<span class="keyword"> const</span>
+<a name="l00446"></a>00446 <span class="keyword">    </span>{
+<a name="l00447"></a>00447         <span class="comment">// Skip all dummy, internal only iterators</span>
+<a name="l00448"></a>00448         <span class="keywordflow">while</span> (it != raw_end() &amp;&amp; it.get_node_ptr()-&gt;is_dummy())
+<a name="l00449"></a>00449             ++it;
+<a name="l00450"></a>00450 
+<a name="l00451"></a>00451         <span class="keywordflow">return</span> const_iterator(it.get_node_ptr(), <span class="keyword">this</span>);
+<a name="l00452"></a>00452     }
+<a name="l00453"></a>00453 
+<a name="l00454"></a>00454     <span class="comment">// Erase an element using the allocator</span>
+<a name="l00455"></a>00455     <span class="keywordtype">void</span> destroy_node(nodeptr_t pnode) {
+<a name="l00456"></a>00456         my_node_allocator.destroy(pnode);
+<a name="l00457"></a>00457         my_node_allocator.deallocate(pnode, 1);
+<a name="l00458"></a>00458     }
+<a name="l00459"></a>00459 
+<a name="l00460"></a>00460     <span class="comment">// Try to insert a new element in the list. If insert fails, return the node that</span>
+<a name="l00461"></a>00461     <span class="comment">// was inserted instead.</span>
+<a name="l00462"></a>00462     nodeptr_t try_insert(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) {
+<a name="l00463"></a>00463         new_node-&gt;my_next = current_node;
+<a name="l00464"></a>00464         <span class="keywordflow">return</span> previous-&gt;atomic_set_next(new_node, current_node);
+<a name="l00465"></a>00465     }
+<a name="l00466"></a>00466 
+<a name="l00467"></a>00467     <span class="comment">// Insert a new element between passed in iterators</span>
+<a name="l00468"></a>00468     std::pair&lt;iterator, bool&gt; try_insert(raw_iterator it, raw_iterator next, <span class="keyword">const</span> value_type &amp;value, sokey_t order_key, size_type *new_count)
+<a name="l00469"></a>00469     {
+<a name="l00470"></a>00470         nodeptr_t pnode = create_node(order_key, value);
+<a name="l00471"></a>00471         nodeptr_t inserted_node = try_insert(it.get_node_ptr(), pnode, next.get_node_ptr());
+<a name="l00472"></a>00472 
+<a name="l00473"></a>00473         <span class="keywordflow">if</span> (inserted_node == pnode)
+<a name="l00474"></a>00474         {
+<a name="l00475"></a>00475             <span class="comment">// If the insert succeeded, check that the order is correct and increment the element count</span>
+<a name="l00476"></a>00476             check_range();
+<a name="l00477"></a>00477             *new_count = __TBB_FetchAndAddW((uintptr_t*)&amp;my_element_count, uintptr_t(1));
+<a name="l00478"></a>00478             <span class="keywordflow">return</span> std::pair&lt;iterator, bool&gt;(iterator(pnode, <span class="keyword">this</span>), <span class="keyword">true</span>);
+<a name="l00479"></a>00479         }
+<a name="l00480"></a>00480         <span class="keywordflow">else</span>
+<a name="l00481"></a>00481         {
+<a name="l00482"></a>00482             <span class="comment">// If the insert failed (element already there), then delete the new one</span>
+<a name="l00483"></a>00483             destroy_node(pnode);
+<a name="l00484"></a>00484             <span class="keywordflow">return</span> std::pair&lt;iterator, bool&gt;(end(), <span class="keyword">false</span>);
+<a name="l00485"></a>00485         }
+<a name="l00486"></a>00486     }
+<a name="l00487"></a>00487 
+<a name="l00488"></a>00488     <span class="comment">// Insert a new dummy element, starting search at a parent dummy element</span>
+<a name="l00489"></a>00489     raw_iterator insert_dummy(raw_iterator it, sokey_t order_key)
+<a name="l00490"></a>00490     {
+<a name="l00491"></a>00491         raw_iterator last = raw_end();
+<a name="l00492"></a>00492         raw_iterator where = it;
+<a name="l00493"></a>00493 
+<a name="l00494"></a>00494         __TBB_ASSERT(where != last, <span class="stringliteral">"Invalid head node"</span>);
+<a name="l00495"></a>00495 
+<a name="l00496"></a>00496         ++where;
+<a name="l00497"></a>00497 
+<a name="l00498"></a>00498         <span class="comment">// Create a dummy element up front, even though it may be discarded (due to concurrent insertion)</span>
+<a name="l00499"></a>00499         nodeptr_t dummy_node = create_node(order_key);
+<a name="l00500"></a>00500 
+<a name="l00501"></a>00501         <span class="keywordflow">for</span> (;;)
+<a name="l00502"></a>00502         {
+<a name="l00503"></a>00503             __TBB_ASSERT(it != last, <span class="stringliteral">"Invalid head list node"</span>);
+<a name="l00504"></a>00504 
+<a name="l00505"></a>00505             <span class="comment">// If the head iterator is at the end of the list, or past the point where this dummy</span>
+<a name="l00506"></a>00506             <span class="comment">// node needs to be inserted, then try to insert it.</span>
+<a name="l00507"></a>00507             <span class="keywordflow">if</span> (where == last || get_order_key(where) &gt; order_key)
+<a name="l00508"></a>00508             {
+<a name="l00509"></a>00509                 __TBB_ASSERT(get_order_key(it) &lt; order_key, <span class="stringliteral">"Invalid node order in the list"</span>);
+<a name="l00510"></a>00510 
+<a name="l00511"></a>00511                 <span class="comment">// Try to insert it in the right place</span>
+<a name="l00512"></a>00512                 nodeptr_t inserted_node = try_insert(it.get_node_ptr(), dummy_node, where.get_node_ptr());
+<a name="l00513"></a>00513 
+<a name="l00514"></a>00514                 <span class="keywordflow">if</span> (inserted_node == dummy_node)
+<a name="l00515"></a>00515                 {
+<a name="l00516"></a>00516                     <span class="comment">// Insertion succeeded, check the list for order violations</span>
+<a name="l00517"></a>00517                     check_range();
+<a name="l00518"></a>00518                     <span class="keywordflow">return</span> raw_iterator(dummy_node);
+<a name="l00519"></a>00519                 }
+<a name="l00520"></a>00520                 <span class="keywordflow">else</span>
+<a name="l00521"></a>00521                 {
+<a name="l00522"></a>00522                     <span class="comment">// Insertion failed: either dummy node was inserted by another thread, or</span>
+<a name="l00523"></a>00523                     <span class="comment">// a real element was inserted at exactly the same place as dummy node.</span>
+<a name="l00524"></a>00524                     <span class="comment">// Proceed with the search from the previous location where order key was</span>
+<a name="l00525"></a>00525                     <span class="comment">// known to be larger (note: this is legal only because there is no safe</span>
+<a name="l00526"></a>00526                     <span class="comment">// concurrent erase operation supported).</span>
+<a name="l00527"></a>00527                     where = it;
+<a name="l00528"></a>00528                     ++where;
+<a name="l00529"></a>00529                     <span class="keywordflow">continue</span>;
+<a name="l00530"></a>00530                 }
+<a name="l00531"></a>00531             }
+<a name="l00532"></a>00532             <span class="keywordflow">else</span> <span class="keywordflow">if</span> (get_order_key(where) == order_key)
+<a name="l00533"></a>00533             {
+<a name="l00534"></a>00534                 <span class="comment">// Another dummy node with the same value found, discard the new one.</span>
+<a name="l00535"></a>00535                 destroy_node(dummy_node);
+<a name="l00536"></a>00536                 <span class="keywordflow">return</span> where;
+<a name="l00537"></a>00537             }
+<a name="l00538"></a>00538 
+<a name="l00539"></a>00539             <span class="comment">// Move the iterator forward</span>
+<a name="l00540"></a>00540             it = where;
+<a name="l00541"></a>00541             ++where;
+<a name="l00542"></a>00542         }
+<a name="l00543"></a>00543 
+<a name="l00544"></a>00544     }
+<a name="l00545"></a>00545 
+<a name="l00546"></a>00546     <span class="comment">// This erase function can handle both real and dummy nodes</span>
+<a name="l00547"></a>00547     <span class="keywordtype">void</span> erase_node(raw_iterator previous, raw_const_iterator&amp; where)
+<a name="l00548"></a>00548     {
+<a name="l00549"></a>00549         nodeptr_t pnode = (where++).get_node_ptr();
+<a name="l00550"></a>00550         nodeptr_t prevnode = previous.get_node_ptr();
+<a name="l00551"></a>00551         __TBB_ASSERT(prevnode-&gt;my_next == pnode, <span class="stringliteral">"Erase must take consecutive iterators"</span>);
+<a name="l00552"></a>00552         prevnode-&gt;my_next = pnode-&gt;my_next;
+<a name="l00553"></a>00553 
+<a name="l00554"></a>00554         destroy_node(pnode);
+<a name="l00555"></a>00555     }
+<a name="l00556"></a>00556 
+<a name="l00557"></a>00557     <span class="comment">// Erase the element (previous node needs to be passed because this is a forward only list)</span>
+<a name="l00558"></a>00558     iterator erase_node(raw_iterator previous, const_iterator where)
+<a name="l00559"></a>00559     {
+<a name="l00560"></a>00560         raw_const_iterator it = where;
+<a name="l00561"></a>00561         erase_node(previous, it);
+<a name="l00562"></a>00562         my_element_count--;
+<a name="l00563"></a>00563 
+<a name="l00564"></a>00564         <span class="keywordflow">return</span> get_iterator(first_real_iterator(it));
+<a name="l00565"></a>00565     }
+<a name="l00566"></a>00566 
+<a name="l00567"></a>00567     <span class="comment">// Move all elements from the passed in split-ordered list to this one</span>
+<a name="l00568"></a>00568     <span class="keywordtype">void</span> move_all(self_type&amp; source)
+<a name="l00569"></a>00569     {
+<a name="l00570"></a>00570         raw_const_iterator first = source.raw_begin();
+<a name="l00571"></a>00571         raw_const_iterator last = source.raw_end();
+<a name="l00572"></a>00572 
+<a name="l00573"></a>00573         <span class="keywordflow">if</span> (first == last)
+<a name="l00574"></a>00574             <span class="keywordflow">return</span>;
+<a name="l00575"></a>00575 
+<a name="l00576"></a>00576         nodeptr_t previous_node = my_head;
+<a name="l00577"></a>00577         raw_const_iterator begin_iterator = first++;
+<a name="l00578"></a>00578 
+<a name="l00579"></a>00579         <span class="comment">// Move all elements one by one, including dummy ones</span>
+<a name="l00580"></a>00580         <span class="keywordflow">for</span> (raw_const_iterator it = first; it != last;)
+<a name="l00581"></a>00581         {
+<a name="l00582"></a>00582             nodeptr_t pnode = it.get_node_ptr();
+<a name="l00583"></a>00583 
+<a name="l00584"></a>00584             nodeptr_t dummy_node = pnode-&gt;is_dummy() ? create_node(pnode-&gt;get_order_key()) : create_node(pnode-&gt;get_order_key(), pnode-&gt;my_element);
+<a name="l00585"></a>00585             previous_node = try_insert(previous_node, dummy_node, NULL);
+<a name="l00586"></a>00586             __TBB_ASSERT(previous_node != NULL, <span class="stringliteral">"Insertion must succeed"</span>);
+<a name="l00587"></a>00587             raw_const_iterator where = it++;
+<a name="l00588"></a>00588             source.erase_node(get_iterator(begin_iterator), where);
+<a name="l00589"></a>00589         }
+<a name="l00590"></a>00590         check_range();
+<a name="l00591"></a>00591     }
+<a name="l00592"></a>00592 
+<a name="l00593"></a>00593 
+<a name="l00594"></a>00594 <span class="keyword">private</span>:
+<a name="l00595"></a>00595 
+<a name="l00596"></a>00596     <span class="comment">// Check the list for order violations</span>
+<a name="l00597"></a>00597     <span class="keywordtype">void</span> check_range()
+<a name="l00598"></a>00598     {
+<a name="l00599"></a>00599 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00600"></a>00600 <span class="preprocessor"></span>        <span class="keywordflow">for</span> (raw_iterator it = raw_begin(); it != raw_end(); ++it)
+<a name="l00601"></a>00601         {
+<a name="l00602"></a>00602             raw_iterator next_iterator = it;
+<a name="l00603"></a>00603             ++next_iterator;
+<a name="l00604"></a>00604 
+<a name="l00605"></a>00605             __TBB_ASSERT(next_iterator == end() || next_iterator.get_node_ptr()-&gt;get_order_key() &gt;= it.get_node_ptr()-&gt;get_order_key(), <span class="stringliteral">"!!! List order inconsistency !!!"</span>);
+<a name="l00606"></a>00606         }
+<a name="l00607"></a>00607 <span class="preprocessor">#endif</span>
+<a name="l00608"></a>00608 <span class="preprocessor"></span>    }
+<a name="l00609"></a>00609 
+<a name="l00610"></a>00610     <span class="keyword">typename</span> allocator_type::template rebind&lt;node&gt;::other my_node_allocator;  <span class="comment">// allocator object for nodes</span>
+<a name="l00611"></a>00611     size_type                                             my_element_count;   <span class="comment">// Total item count, not counting dummy nodes</span>
+<a name="l00612"></a>00612     nodeptr_t                                             my_head;            <span class="comment">// pointer to head node</span>
+<a name="l00613"></a>00613 };
+<a name="l00614"></a>00614 
+<a name="l00615"></a>00615 <span class="comment">// Template class for hash compare</span>
+<a name="l00616"></a>00616 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Key, <span class="keyword">typename</span> Hasher, <span class="keyword">typename</span> Key_equality&gt;
+<a name="l00617"></a>00617 <span class="keyword">class </span>hash_compare
+<a name="l00618"></a>00618 {
+<a name="l00619"></a>00619 <span class="keyword">public</span>:
+<a name="l00620"></a>00620     hash_compare() {}
+<a name="l00621"></a>00621 
+<a name="l00622"></a>00622     hash_compare(Hasher a_hasher) : my_hash_object(a_hasher) {}
+<a name="l00623"></a>00623 
+<a name="l00624"></a>00624     hash_compare(Hasher a_hasher, Key_equality a_keyeq) : my_hash_object(a_hasher), my_key_compare_object(a_keyeq) {}
+<a name="l00625"></a>00625 
+<a name="l00626"></a>00626     size_t operator()(<span class="keyword">const</span> Key&amp; key)<span class="keyword"> const </span>{
+<a name="l00627"></a>00627         <span class="keywordflow">return</span> ((size_t)my_hash_object(key));
+<a name="l00628"></a>00628     }
+<a name="l00629"></a>00629 
+<a name="l00630"></a>00630     <span class="keywordtype">bool</span> operator()(<span class="keyword">const</span> Key&amp; key1, <span class="keyword">const</span> Key&amp; key2)<span class="keyword"> const </span>{
+<a name="l00631"></a>00631         <span class="keywordflow">return</span> (!my_key_compare_object(key1, key2));
+<a name="l00632"></a>00632     }
+<a name="l00633"></a>00633 
+<a name="l00634"></a>00634     Hasher       my_hash_object;        <span class="comment">// The hash object</span>
+<a name="l00635"></a>00635     Key_equality my_key_compare_object; <span class="comment">// The equality comparator object</span>
+<a name="l00636"></a>00636 };
+<a name="l00637"></a>00637 
+<a name="l00638"></a>00638 <span class="preprocessor">#if _MSC_VER</span>
+<a name="l00639"></a>00639 <span class="preprocessor"></span><span class="preprocessor">#pragma warning(push)</span>
+<a name="l00640"></a>00640 <span class="preprocessor"></span><span class="preprocessor">#pragma warning(disable: 4127) // warning 4127 -- while (true) has a constant expression in it (for allow_multimapping)</span>
+<a name="l00641"></a>00641 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00642"></a>00642 <span class="preprocessor"></span>
+<a name="l00643"></a>00643 <span class="keyword">template</span> &lt;<span class="keyword">typename</span> Traits&gt;
+<a name="l00644"></a>00644 <span class="keyword">class </span>concurrent_unordered_base : <span class="keyword">public</span> Traits
+<a name="l00645"></a>00645 {
+<a name="l00646"></a>00646 <span class="keyword">protected</span>:
+<a name="l00647"></a>00647     <span class="comment">// Type definitions</span>
+<a name="l00648"></a>00648     <span class="keyword">typedef</span> concurrent_unordered_base&lt;Traits&gt; self_type;
+<a name="l00649"></a>00649     <span class="keyword">typedef</span> <span class="keyword">typename</span> Traits::value_type value_type;
+<a name="l00650"></a>00650     <span class="keyword">typedef</span> <span class="keyword">typename</span> Traits::key_type key_type;
+<a name="l00651"></a>00651     <span class="keyword">typedef</span> <span class="keyword">typename</span> Traits::hash_compare hash_compare;
+<a name="l00652"></a>00652     <span class="keyword">typedef</span> <span class="keyword">typename</span> Traits::value_compare value_compare;
+<a name="l00653"></a>00653     <span class="keyword">typedef</span> <span class="keyword">typename</span> Traits::allocator_type allocator_type;
+<a name="l00654"></a>00654     <span class="keyword">typedef</span> <span class="keyword">typename</span> allocator_type::pointer pointer;
+<a name="l00655"></a>00655     <span class="keyword">typedef</span> <span class="keyword">typename</span> allocator_type::const_pointer const_pointer;
+<a name="l00656"></a>00656     <span class="keyword">typedef</span> <span class="keyword">typename</span> allocator_type::reference reference;
+<a name="l00657"></a>00657     <span class="keyword">typedef</span> <span class="keyword">typename</span> allocator_type::const_reference const_reference;
+<a name="l00658"></a>00658     <span class="keyword">typedef</span> <span class="keyword">typename</span> allocator_type::size_type size_type;
+<a name="l00659"></a>00659     <span class="keyword">typedef</span> <span class="keyword">typename</span> allocator_type::difference_type difference_type;
+<a name="l00660"></a>00660     <span class="keyword">typedef</span> split_ordered_list&lt;value_type, typename Traits::allocator_type&gt; solist_t;
+<a name="l00661"></a>00661     <span class="keyword">typedef</span> <span class="keyword">typename</span> solist_t::nodeptr_t nodeptr_t;
+<a name="l00662"></a>00662     <span class="comment">// Iterators that walk the entire split-order list, including dummy nodes</span>
+<a name="l00663"></a>00663     <span class="keyword">typedef</span> <span class="keyword">typename</span> solist_t::raw_iterator raw_iterator;
+<a name="l00664"></a>00664     <span class="keyword">typedef</span> <span class="keyword">typename</span> solist_t::raw_const_iterator raw_const_iterator;
+<a name="l00665"></a>00665     <span class="keyword">typedef</span> <span class="keyword">typename</span> solist_t::iterator iterator; <span class="comment">// TODO: restore const iterator for unordered_sets</span>
+<a name="l00666"></a>00666     <span class="keyword">typedef</span> <span class="keyword">typename</span> solist_t::const_iterator const_iterator;
+<a name="l00667"></a>00667     <span class="keyword">typedef</span> iterator local_iterator;
+<a name="l00668"></a>00668     <span class="keyword">typedef</span> const_iterator const_local_iterator;
+<a name="l00669"></a>00669     <span class="keyword">using</span> Traits::my_hash_compare;
+<a name="l00670"></a>00670     <span class="keyword">using</span> Traits::get_key;
+<a name="l00671"></a>00671     <span class="keyword">using</span> Traits::allow_multimapping;
+<a name="l00672"></a>00672 
+<a name="l00673"></a>00673 <span class="keyword">private</span>:
+<a name="l00674"></a>00674     <span class="keyword">typedef</span> std::pair&lt;iterator, iterator&gt; pairii_t;
+<a name="l00675"></a>00675     <span class="keyword">typedef</span> std::pair&lt;const_iterator, const_iterator&gt; paircc_t;
+<a name="l00676"></a>00676 
+<a name="l00677"></a>00677     <span class="keyword">static</span> size_type <span class="keyword">const</span> pointers_per_table = <span class="keyword">sizeof</span>(size_type) * 8;              <span class="comment">// One bucket segment per bit</span>
+<a name="l00678"></a>00678     <span class="keyword">static</span> <span class="keyword">const</span> size_type initial_bucket_number = 8;                               <span class="comment">// Initial number of buckets</span>
+<a name="l00679"></a>00679     <span class="keyword">static</span> <span class="keyword">const</span> size_type initial_bucket_load = 4;                                <span class="comment">// Initial maximum number of elements per bucket</span>
+<a name="l00680"></a>00680 
+<a name="l00681"></a>00681 <span class="keyword">protected</span>:
+<a name="l00682"></a>00682     <span class="comment">// Constructors/Destructors</span>
+<a name="l00683"></a>00683     concurrent_unordered_base(size_type n_of_buckets = initial_bucket_number,
+<a name="l00684"></a>00684         <span class="keyword">const</span> hash_compare&amp; hc = hash_compare(), <span class="keyword">const</span> allocator_type&amp; a = allocator_type())
+<a name="l00685"></a>00685         : Traits(hc), my_solist(a),
+<a name="l00686"></a>00686           my_allocator(a), my_maximum_bucket_size((float) initial_bucket_load)
+<a name="l00687"></a>00687     {
+<a name="l00688"></a>00688         <span class="keywordflow">if</span>( n_of_buckets == 0) ++n_of_buckets;
+<a name="l00689"></a>00689         my_number_of_buckets = 1&lt;&lt;__TBB_Log2((uintptr_t)n_of_buckets*2-1); <span class="comment">// round up to power of 2</span>
+<a name="l00690"></a>00690         internal_init();
+<a name="l00691"></a>00691     }
+<a name="l00692"></a>00692 
+<a name="l00693"></a>00693     concurrent_unordered_base(<span class="keyword">const</span> concurrent_unordered_base&amp; right, <span class="keyword">const</span> allocator_type&amp; a)
+<a name="l00694"></a>00694         : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
+<a name="l00695"></a>00695     {
+<a name="l00696"></a>00696         internal_copy(right);
+<a name="l00697"></a>00697     }
+<a name="l00698"></a>00698 
+<a name="l00699"></a>00699     concurrent_unordered_base(<span class="keyword">const</span> concurrent_unordered_base&amp; right)
+<a name="l00700"></a>00700         : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator())
+<a name="l00701"></a>00701     {
+<a name="l00702"></a>00702         internal_init();
+<a name="l00703"></a>00703         internal_copy(right);
+<a name="l00704"></a>00704     }
+<a name="l00705"></a>00705 
+<a name="l00706"></a>00706     concurrent_unordered_base&amp; operator=(<span class="keyword">const</span> concurrent_unordered_base&amp; right) {
+<a name="l00707"></a>00707         <span class="keywordflow">if</span> (<span class="keyword">this</span> != &amp;right)
+<a name="l00708"></a>00708             internal_copy(right);
+<a name="l00709"></a>00709         <span class="keywordflow">return</span> (*<span class="keyword">this</span>);
+<a name="l00710"></a>00710     }
+<a name="l00711"></a>00711 
+<a name="l00712"></a>00712     ~concurrent_unordered_base() {
+<a name="l00713"></a>00713         <span class="comment">// Delete all node segments</span>
+<a name="l00714"></a>00714         internal_clear();
+<a name="l00715"></a>00715     }
+<a name="l00716"></a>00716 
+<a name="l00717"></a>00717 <span class="keyword">public</span>:
+<a name="l00718"></a>00718     allocator_type get_allocator()<span class="keyword"> const </span>{
+<a name="l00719"></a>00719         <span class="keywordflow">return</span> my_solist.get_allocator();
+<a name="l00720"></a>00720     }
+<a name="l00721"></a>00721 
+<a name="l00722"></a>00722     <span class="comment">// Size and capacity function</span>
+<a name="l00723"></a>00723     <span class="keywordtype">bool</span> empty()<span class="keyword"> const </span>{
+<a name="l00724"></a>00724         <span class="keywordflow">return</span> my_solist.empty();
+<a name="l00725"></a>00725     }
+<a name="l00726"></a>00726 
+<a name="l00727"></a>00727     size_type size()<span class="keyword"> const </span>{
+<a name="l00728"></a>00728         <span class="keywordflow">return</span> my_solist.size();
+<a name="l00729"></a>00729     }
+<a name="l00730"></a>00730 
+<a name="l00731"></a>00731     size_type max_size()<span class="keyword"> const </span>{
+<a name="l00732"></a>00732         <span class="keywordflow">return</span> my_solist.max_size();
+<a name="l00733"></a>00733     }
+<a name="l00734"></a>00734 
+<a name="l00735"></a>00735     <span class="comment">// Iterators </span>
+<a name="l00736"></a>00736     iterator begin() {
+<a name="l00737"></a>00737         <span class="keywordflow">return</span> my_solist.begin();
+<a name="l00738"></a>00738     }
+<a name="l00739"></a>00739 
+<a name="l00740"></a>00740     const_iterator begin()<span class="keyword"> const </span>{
+<a name="l00741"></a>00741         <span class="keywordflow">return</span> my_solist.begin();
+<a name="l00742"></a>00742     }
+<a name="l00743"></a>00743 
+<a name="l00744"></a>00744     iterator end() {
+<a name="l00745"></a>00745         <span class="keywordflow">return</span> my_solist.end();
+<a name="l00746"></a>00746     }
+<a name="l00747"></a>00747 
+<a name="l00748"></a>00748     const_iterator end()<span class="keyword"> const </span>{
+<a name="l00749"></a>00749         <span class="keywordflow">return</span> my_solist.end();
+<a name="l00750"></a>00750     }
+<a name="l00751"></a>00751 
+<a name="l00752"></a>00752     const_iterator cbegin()<span class="keyword"> const </span>{
+<a name="l00753"></a>00753         <span class="keywordflow">return</span> my_solist.cbegin();
+<a name="l00754"></a>00754     }
+<a name="l00755"></a>00755 
+<a name="l00756"></a>00756     const_iterator cend()<span class="keyword"> const </span>{
+<a name="l00757"></a>00757         <span class="keywordflow">return</span> my_solist.cend();
+<a name="l00758"></a>00758     }
+<a name="l00759"></a>00759 
+<a name="l00760"></a>00760     <span class="comment">// Parallel traversal support</span>
+<a name="l00761"></a>00761     <span class="keyword">class </span>const_range_type : tbb::internal::no_assign {
+<a name="l00762"></a>00762         <span class="keyword">const</span> concurrent_unordered_base &amp;my_table;
+<a name="l00763"></a>00763         raw_const_iterator my_begin_node;
+<a name="l00764"></a>00764         raw_const_iterator my_end_node;
+<a name="l00765"></a>00765         <span class="keyword">mutable</span> raw_const_iterator my_midpoint_node;
+<a name="l00766"></a>00766     <span class="keyword">public</span>:
+<a name="l00768"></a>00768         <span class="keyword">typedef</span> <span class="keyword">typename</span> concurrent_unordered_base::size_type size_type;
+<a name="l00769"></a>00769         <span class="keyword">typedef</span> <span class="keyword">typename</span> concurrent_unordered_base::value_type value_type;
+<a name="l00770"></a>00770         <span class="keyword">typedef</span> <span class="keyword">typename</span> concurrent_unordered_base::reference reference;
+<a name="l00771"></a>00771         <span class="keyword">typedef</span> <span class="keyword">typename</span> concurrent_unordered_base::difference_type difference_type;
+<a name="l00772"></a>00772         <span class="keyword">typedef</span> <span class="keyword">typename</span> concurrent_unordered_base::const_iterator iterator;
+<a name="l00773"></a>00773 
+<a name="l00775"></a>00775         <span class="keywordtype">bool</span> empty()<span class="keyword"> const </span>{<span class="keywordflow">return</span> my_begin_node == my_end_node;}
+<a name="l00776"></a>00776 
+<a name="l00778"></a>00778         <span class="keywordtype">bool</span> is_divisible()<span class="keyword"> const </span>{
+<a name="l00779"></a>00779             <span class="keywordflow">return</span> my_midpoint_node != my_end_node;
+<a name="l00780"></a>00780         }
+<a name="l00782"></a>00782         const_range_type( const_range_type &amp;r, split ) : 
+<a name="l00783"></a>00783             my_table(r.my_table), my_end_node(r.my_end_node)
+<a name="l00784"></a>00784         {
+<a name="l00785"></a>00785             r.my_end_node = my_begin_node = r.my_midpoint_node;
+<a name="l00786"></a>00786             __TBB_ASSERT( !empty(), <span class="stringliteral">"Splitting despite the range is not divisible"</span> );
+<a name="l00787"></a>00787             __TBB_ASSERT( !r.empty(), <span class="stringliteral">"Splitting despite the range is not divisible"</span> );
+<a name="l00788"></a>00788             set_midpoint();
+<a name="l00789"></a>00789             r.set_midpoint();
+<a name="l00790"></a>00790         }
+<a name="l00792"></a>00792         const_range_type( <span class="keyword">const</span> concurrent_unordered_base &amp;a_table ) : 
+<a name="l00793"></a>00793             my_table(a_table), my_begin_node(a_table.my_solist.begin()),
+<a name="l00794"></a>00794             my_end_node(a_table.my_solist.end())
+<a name="l00795"></a>00795         {
+<a name="l00796"></a>00796             set_midpoint();
+<a name="l00797"></a>00797         }
+<a name="l00798"></a>00798         iterator begin()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> my_table.my_solist.get_iterator(my_begin_node); }
+<a name="l00799"></a>00799         iterator end()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> my_table.my_solist.get_iterator(my_end_node); }
+<a name="l00801"></a>00801         size_type grainsize()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> 1; }
+<a name="l00802"></a>00802 
+<a name="l00804"></a>00804         <span class="keywordtype">void</span> set_midpoint()<span class="keyword"> const </span>{
+<a name="l00805"></a>00805             <span class="keywordflow">if</span>( my_begin_node == my_end_node ) <span class="comment">// not divisible</span>
+<a name="l00806"></a>00806                 my_midpoint_node = my_end_node;
+<a name="l00807"></a>00807             <span class="keywordflow">else</span> {
+<a name="l00808"></a>00808                 sokey_t begin_key = solist_t::get_safe_order_key(my_begin_node);
+<a name="l00809"></a>00809                 sokey_t end_key = solist_t::get_safe_order_key(my_end_node);
+<a name="l00810"></a>00810                 size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets;
+<a name="l00811"></a>00811                 <span class="keywordflow">while</span> ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket);
+<a name="l00812"></a>00812                 my_midpoint_node = my_table.my_solist.first_real_iterator(my_table.get_bucket( mid_bucket ));
+<a name="l00813"></a>00813                 <span class="keywordflow">if</span>( my_midpoint_node == my_begin_node )
+<a name="l00814"></a>00814                     my_midpoint_node = my_end_node;
+<a name="l00815"></a>00815 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00816"></a>00816 <span class="preprocessor"></span>                <span class="keywordflow">else</span> {
+<a name="l00817"></a>00817                     sokey_t mid_key = solist_t::get_safe_order_key(my_midpoint_node);
+<a name="l00818"></a>00818                     __TBB_ASSERT( begin_key &lt; mid_key, <span class="stringliteral">"my_begin_node is after my_midpoint_node"</span> );
+<a name="l00819"></a>00819                     __TBB_ASSERT( mid_key &lt;= end_key, <span class="stringliteral">"my_midpoint_node is after my_end_node"</span> );
+<a name="l00820"></a>00820                 }
+<a name="l00821"></a>00821 <span class="preprocessor">#endif // TBB_USE_ASSERT</span>
+<a name="l00822"></a>00822 <span class="preprocessor"></span>            }
+<a name="l00823"></a>00823         }
+<a name="l00824"></a>00824     };
+<a name="l00825"></a>00825 
+<a name="l00826"></a>00826     <span class="keyword">class </span>range_type : <span class="keyword">public</span> const_range_type {
+<a name="l00827"></a>00827     <span class="keyword">public</span>:
+<a name="l00828"></a>00828         <span class="keyword">typedef</span> <span class="keyword">typename</span> concurrent_unordered_base::iterator iterator;
+<a name="l00830"></a>00830         range_type( range_type &amp;r, split ) : const_range_type( r, split() ) {}
+<a name="l00832"></a>00832         range_type( <span class="keyword">const</span> concurrent_unordered_base &amp;a_table ) : const_range_type(a_table) {}
+<a name="l00833"></a>00833 
+<a name="l00834"></a>00834         iterator begin()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> solist_t::get_iterator( const_range_type::begin() ); }
+<a name="l00835"></a>00835         iterator end()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> solist_t::get_iterator( const_range_type::end() ); }
+<a name="l00836"></a>00836     };
+<a name="l00837"></a>00837 
+<a name="l00838"></a>00838     range_type range() {
+<a name="l00839"></a>00839         <span class="keywordflow">return</span> range_type( *<span class="keyword">this</span> );
+<a name="l00840"></a>00840     }
+<a name="l00841"></a>00841 
+<a name="l00842"></a>00842     const_range_type range()<span class="keyword"> const </span>{
+<a name="l00843"></a>00843         <span class="keywordflow">return</span> const_range_type( *<span class="keyword">this</span> );
+<a name="l00844"></a>00844     }
+<a name="l00845"></a>00845 
+<a name="l00846"></a>00846     <span class="comment">// Modifiers</span>
+<a name="l00847"></a>00847     std::pair&lt;iterator, bool&gt; insert(<span class="keyword">const</span> value_type&amp; value) {
+<a name="l00848"></a>00848         <span class="keywordflow">return</span> internal_insert(value);
+<a name="l00849"></a>00849     }
+<a name="l00850"></a>00850 
+<a name="l00851"></a>00851     iterator insert(const_iterator, <span class="keyword">const</span> value_type&amp; value) {
+<a name="l00852"></a>00852         <span class="comment">// Ignore hint</span>
+<a name="l00853"></a>00853         <span class="keywordflow">return</span> insert(value).first;
+<a name="l00854"></a>00854     }
+<a name="l00855"></a>00855 
+<a name="l00856"></a>00856     <span class="keyword">template</span>&lt;<span class="keyword">class</span> Iterator&gt;
+<a name="l00857"></a>00857     <span class="keywordtype">void</span> insert(Iterator first, Iterator last) {
+<a name="l00858"></a>00858         <span class="keywordflow">for</span> (Iterator it = first; it != last; ++it)
+<a name="l00859"></a>00859             insert(*it);
+<a name="l00860"></a>00860     }
+<a name="l00861"></a>00861 
+<a name="l00862"></a>00862     iterator unsafe_erase(const_iterator where) {
+<a name="l00863"></a>00863         <span class="keywordflow">return</span> internal_erase(where);
+<a name="l00864"></a>00864     }
+<a name="l00865"></a>00865 
+<a name="l00866"></a>00866     iterator unsafe_erase(const_iterator first, const_iterator last) {
+<a name="l00867"></a>00867         <span class="keywordflow">while</span> (first != last)
+<a name="l00868"></a>00868             unsafe_erase(first++);
+<a name="l00869"></a>00869         <span class="keywordflow">return</span> my_solist.get_iterator(first);
+<a name="l00870"></a>00870     }
+<a name="l00871"></a>00871 
+<a name="l00872"></a>00872     size_type unsafe_erase(<span class="keyword">const</span> key_type&amp; key) {
+<a name="l00873"></a>00873         pairii_t where = equal_range(key);
+<a name="l00874"></a>00874         size_type item_count = internal_distance(where.first, where.second);
+<a name="l00875"></a>00875         unsafe_erase(where.first, where.second);
+<a name="l00876"></a>00876         <span class="keywordflow">return</span> item_count;
+<a name="l00877"></a>00877     }
+<a name="l00878"></a>00878 
+<a name="l00879"></a>00879     <span class="keywordtype">void</span> swap(concurrent_unordered_base&amp; right) {
+<a name="l00880"></a>00880         <span class="keywordflow">if</span> (<span class="keyword">this</span> != &amp;right) {
+<a name="l00881"></a>00881             std::swap(my_hash_compare, right.my_hash_compare); <span class="comment">// TODO: check what ADL meant here</span>
+<a name="l00882"></a>00882             my_solist.swap(right.my_solist);
+<a name="l00883"></a>00883             internal_swap_buckets(right);
+<a name="l00884"></a>00884             std::swap(my_number_of_buckets, right.my_number_of_buckets);
+<a name="l00885"></a>00885             std::swap(my_maximum_bucket_size, right.my_maximum_bucket_size);
+<a name="l00886"></a>00886         }
+<a name="l00887"></a>00887     }
+<a name="l00888"></a>00888 
+<a name="l00889"></a>00889     <span class="comment">// Observers</span>
+<a name="l00890"></a>00890     <span class="keywordtype">void</span> clear() {
+<a name="l00891"></a>00891         <span class="comment">// Clear list</span>
+<a name="l00892"></a>00892         my_solist.clear();
+<a name="l00893"></a>00893 
+<a name="l00894"></a>00894         <span class="comment">// Clear buckets</span>
+<a name="l00895"></a>00895         internal_clear();
+<a name="l00896"></a>00896     }
+<a name="l00897"></a>00897 
+<a name="l00898"></a>00898     <span class="comment">// Lookup</span>
+<a name="l00899"></a>00899     iterator find(<span class="keyword">const</span> key_type&amp; key) {
+<a name="l00900"></a>00900         <span class="keywordflow">return</span> internal_find(key);
+<a name="l00901"></a>00901     }
+<a name="l00902"></a>00902 
+<a name="l00903"></a>00903     const_iterator find(<span class="keyword">const</span> key_type&amp; key)<span class="keyword"> const </span>{
+<a name="l00904"></a>00904         <span class="keywordflow">return</span> const_cast&lt;self_type*&gt;(<span class="keyword">this</span>)-&gt;internal_find(key);
+<a name="l00905"></a>00905     }
+<a name="l00906"></a>00906 
+<a name="l00907"></a>00907     size_type count(<span class="keyword">const</span> key_type&amp; key)<span class="keyword"> const </span>{
+<a name="l00908"></a>00908         <span class="keywordflow">if</span>(allow_multimapping) {
+<a name="l00909"></a>00909             paircc_t answer = equal_range(key);
+<a name="l00910"></a>00910             size_type item_count = internal_distance(answer.first, answer.second);
+<a name="l00911"></a>00911             <span class="keywordflow">return</span> item_count;
+<a name="l00912"></a>00912         } <span class="keywordflow">else</span> {
+<a name="l00913"></a>00913             <span class="keywordflow">return</span> const_cast&lt;self_type*&gt;(<span class="keyword">this</span>)-&gt;internal_find(key) == end()?0:1;
+<a name="l00914"></a>00914         }
+<a name="l00915"></a>00915     }
+<a name="l00916"></a>00916 
+<a name="l00917"></a>00917     std::pair&lt;iterator, iterator&gt; equal_range(<span class="keyword">const</span> key_type&amp; key) {
+<a name="l00918"></a>00918         <span class="keywordflow">return</span> internal_equal_range(key);
+<a name="l00919"></a>00919     }
+<a name="l00920"></a>00920 
+<a name="l00921"></a>00921     std::pair&lt;const_iterator, const_iterator&gt; equal_range(<span class="keyword">const</span> key_type&amp; key)<span class="keyword"> const </span>{
+<a name="l00922"></a>00922         <span class="keywordflow">return</span> const_cast&lt;self_type*&gt;(<span class="keyword">this</span>)-&gt;internal_equal_range(key);
+<a name="l00923"></a>00923     }
+<a name="l00924"></a>00924 
+<a name="l00925"></a>00925     <span class="comment">// Bucket interface - for debugging </span>
+<a name="l00926"></a>00926     size_type unsafe_bucket_count()<span class="keyword"> const </span>{
+<a name="l00927"></a>00927         <span class="keywordflow">return</span> my_number_of_buckets;
+<a name="l00928"></a>00928     }
+<a name="l00929"></a>00929 
+<a name="l00930"></a>00930     size_type unsafe_max_bucket_count()<span class="keyword"> const </span>{
+<a name="l00931"></a>00931         <span class="keywordflow">return</span> segment_size(pointers_per_table-1);
+<a name="l00932"></a>00932     }
+<a name="l00933"></a>00933 
+<a name="l00934"></a>00934     size_type unsafe_bucket_size(size_type bucket) {
+<a name="l00935"></a>00935         size_type item_count = 0;
+<a name="l00936"></a>00936         <span class="keywordflow">if</span> (is_initialized(bucket)) {
+<a name="l00937"></a>00937             raw_iterator it = get_bucket(bucket);
+<a name="l00938"></a>00938             ++it;
+<a name="l00939"></a>00939             <span class="keywordflow">for</span> (; it != my_solist.raw_end() &amp;&amp; !it.get_node_ptr()-&gt;is_dummy(); ++it)
+<a name="l00940"></a>00940                 ++item_count;
+<a name="l00941"></a>00941         }
+<a name="l00942"></a>00942         <span class="keywordflow">return</span> item_count;
+<a name="l00943"></a>00943     }
+<a name="l00944"></a>00944 
+<a name="l00945"></a>00945     size_type unsafe_bucket(<span class="keyword">const</span> key_type&amp; key)<span class="keyword"> const </span>{
+<a name="l00946"></a>00946         sokey_t order_key = (sokey_t) my_hash_compare(key);
+<a name="l00947"></a>00947         size_type bucket = order_key % my_number_of_buckets;
+<a name="l00948"></a>00948         <span class="keywordflow">return</span> bucket;
+<a name="l00949"></a>00949     }
+<a name="l00950"></a>00950 
+<a name="l00951"></a>00951     <span class="comment">// If the bucket is initialized, return a first non-dummy element in it</span>
+<a name="l00952"></a>00952     local_iterator unsafe_begin(size_type bucket) {
+<a name="l00953"></a>00953         <span class="keywordflow">if</span> (!is_initialized(bucket))
+<a name="l00954"></a>00954             <span class="keywordflow">return</span> end();
+<a name="l00955"></a>00955 
+<a name="l00956"></a>00956         raw_iterator it = get_bucket(bucket);
+<a name="l00957"></a>00957         <span class="keywordflow">return</span> my_solist.first_real_iterator(it);
+<a name="l00958"></a>00958     }
+<a name="l00959"></a>00959 
+<a name="l00960"></a>00960     <span class="comment">// If the bucket is initialized, return a first non-dummy element in it</span>
+<a name="l00961"></a>00961     const_local_iterator unsafe_begin(size_type bucket)<span class="keyword"> const</span>
+<a name="l00962"></a>00962 <span class="keyword">    </span>{
+<a name="l00963"></a>00963         <span class="keywordflow">if</span> (!is_initialized(bucket))
+<a name="l00964"></a>00964             <span class="keywordflow">return</span> end();
+<a name="l00965"></a>00965 
+<a name="l00966"></a>00966         raw_const_iterator it = get_bucket(bucket);
+<a name="l00967"></a>00967         <span class="keywordflow">return</span> my_solist.first_real_iterator(it);
+<a name="l00968"></a>00968     }
+<a name="l00969"></a>00969 
+<a name="l00970"></a>00970     <span class="comment">// @REVIEW: Takes O(n)</span>
+<a name="l00971"></a>00971     <span class="comment">// Returns the iterator after the last non-dummy element in the bucket</span>
+<a name="l00972"></a>00972     local_iterator unsafe_end(size_type bucket)
+<a name="l00973"></a>00973     {
+<a name="l00974"></a>00974         <span class="keywordflow">if</span> (!is_initialized(bucket))
+<a name="l00975"></a>00975             <span class="keywordflow">return</span> end();
+<a name="l00976"></a>00976 
+<a name="l00977"></a>00977         raw_iterator it = get_bucket(bucket);
+<a name="l00978"></a>00978     
+<a name="l00979"></a>00979         <span class="comment">// Find the end of the bucket, denoted by the dummy element</span>
+<a name="l00980"></a>00980         <span class="keywordflow">do</span> ++it;
+<a name="l00981"></a>00981         <span class="keywordflow">while</span>(it != my_solist.raw_end() &amp;&amp; !it.get_node_ptr()-&gt;is_dummy());
+<a name="l00982"></a>00982 
+<a name="l00983"></a>00983         <span class="comment">// Return the first real element past the end of the bucket</span>
+<a name="l00984"></a>00984         <span class="keywordflow">return</span> my_solist.first_real_iterator(it);
+<a name="l00985"></a>00985     }
+<a name="l00986"></a>00986 
+<a name="l00987"></a>00987     <span class="comment">// @REVIEW: Takes O(n)</span>
+<a name="l00988"></a>00988     <span class="comment">// Returns the iterator after the last non-dummy element in the bucket</span>
+<a name="l00989"></a>00989     const_local_iterator unsafe_end(size_type bucket)<span class="keyword"> const</span>
+<a name="l00990"></a>00990 <span class="keyword">    </span>{
+<a name="l00991"></a>00991         <span class="keywordflow">if</span> (!is_initialized(bucket))
+<a name="l00992"></a>00992             <span class="keywordflow">return</span> end();
+<a name="l00993"></a>00993 
+<a name="l00994"></a>00994         raw_const_iterator it = get_bucket(bucket);
+<a name="l00995"></a>00995     
+<a name="l00996"></a>00996         <span class="comment">// Find the end of the bucket, denoted by the dummy element</span>
+<a name="l00997"></a>00997         <span class="keywordflow">do</span> ++it;
+<a name="l00998"></a>00998         <span class="keywordflow">while</span>(it != my_solist.raw_end() &amp;&amp; !it.get_node_ptr()-&gt;is_dummy());
+<a name="l00999"></a>00999 
+<a name="l01000"></a>01000         <span class="comment">// Return the first real element past the end of the bucket</span>
+<a name="l01001"></a>01001         <span class="keywordflow">return</span> my_solist.first_real_iterator(it);
+<a name="l01002"></a>01002     }
+<a name="l01003"></a>01003 
+<a name="l01004"></a>01004     const_local_iterator unsafe_cbegin(size_type bucket)<span class="keyword"> const </span>{
+<a name="l01005"></a>01005         <span class="keywordflow">return</span> ((<span class="keyword">const</span> self_type *) <span class="keyword">this</span>)-&gt;begin();
+<a name="l01006"></a>01006     }
+<a name="l01007"></a>01007 
+<a name="l01008"></a>01008     const_local_iterator unsafe_cend(size_type bucket)<span class="keyword"> const </span>{
+<a name="l01009"></a>01009         <span class="keywordflow">return</span> ((<span class="keyword">const</span> self_type *) <span class="keyword">this</span>)-&gt;end();
+<a name="l01010"></a>01010     }
+<a name="l01011"></a>01011 
+<a name="l01012"></a>01012     <span class="comment">// Hash policy</span>
+<a name="l01013"></a>01013     <span class="keywordtype">float</span> load_factor()<span class="keyword"> const </span>{
+<a name="l01014"></a>01014         <span class="keywordflow">return</span> (<span class="keywordtype">float</span>) size() / (float) unsafe_bucket_count();
+<a name="l01015"></a>01015     }
+<a name="l01016"></a>01016 
+<a name="l01017"></a>01017     <span class="keywordtype">float</span> max_load_factor()<span class="keyword"> const </span>{
+<a name="l01018"></a>01018         <span class="keywordflow">return</span> my_maximum_bucket_size;
+<a name="l01019"></a>01019     }
+<a name="l01020"></a>01020 
+<a name="l01021"></a>01021     <span class="keywordtype">void</span> max_load_factor(<span class="keywordtype">float</span> newmax) {
+<a name="l01022"></a>01022         <span class="keywordflow">if</span> (newmax != newmax || newmax &lt; 0)
+<a name="l01023"></a>01023             tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor);
+<a name="l01024"></a>01024         my_maximum_bucket_size = newmax;
+<a name="l01025"></a>01025     }
+<a name="l01026"></a>01026 
+<a name="l01027"></a>01027     <span class="comment">// This function is a noop, because the underlying split-ordered list</span>
+<a name="l01028"></a>01028     <span class="comment">// is already sorted, so an increase in the bucket number will be</span>
+<a name="l01029"></a>01029     <span class="comment">// reflected next time this bucket is touched.</span>
+<a name="l01030"></a>01030     <span class="keywordtype">void</span> rehash(size_type buckets) {
+<a name="l01031"></a>01031         size_type current_buckets = my_number_of_buckets;
+<a name="l01032"></a>01032         <span class="keywordflow">if</span> (current_buckets &gt;= buckets)
+<a name="l01033"></a>01033             <span class="keywordflow">return</span>;
+<a name="l01034"></a>01034         my_number_of_buckets = 1&lt;&lt;__TBB_Log2((uintptr_t)buckets*2-1); <span class="comment">// round up to power of 2</span>
+<a name="l01035"></a>01035     }
+<a name="l01036"></a>01036 
+<a name="l01037"></a>01037 <span class="keyword">private</span>:
+<a name="l01038"></a>01038 
+<a name="l01039"></a>01039     <span class="comment">// Initialize the hash and keep the first bucket open</span>
+<a name="l01040"></a>01040     <span class="keywordtype">void</span> internal_init() {
+<a name="l01041"></a>01041         <span class="comment">// Allocate an array of segment pointers</span>
+<a name="l01042"></a>01042         memset(my_buckets, 0, pointers_per_table * <span class="keyword">sizeof</span>(<span class="keywordtype">void</span> *));
+<a name="l01043"></a>01043 
+<a name="l01044"></a>01044         <span class="comment">// Insert the first element in the split-ordered list</span>
+<a name="l01045"></a>01045         raw_iterator dummy_node = my_solist.raw_begin();
+<a name="l01046"></a>01046         set_bucket(0, dummy_node);
+<a name="l01047"></a>01047     }
+<a name="l01048"></a>01048 
+<a name="l01049"></a>01049     <span class="keywordtype">void</span> internal_clear() {
+<a name="l01050"></a>01050         <span class="keywordflow">for</span> (size_type index = 0; index &lt; pointers_per_table; ++index) {
+<a name="l01051"></a>01051             <span class="keywordflow">if</span> (my_buckets[index] != NULL) {
+<a name="l01052"></a>01052                 size_type sz = segment_size(index);
+<a name="l01053"></a>01053                 <span class="keywordflow">for</span> (size_type index2 = 0; index2 &lt; sz; ++index2)
+<a name="l01054"></a>01054                     my_allocator.destroy(&amp;my_buckets[index][index2]);
+<a name="l01055"></a>01055                 my_allocator.deallocate(my_buckets[index], sz);
+<a name="l01056"></a>01056                 my_buckets[index] = 0;
+<a name="l01057"></a>01057             }
+<a name="l01058"></a>01058         }
+<a name="l01059"></a>01059     }
+<a name="l01060"></a>01060 
+<a name="l01061"></a>01061     <span class="keywordtype">void</span> internal_copy(<span class="keyword">const</span> self_type&amp; right) {
+<a name="l01062"></a>01062         clear();
+<a name="l01063"></a>01063 
+<a name="l01064"></a>01064         my_maximum_bucket_size = right.my_maximum_bucket_size;
+<a name="l01065"></a>01065         my_number_of_buckets = right.my_number_of_buckets;
+<a name="l01066"></a>01066 
+<a name="l01067"></a>01067         __TBB_TRY {
+<a name="l01068"></a>01068             insert(right.begin(), right.end());
+<a name="l01069"></a>01069             my_hash_compare = right.my_hash_compare;
+<a name="l01070"></a>01070         } __TBB_CATCH(...) {
+<a name="l01071"></a>01071             my_solist.clear();
+<a name="l01072"></a>01072             __TBB_RETHROW();
+<a name="l01073"></a>01073         }
+<a name="l01074"></a>01074     }
+<a name="l01075"></a>01075 
+<a name="l01076"></a>01076     <span class="keywordtype">void</span> internal_swap_buckets(concurrent_unordered_base&amp; right)
+<a name="l01077"></a>01077     {
+<a name="l01078"></a>01078         <span class="comment">// Swap all node segments</span>
+<a name="l01079"></a>01079         <span class="keywordflow">for</span> (size_type index = 0; index &lt; pointers_per_table; ++index)
+<a name="l01080"></a>01080         {
+<a name="l01081"></a>01081             raw_iterator * iterator_pointer = my_buckets[index];
+<a name="l01082"></a>01082             my_buckets[index] = right.my_buckets[index];
+<a name="l01083"></a>01083             right.my_buckets[index] = iterator_pointer;
+<a name="l01084"></a>01084         }
+<a name="l01085"></a>01085     }
+<a name="l01086"></a>01086 
+<a name="l01087"></a>01087     <span class="comment">// Hash APIs</span>
+<a name="l01088"></a>01088     size_type internal_distance(const_iterator first, const_iterator last)<span class="keyword"> const</span>
+<a name="l01089"></a>01089 <span class="keyword">    </span>{
+<a name="l01090"></a>01090         size_type num = 0;
+<a name="l01091"></a>01091 
+<a name="l01092"></a>01092         <span class="keywordflow">for</span> (const_iterator it = first; it != last; ++it)
+<a name="l01093"></a>01093             ++num;
+<a name="l01094"></a>01094 
+<a name="l01095"></a>01095         <span class="keywordflow">return</span> num;
+<a name="l01096"></a>01096     }
+<a name="l01097"></a>01097 
+<a name="l01098"></a>01098     <span class="comment">// Insert an element in the hash given its value</span>
+<a name="l01099"></a>01099     std::pair&lt;iterator, bool&gt; internal_insert(<span class="keyword">const</span> value_type&amp; value)
+<a name="l01100"></a>01100     {
+<a name="l01101"></a>01101         sokey_t order_key = (sokey_t) my_hash_compare(get_key(value));
+<a name="l01102"></a>01102         size_type bucket = order_key % my_number_of_buckets;
+<a name="l01103"></a>01103 
+<a name="l01104"></a>01104         <span class="comment">// If bucket is empty, initialize it first</span>
+<a name="l01105"></a>01105         <span class="keywordflow">if</span> (!is_initialized(bucket))
+<a name="l01106"></a>01106             init_bucket(bucket);
+<a name="l01107"></a>01107 
+<a name="l01108"></a>01108         size_type new_count;
+<a name="l01109"></a>01109         order_key = split_order_key_regular(order_key);
+<a name="l01110"></a>01110         raw_iterator it = get_bucket(bucket);
+<a name="l01111"></a>01111         raw_iterator last = my_solist.raw_end();
+<a name="l01112"></a>01112         raw_iterator where = it;
+<a name="l01113"></a>01113 
+<a name="l01114"></a>01114         __TBB_ASSERT(where != last, <span class="stringliteral">"Invalid head node"</span>);
+<a name="l01115"></a>01115 
+<a name="l01116"></a>01116         <span class="comment">// First node is a dummy node</span>
+<a name="l01117"></a>01117         ++where;
+<a name="l01118"></a>01118 
+<a name="l01119"></a>01119         <span class="keywordflow">for</span> (;;)
+<a name="l01120"></a>01120         {
+<a name="l01121"></a>01121             <span class="keywordflow">if</span> (where == last || solist_t::get_order_key(where) &gt; order_key)
+<a name="l01122"></a>01122             {
+<a name="l01123"></a>01123                 <span class="comment">// Try to insert it in the right place</span>
+<a name="l01124"></a>01124                 std::pair&lt;iterator, bool&gt; result = my_solist.try_insert(it, where, value, order_key, &amp;new_count);
+<a name="l01125"></a>01125                 
+<a name="l01126"></a>01126                 <span class="keywordflow">if</span> (result.second)
+<a name="l01127"></a>01127                 {
+<a name="l01128"></a>01128                     <span class="comment">// Insertion succeeded, adjust the table size, if needed</span>
+<a name="l01129"></a>01129                     adjust_table_size(new_count, my_number_of_buckets);
+<a name="l01130"></a>01130                     <span class="keywordflow">return</span> result;
+<a name="l01131"></a>01131                 }
+<a name="l01132"></a>01132                 <span class="keywordflow">else</span>
+<a name="l01133"></a>01133                 {
+<a name="l01134"></a>01134                     <span class="comment">// Insertion failed: either the same node was inserted by another thread, or</span>
+<a name="l01135"></a>01135                     <span class="comment">// another element was inserted at exactly the same place as this node.</span>
+<a name="l01136"></a>01136                     <span class="comment">// Proceed with the search from the previous location where order key was</span>
+<a name="l01137"></a>01137                     <span class="comment">// known to be larger (note: this is legal only because there is no safe</span>
+<a name="l01138"></a>01138                     <span class="comment">// concurrent erase operation supported).</span>
+<a name="l01139"></a>01139                     where = it;
+<a name="l01140"></a>01140                     ++where;
+<a name="l01141"></a>01141                     <span class="keywordflow">continue</span>;
+<a name="l01142"></a>01142                 }
+<a name="l01143"></a>01143             }
+<a name="l01144"></a>01144             <span class="keywordflow">else</span> <span class="keywordflow">if</span> (!allow_multimapping &amp;&amp; solist_t::get_order_key(where) == order_key &amp;&amp; my_hash_compare(get_key(*where), get_key(value)) == 0)
+<a name="l01145"></a>01145             {
+<a name="l01146"></a>01146                 <span class="comment">// Element already in the list, return it</span>
+<a name="l01147"></a>01147                 <span class="keywordflow">return</span> std::pair&lt;iterator, bool&gt;(my_solist.get_iterator(where), <span class="keyword">false</span>);
+<a name="l01148"></a>01148             }
+<a name="l01149"></a>01149 
+<a name="l01150"></a>01150             <span class="comment">// Move the iterator forward</span>
+<a name="l01151"></a>01151             it = where;
+<a name="l01152"></a>01152             ++where;
+<a name="l01153"></a>01153         }
+<a name="l01154"></a>01154     }
+<a name="l01155"></a>01155 
+<a name="l01156"></a>01156     <span class="comment">// Find the element in the split-ordered list</span>
+<a name="l01157"></a>01157     iterator internal_find(<span class="keyword">const</span> key_type&amp; key)
+<a name="l01158"></a>01158     {
+<a name="l01159"></a>01159         sokey_t order_key = (sokey_t) my_hash_compare(key);
+<a name="l01160"></a>01160         size_type bucket = order_key % my_number_of_buckets;
+<a name="l01161"></a>01161 
+<a name="l01162"></a>01162         <span class="comment">// If bucket is empty, initialize it first</span>
+<a name="l01163"></a>01163         <span class="keywordflow">if</span> (!is_initialized(bucket))
+<a name="l01164"></a>01164             init_bucket(bucket);
+<a name="l01165"></a>01165 
+<a name="l01166"></a>01166         order_key = split_order_key_regular(order_key);
+<a name="l01167"></a>01167         raw_iterator last = my_solist.raw_end();
+<a name="l01168"></a>01168 
+<a name="l01169"></a>01169         <span class="keywordflow">for</span> (raw_iterator it = get_bucket(bucket); it != last; ++it)
+<a name="l01170"></a>01170         {
+<a name="l01171"></a>01171             <span class="keywordflow">if</span> (solist_t::get_order_key(it) &gt; order_key)
+<a name="l01172"></a>01172             {
+<a name="l01173"></a>01173                 <span class="comment">// If the order key is smaller than the current order key, the element</span>
+<a name="l01174"></a>01174                 <span class="comment">// is not in the hash.</span>
+<a name="l01175"></a>01175                 <span class="keywordflow">return</span> end();
+<a name="l01176"></a>01176             }
+<a name="l01177"></a>01177             <span class="keywordflow">else</span> <span class="keywordflow">if</span> (solist_t::get_order_key(it) == order_key)
+<a name="l01178"></a>01178             {
+<a name="l01179"></a>01179                 <span class="comment">// The fact that order keys match does not mean that the element is found.</span>
+<a name="l01180"></a>01180                 <span class="comment">// Key function comparison has to be performed to check whether this is the</span>
+<a name="l01181"></a>01181                 <span class="comment">// right element. If not, keep searching while order key is the same.</span>
+<a name="l01182"></a>01182                 <span class="keywordflow">if</span> (!my_hash_compare(get_key(*it), key))
+<a name="l01183"></a>01183                     <span class="keywordflow">return</span> my_solist.get_iterator(it);
+<a name="l01184"></a>01184             }
+<a name="l01185"></a>01185         }
+<a name="l01186"></a>01186 
+<a name="l01187"></a>01187         <span class="keywordflow">return</span> end();
+<a name="l01188"></a>01188     }
+<a name="l01189"></a>01189 
+<a name="l01190"></a>01190     <span class="comment">// Erase an element from the list. This is not a concurrency safe function.</span>
+<a name="l01191"></a>01191     iterator internal_erase(const_iterator it)
+<a name="l01192"></a>01192     {
+<a name="l01193"></a>01193         key_type key = get_key(*it);
+<a name="l01194"></a>01194         sokey_t order_key = (sokey_t) my_hash_compare(key);
+<a name="l01195"></a>01195         size_type bucket = order_key % my_number_of_buckets;
+<a name="l01196"></a>01196 
+<a name="l01197"></a>01197         <span class="comment">// If bucket is empty, initialize it first</span>
+<a name="l01198"></a>01198         <span class="keywordflow">if</span> (!is_initialized(bucket))
+<a name="l01199"></a>01199             init_bucket(bucket);
+<a name="l01200"></a>01200 
+<a name="l01201"></a>01201         order_key = split_order_key_regular(order_key);
+<a name="l01202"></a>01202 
+<a name="l01203"></a>01203         raw_iterator previous = get_bucket(bucket);
+<a name="l01204"></a>01204         raw_iterator last = my_solist.raw_end();
+<a name="l01205"></a>01205         raw_iterator where = previous;
+<a name="l01206"></a>01206 
+<a name="l01207"></a>01207         __TBB_ASSERT(where != last, <span class="stringliteral">"Invalid head node"</span>);
+<a name="l01208"></a>01208 
+<a name="l01209"></a>01209         <span class="comment">// First node is a dummy node</span>
+<a name="l01210"></a>01210         ++where;
+<a name="l01211"></a>01211 
+<a name="l01212"></a>01212         <span class="keywordflow">for</span> (;;) {
+<a name="l01213"></a>01213             <span class="keywordflow">if</span> (where == last)
+<a name="l01214"></a>01214                 <span class="keywordflow">return</span> end();
+<a name="l01215"></a>01215             <span class="keywordflow">else</span> <span class="keywordflow">if</span> (my_solist.get_iterator(where) == it)
+<a name="l01216"></a>01216                 <span class="keywordflow">return</span> my_solist.erase_node(previous, it);
+<a name="l01217"></a>01217 
+<a name="l01218"></a>01218             <span class="comment">// Move the iterator forward</span>
+<a name="l01219"></a>01219             previous = where;
+<a name="l01220"></a>01220             ++where;
+<a name="l01221"></a>01221         }
+<a name="l01222"></a>01222     }
+<a name="l01223"></a>01223 
+<a name="l01224"></a>01224     <span class="comment">// Return the [begin, end) pair of iterators with the same key values.</span>
+<a name="l01225"></a>01225     <span class="comment">// This operation makes sense only if mapping is many-to-one.</span>
+<a name="l01226"></a>01226     pairii_t internal_equal_range(<span class="keyword">const</span> key_type&amp; key)
+<a name="l01227"></a>01227     {
+<a name="l01228"></a>01228         sokey_t order_key = (sokey_t) my_hash_compare(key);
+<a name="l01229"></a>01229         size_type bucket = order_key % my_number_of_buckets;
+<a name="l01230"></a>01230 
+<a name="l01231"></a>01231         <span class="comment">// If bucket is empty, initialize it first</span>
+<a name="l01232"></a>01232         <span class="keywordflow">if</span> (!is_initialized(bucket))
+<a name="l01233"></a>01233             init_bucket(bucket);
+<a name="l01234"></a>01234 
+<a name="l01235"></a>01235         order_key = split_order_key_regular(order_key);
+<a name="l01236"></a>01236         raw_iterator end_it = my_solist.raw_end();
+<a name="l01237"></a>01237 
+<a name="l01238"></a>01238         <span class="keywordflow">for</span> (raw_iterator it = get_bucket(bucket); it != end_it; ++it)
+<a name="l01239"></a>01239         {
+<a name="l01240"></a>01240             <span class="keywordflow">if</span> (solist_t::get_order_key(it) &gt; order_key)
+<a name="l01241"></a>01241             {
+<a name="l01242"></a>01242                 <span class="comment">// There is no element with the given key</span>
+<a name="l01243"></a>01243                 <span class="keywordflow">return</span> pairii_t(end(), end());
+<a name="l01244"></a>01244             }
+<a name="l01245"></a>01245             <span class="keywordflow">else</span> <span class="keywordflow">if</span> (solist_t::get_order_key(it) == order_key &amp;&amp; !my_hash_compare(get_key(*it), key))
+<a name="l01246"></a>01246             {
+<a name="l01247"></a>01247                 iterator first = my_solist.get_iterator(it);
+<a name="l01248"></a>01248                 iterator last = first;
+<a name="l01249"></a>01249                 <span class="keywordflow">do</span> ++last; <span class="keywordflow">while</span>( allow_multimapping &amp;&amp; last != end() &amp;&amp; !my_hash_compare(get_key(*last), key) );
+<a name="l01250"></a>01250                 <span class="keywordflow">return</span> pairii_t(first, last);
+<a name="l01251"></a>01251             }
+<a name="l01252"></a>01252         }
+<a name="l01253"></a>01253 
+<a name="l01254"></a>01254         <span class="keywordflow">return</span> pairii_t(end(), end());
+<a name="l01255"></a>01255     }
+<a name="l01256"></a>01256 
+<a name="l01257"></a>01257     <span class="comment">// Bucket APIs</span>
+<a name="l01258"></a>01258     <span class="keywordtype">void</span> init_bucket(size_type bucket)
+<a name="l01259"></a>01259     {
+<a name="l01260"></a>01260         <span class="comment">// Bucket 0 has no parent. Initialize it and return.</span>
+<a name="l01261"></a>01261         <span class="keywordflow">if</span> (bucket == 0) {
+<a name="l01262"></a>01262             internal_init();
+<a name="l01263"></a>01263             <span class="keywordflow">return</span>;
+<a name="l01264"></a>01264         }
+<a name="l01265"></a>01265 
+<a name="l01266"></a>01266         size_type parent_bucket = get_parent(bucket);
+<a name="l01267"></a>01267 
+<a name="l01268"></a>01268         <span class="comment">// All parent_bucket buckets have to be initialized before this bucket is</span>
+<a name="l01269"></a>01269         <span class="keywordflow">if</span> (!is_initialized(parent_bucket))
+<a name="l01270"></a>01270             init_bucket(parent_bucket);
+<a name="l01271"></a>01271 
+<a name="l01272"></a>01272         raw_iterator parent = get_bucket(parent_bucket);
+<a name="l01273"></a>01273 
+<a name="l01274"></a>01274         <span class="comment">// Create a dummy first node in this bucket</span>
+<a name="l01275"></a>01275         raw_iterator dummy_node = my_solist.insert_dummy(parent, split_order_key_dummy(bucket));
+<a name="l01276"></a>01276         set_bucket(bucket, dummy_node);
+<a name="l01277"></a>01277     }
+<a name="l01278"></a>01278 
+<a name="l01279"></a>01279     <span class="keywordtype">void</span> adjust_table_size(size_type total_elements, size_type current_size)
+<a name="l01280"></a>01280     {
+<a name="l01281"></a>01281         <span class="comment">// Grow the table by a factor of 2 if possible and needed</span>
+<a name="l01282"></a>01282         <span class="keywordflow">if</span> ( ((<span class="keywordtype">float</span>) total_elements / (<span class="keywordtype">float</span>) current_size) &gt; my_maximum_bucket_size )
+<a name="l01283"></a>01283         {
+<a name="l01284"></a>01284             <span class="comment">// Double the size of the hash only if size has not changed inbetween loads</span>
+<a name="l01285"></a>01285             __TBB_CompareAndSwapW((uintptr_t*)&amp;my_number_of_buckets, uintptr_t(2u*current_size), uintptr_t(current_size) );
+<a name="l01286"></a>01286             <span class="comment">//Simple "my_number_of_buckets.compare_and_swap( current_size&lt;&lt;1, current_size );" does not work for VC8</span>
+<a name="l01287"></a>01287             <span class="comment">//due to overzealous compiler warnings in /Wp64 mode</span>
+<a name="l01288"></a>01288         }
+<a name="l01289"></a>01289     }
+<a name="l01290"></a>01290 
+<a name="l01291"></a>01291     size_type get_parent(size_type bucket)<span class="keyword"> const</span>
+<a name="l01292"></a>01292 <span class="keyword">    </span>{
+<a name="l01293"></a>01293         <span class="comment">// Unsets bucket's most significant turned-on bit</span>
+<a name="l01294"></a>01294         size_type msb = __TBB_Log2((uintptr_t)bucket);
+<a name="l01295"></a>01295         <span class="keywordflow">return</span> bucket &amp; ~(size_type(1) &lt;&lt; msb);
+<a name="l01296"></a>01296     }
+<a name="l01297"></a>01297 
+<a name="l01298"></a>01298 
+<a name="l01299"></a>01299     <span class="comment">// Dynamic sized array (segments)</span>
+<a name="l01301"></a>01301 <span class="comment"></span>    <span class="keyword">static</span> size_type segment_index_of( size_type index ) {
+<a name="l01302"></a>01302         <span class="keywordflow">return</span> size_type( __TBB_Log2( uintptr_t(index|1) ) );
+<a name="l01303"></a>01303     }
+<a name="l01304"></a>01304 
+<a name="l01306"></a>01306     <span class="keyword">static</span> size_type segment_base( size_type k ) {
+<a name="l01307"></a>01307         <span class="keywordflow">return</span> (size_type(1)&lt;&lt;k &amp; ~size_type(1));
+<a name="l01308"></a>01308     }
+<a name="l01309"></a>01309 
+<a name="l01311"></a>01311     <span class="keyword">static</span> size_type segment_size( size_type k ) {
+<a name="l01312"></a>01312         <span class="keywordflow">return</span> k? size_type(1)&lt;&lt;k : 2;
+<a name="l01313"></a>01313     }
+<a name="l01314"></a>01314 
+<a name="l01315"></a>01315     raw_iterator get_bucket(size_type bucket)<span class="keyword"> const </span>{
+<a name="l01316"></a>01316         size_type segment = segment_index_of(bucket);
+<a name="l01317"></a>01317         bucket -= segment_base(segment);
+<a name="l01318"></a>01318         __TBB_ASSERT( my_buckets[segment], <span class="stringliteral">"bucket must be in an allocated segment"</span> );
+<a name="l01319"></a>01319         <span class="keywordflow">return</span> my_buckets[segment][bucket];
+<a name="l01320"></a>01320     }
+<a name="l01321"></a>01321 
+<a name="l01322"></a>01322     <span class="keywordtype">void</span> set_bucket(size_type bucket, raw_iterator dummy_head) {
+<a name="l01323"></a>01323         size_type segment = segment_index_of(bucket);
+<a name="l01324"></a>01324         bucket -= segment_base(segment);
+<a name="l01325"></a>01325 
+<a name="l01326"></a>01326         <span class="keywordflow">if</span> (my_buckets[segment] == NULL) {
+<a name="l01327"></a>01327             size_type sz = segment_size(segment);
+<a name="l01328"></a>01328             raw_iterator * new_segment = my_allocator.allocate(sz);
+<a name="l01329"></a>01329             std::memset(new_segment, 0, sz*<span class="keyword">sizeof</span>(raw_iterator));
+<a name="l01330"></a>01330 
+<a name="l01331"></a>01331             <span class="keywordflow">if</span> (__TBB_CompareAndSwapW((<span class="keywordtype">void</span> *) &amp;my_buckets[segment], (uintptr_t)new_segment, 0) != 0)
+<a name="l01332"></a>01332                 my_allocator.deallocate(new_segment, sz);
+<a name="l01333"></a>01333         }
+<a name="l01334"></a>01334 
+<a name="l01335"></a>01335         my_buckets[segment][bucket] = dummy_head;
+<a name="l01336"></a>01336     }
+<a name="l01337"></a>01337 
+<a name="l01338"></a>01338     <span class="keywordtype">bool</span> is_initialized(size_type bucket)<span class="keyword"> const </span>{
+<a name="l01339"></a>01339         size_type segment = segment_index_of(bucket);
+<a name="l01340"></a>01340         bucket -= segment_base(segment);
+<a name="l01341"></a>01341 
+<a name="l01342"></a>01342         <span class="keywordflow">if</span> (my_buckets[segment] == NULL)
+<a name="l01343"></a>01343             <span class="keywordflow">return</span> <span class="keyword">false</span>;
+<a name="l01344"></a>01344 
+<a name="l01345"></a>01345         raw_iterator it = my_buckets[segment][bucket];
+<a name="l01346"></a>01346         <span class="keywordflow">return</span> (it.get_node_ptr() != NULL);
+<a name="l01347"></a>01347     }
+<a name="l01348"></a>01348 
+<a name="l01349"></a>01349     <span class="comment">// Utilities for keys</span>
+<a name="l01350"></a>01350 
+<a name="l01351"></a>01351     <span class="comment">// A regular order key has its original hash value reversed and the last bit set</span>
+<a name="l01352"></a>01352     sokey_t split_order_key_regular(sokey_t order_key)<span class="keyword"> const </span>{
+<a name="l01353"></a>01353         <span class="keywordflow">return</span> __TBB_ReverseBits(order_key) | 0x1;
+<a name="l01354"></a>01354     }
+<a name="l01355"></a>01355 
+<a name="l01356"></a>01356     <span class="comment">// A dummy order key has its original hash value reversed and the last bit unset</span>
+<a name="l01357"></a>01357     sokey_t split_order_key_dummy(sokey_t order_key)<span class="keyword"> const </span>{
+<a name="l01358"></a>01358         <span class="keywordflow">return</span> __TBB_ReverseBits(order_key) &amp; ~(0x1);
+<a name="l01359"></a>01359     }
+<a name="l01360"></a>01360 
+<a name="l01361"></a>01361     <span class="comment">// Shared variables</span>
+<a name="l01362"></a>01362     atomic&lt;size_type&gt;                                             my_number_of_buckets;       <span class="comment">// Current table size</span>
+<a name="l01363"></a>01363     solist_t                                                      my_solist;                  <span class="comment">// List where all the elements are kept</span>
+<a name="l01364"></a>01364     <span class="keyword">typename</span> allocator_type::template rebind&lt;raw_iterator&gt;::other my_allocator;               <span class="comment">// Allocator object for segments</span>
+<a name="l01365"></a>01365     <span class="keywordtype">float</span>                                                         my_maximum_bucket_size;     <span class="comment">// Maximum size of the bucket</span>
+<a name="l01366"></a>01366     atomic&lt;raw_iterator*&gt;                                         my_buckets[pointers_per_table]; <span class="comment">// The segment table</span>
+<a name="l01367"></a>01367 };
+<a name="l01368"></a>01368 <span class="preprocessor">#if _MSC_VER</span>
+<a name="l01369"></a>01369 <span class="preprocessor"></span><span class="preprocessor">#pragma warning(pop) // warning 4127 -- while (true) has a constant expression in it</span>
+<a name="l01370"></a>01370 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l01371"></a>01371 <span class="preprocessor"></span>
+<a name="l01373"></a>01373 <span class="keyword">static</span> <span class="keyword">const</span> size_t hash_multiplier = <span class="keyword">sizeof</span>(size_t)==4? 2654435769U : 11400714819323198485ULL;
+<a name="l01374"></a>01374 } <span class="comment">// namespace internal</span>
+<a name="l01377"></a>01377 <span class="comment"></span><span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l01378"></a>01378 <span class="keyword">inline</span> size_t tbb_hasher( <span class="keyword">const</span> T&amp; t ) {
+<a name="l01379"></a>01379     <span class="keywordflow">return</span> static_cast&lt;size_t&gt;( t ) * internal::hash_multiplier;
+<a name="l01380"></a>01380 }
+<a name="l01381"></a>01381 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> P&gt;
+<a name="l01382"></a>01382 <span class="keyword">inline</span> size_t tbb_hasher( P* ptr ) {
+<a name="l01383"></a>01383     size_t <span class="keyword">const</span> h = reinterpret_cast&lt;size_t&gt;( ptr );
+<a name="l01384"></a>01384     <span class="keywordflow">return</span> (h &gt;&gt; 3) ^ h;
+<a name="l01385"></a>01385 }
+<a name="l01386"></a>01386 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> E, <span class="keyword">typename</span> S, <span class="keyword">typename</span> A&gt;
+<a name="l01387"></a>01387 <span class="keyword">inline</span> size_t tbb_hasher( <span class="keyword">const</span> std::basic_string&lt;E,S,A&gt;&amp; s ) {
+<a name="l01388"></a>01388     size_t h = 0;
+<a name="l01389"></a>01389     <span class="keywordflow">for</span>( <span class="keyword">const</span> E* c = s.c_str(); *c; ++c )
+<a name="l01390"></a>01390         h = static_cast&lt;size_t&gt;(*c) ^ (h * internal::hash_multiplier);
+<a name="l01391"></a>01391     <span class="keywordflow">return</span> h;
+<a name="l01392"></a>01392 }
+<a name="l01393"></a>01393 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F, <span class="keyword">typename</span> S&gt;
+<a name="l01394"></a>01394 <span class="keyword">inline</span> size_t tbb_hasher( <span class="keyword">const</span> std::pair&lt;F,S&gt;&amp; p ) {
+<a name="l01395"></a>01395     <span class="keywordflow">return</span> tbb_hasher(p.first) ^ tbb_hasher(p.second);
+<a name="l01396"></a>01396 }
+<a name="l01397"></a>01397 } <span class="comment">// namespace interface5</span>
+<a name="l01398"></a>01398 <span class="keyword">using</span> interface5::tbb_hasher;
+<a name="l01399"></a>01399 } <span class="comment">// namespace tbb</span>
+<a name="l01400"></a>01400 <span class="preprocessor">#endif// __TBB_concurrent_unordered_internal_H</span>
+</pre></div><hr>
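The order-key helpers, get_parent(), and the segment arithmetic shown above all come down to a handful of bit tricks: the hash value is bit-reversed so that buckets split recursively, a bucket's parent is obtained by clearing its most significant set bit, and bucket indices are mapped onto lazily allocated segments of size 2, 2, 4, 8, and so on. The standalone sketch below is not part of the commit; bit_reverse() and log2_floor() are simplified stand-ins for the __TBB_ReverseBits and __TBB_Log2 machine primitives used by the header.

    // Standalone sketch of the bit tricks used by the split-ordered hash map above.
    // bit_reverse() and log2_floor() are simplified stand-ins for the
    // __TBB_ReverseBits and __TBB_Log2 machine primitives.
    #include <cstddef>
    #include <cstdio>

    static std::size_t bit_reverse(std::size_t x) {
        std::size_t r = 0;
        for (unsigned i = 0; i < sizeof(std::size_t) * 8; ++i) {
            r = (r << 1) | (x & 1u);
            x >>= 1;
        }
        return r;
    }

    static std::size_t log2_floor(std::size_t x) {
        std::size_t msb = 0;
        while (x >>= 1) ++msb;
        return msb;
    }

    // Regular (element) key: reversed hash with the low bit set.
    static std::size_t split_order_key_regular(std::size_t hash) {
        return bit_reverse(hash) | 0x1;
    }

    // Dummy (bucket head) key: reversed bucket index with the low bit clear,
    // so it sorts before every regular key that hashes into the bucket.
    static std::size_t split_order_key_dummy(std::size_t bucket) {
        return bit_reverse(bucket) & ~std::size_t(1);
    }

    // Parent bucket: clear the most significant set bit (bucket 6 -> 2 -> 0).
    static std::size_t get_parent(std::size_t bucket) {
        return bucket & ~(std::size_t(1) << log2_floor(bucket));
    }

    // Segment arithmetic: buckets 0-1 live in segment 0, 2-3 in segment 1,
    // 4-7 in segment 2, ..., which is what allows segments to be allocated lazily.
    static std::size_t segment_index_of(std::size_t index) { return log2_floor(index | 1); }
    static std::size_t segment_base(std::size_t k) { return (std::size_t(1) << k) & ~std::size_t(1); }
    static std::size_t segment_size(std::size_t k) { return k ? std::size_t(1) << k : 2; }

    int main() {
        for (std::size_t b = 1; b < 8; ++b) {
            std::size_t seg = segment_index_of(b);
            std::printf("bucket %zu: parent=%zu segment=%zu (size %zu) offset=%zu dummy_key=%#zx\n",
                        b, get_parent(b), seg, segment_size(seg), b - segment_base(seg),
                        split_order_key_dummy(b));
        }
        std::printf("regular key for hash value 6: %#zx\n", split_order_key_regular(6));
        return 0;
    }

Because every element key carries its bucket's reversed prefix, splitting a bucket never moves existing nodes; init_bucket() above only has to link one new dummy node after its parent's dummy node, which is exactly what the recursive initialization does.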
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
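The tbb_hasher() overloads at the end of the file implement multiplicative (Fibonacci) hashing: hash_multiplier is the classic golden-ratio constant, roughly 2^32/phi on 32-bit platforms and 2^64/phi on 64-bit platforms. The generic overload requires the key to be convertible to size_t, which is why pointers, strings, and pairs get their own overloads. The mirror below (my_hasher is a hypothetical stand-in, not the library API) shows how the overloads compose for compound keys:

    // Minimal mirror of the tbb_hasher() overloads above, for illustration only;
    // my_hasher is a hypothetical stand-in, not the library API.
    #include <cstddef>
    #include <cstdio>
    #include <string>
    #include <utility>

    static const std::size_t hash_multiplier =
        sizeof(std::size_t) == 4 ? 2654435769U : 11400714819323198485ULL;

    template<typename T>
    std::size_t my_hasher(const T& t) {                        // integral/enum keys
        return static_cast<std::size_t>(t) * hash_multiplier;
    }

    template<typename E, typename S, typename A>
    std::size_t my_hasher(const std::basic_string<E,S,A>& s) { // string keys
        std::size_t h = 0;
        for (const E* c = s.c_str(); *c; ++c)
            h = static_cast<std::size_t>(*c) ^ (h * hash_multiplier);
        return h;
    }

    template<typename F, typename S>
    std::size_t my_hasher(const std::pair<F,S>& p) {           // compound keys
        return my_hasher(p.first) ^ my_hasher(p.second);
    }

    int main() {
        std::printf("%zx\n", my_hasher(42));
        std::printf("%zx\n", my_hasher(std::string("key")));
        std::printf("%zx\n", my_hasher(std::make_pair(7, std::string("key"))));
        return 0;
    }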
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00314.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00314.html
new file mode 100644 (file)
index 0000000..df4a42f
--- /dev/null
@@ -0,0 +1,103 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>_tbb_windef.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>_tbb_windef.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_tbb_windef_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#error Do not #include this file directly.  Use "#include tbb/tbb_stddef.h" instead.</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* __TBB_tbb_windef_H */</span>
+<a name="l00024"></a>00024 
+<a name="l00025"></a>00025 <span class="comment">// Check that the target Windows version has all API calls requried for TBB.</span>
+<a name="l00026"></a>00026 <span class="comment">// Do not increase the version in condition beyond 0x0500 without prior discussion!</span>
+<a name="l00027"></a>00027 <span class="preprocessor">#if defined(_WIN32_WINNT) &amp;&amp; _WIN32_WINNT&lt;0x0400</span>
+<a name="l00028"></a>00028 <span class="preprocessor"></span><span class="preprocessor">#error TBB is unable to run on old Windows versions; _WIN32_WINNT must be 0x0400 or greater.</span>
+<a name="l00029"></a>00029 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00030"></a>00030 <span class="preprocessor"></span>
+<a name="l00031"></a>00031 <span class="preprocessor">#if !defined(_MT)</span>
+<a name="l00032"></a>00032 <span class="preprocessor"></span><span class="preprocessor">#error TBB requires linkage with multithreaded C/C++ runtime library. \</span>
+<a name="l00033"></a>00033 <span class="preprocessor">       Choose multithreaded DLL runtime in project settings, or use /MD[d] compiler switch.</span>
+<a name="l00034"></a>00034 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00035"></a>00035 <span class="preprocessor"></span>
+<a name="l00036"></a>00036 <span class="comment">// Workaround for the problem with MVSC headers failing to define namespace std</span>
+<a name="l00037"></a>00037 <span class="keyword">namespace </span>std {
+<a name="l00038"></a>00038   using ::size_t; using ::ptrdiff_t;
+<a name="l00039"></a>00039 }
+<a name="l00040"></a>00040 
+<a name="l00041"></a>00041 <span class="preprocessor">#define __TBB_STRING_AUX(x) #x</span>
+<a name="l00042"></a>00042 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_STRING(x) __TBB_STRING_AUX(x)</span>
+<a name="l00043"></a>00043 <span class="preprocessor"></span>
+<a name="l00044"></a>00044 <span class="comment">// Default setting of TBB_USE_DEBUG</span>
+<a name="l00045"></a>00045 <span class="preprocessor">#ifdef TBB_USE_DEBUG</span>
+<a name="l00046"></a>00046 <span class="preprocessor"></span><span class="preprocessor">#    if TBB_USE_DEBUG </span>
+<a name="l00047"></a>00047 <span class="preprocessor"></span><span class="preprocessor">#        if !defined(_DEBUG)</span>
+<a name="l00048"></a>00048 <span class="preprocessor"></span><span class="preprocessor">#            pragma message(__FILE__ "(" __TBB_STRING(__LINE__) ") : Warning: Recommend using /MDd if compiling with TBB_USE_DEBUG!=0")</span>
+<a name="l00049"></a>00049 <span class="preprocessor"></span><span class="preprocessor">#        endif</span>
+<a name="l00050"></a>00050 <span class="preprocessor"></span><span class="preprocessor">#    else</span>
+<a name="l00051"></a>00051 <span class="preprocessor"></span><span class="preprocessor">#        if defined(_DEBUG)</span>
+<a name="l00052"></a>00052 <span class="preprocessor"></span><span class="preprocessor">#            pragma message(__FILE__ "(" __TBB_STRING(__LINE__) ") : Warning: Recommend using /MD if compiling with TBB_USE_DEBUG==0")</span>
+<a name="l00053"></a>00053 <span class="preprocessor"></span><span class="preprocessor">#        endif</span>
+<a name="l00054"></a>00054 <span class="preprocessor"></span><span class="preprocessor">#    endif</span>
+<a name="l00055"></a>00055 <span class="preprocessor"></span><span class="preprocessor">#else</span>
+<a name="l00056"></a>00056 <span class="preprocessor"></span><span class="preprocessor">#    ifdef _DEBUG</span>
+<a name="l00057"></a>00057 <span class="preprocessor"></span><span class="preprocessor">#        define TBB_USE_DEBUG 1</span>
+<a name="l00058"></a>00058 <span class="preprocessor"></span><span class="preprocessor">#    endif</span>
+<a name="l00059"></a>00059 <span class="preprocessor"></span><span class="preprocessor">#endif </span>
+<a name="l00060"></a>00060 <span class="preprocessor"></span>
+<a name="l00061"></a>00061 <span class="preprocessor">#if __TBB_BUILD &amp;&amp; !defined(__TBB_NO_IMPLICIT_LINKAGE)</span>
+<a name="l00062"></a>00062 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_NO_IMPLICIT_LINKAGE 1</span>
+<a name="l00063"></a>00063 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00064"></a>00064 <span class="preprocessor"></span>
+<a name="l00065"></a>00065 <span class="preprocessor">#if _MSC_VER</span>
+<a name="l00066"></a>00066 <span class="preprocessor"></span><span class="preprocessor">    #if !__TBB_NO_IMPLICIT_LINKAGE</span>
+<a name="l00067"></a>00067 <span class="preprocessor"></span><span class="preprocessor">        #ifdef _DEBUG</span>
+<a name="l00068"></a>00068 <span class="preprocessor"></span><span class="preprocessor">            #pragma comment(lib, "tbb_debug.lib")</span>
+<a name="l00069"></a>00069 <span class="preprocessor"></span><span class="preprocessor">        #else</span>
+<a name="l00070"></a>00070 <span class="preprocessor"></span><span class="preprocessor">            #pragma comment(lib, "tbb.lib")</span>
+<a name="l00071"></a>00071 <span class="preprocessor"></span><span class="preprocessor">        #endif</span>
+<a name="l00072"></a>00072 <span class="preprocessor"></span><span class="preprocessor">    #endif</span>
+<a name="l00073"></a>00073 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
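The __TBB_STRING / __TBB_STRING_AUX pair above is the standard two-level stringification idiom: the extra level of indirection forces __LINE__ (or any other macro argument) to be expanded before it is turned into a string literal, which is what lets the #pragma message diagnostics print the actual line number. A quick sketch with illustrative names:

    // Two-level stringification, as used by __TBB_STRING above (illustrative names).
    #include <cstdio>

    #define STRING_AUX(x) #x            /* stringizes the argument as written    */
    #define STRING(x)     STRING_AUX(x) /* expands the argument, then stringizes */

    int main() {
        std::puts(STRING_AUX(__LINE__)); // prints "__LINE__" -- no expansion
        std::puts(STRING(__LINE__));     // prints the current line number, e.g. "9"
        return 0;
    }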
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00316.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00316.html
new file mode 100644 (file)
index 0000000..f0fb98e
--- /dev/null
@@ -0,0 +1,73 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>aligned_space.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>aligned_space.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_aligned_space_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_aligned_space_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "tbb_stddef.h"</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#include "tbb_machine.h"</span>
+<a name="l00026"></a>00026 
+<a name="l00027"></a>00027 <span class="keyword">namespace </span>tbb {
+<a name="l00028"></a>00028 
+<a name="l00030"></a>00030 
+<a name="l00032"></a>00032 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T,size_t N&gt;
+<a name="l00033"></a><a class="code" href="a00146.html">00033</a> <span class="keyword">class </span><a class="code" href="a00146.html">aligned_space</a> {
+<a name="l00034"></a>00034 <span class="keyword">private</span>:
+<a name="l00035"></a>00035     <span class="keyword">typedef</span> __TBB_TypeWithAlignmentAtLeastAsStrict(T) element_type;
+<a name="l00036"></a>00036     element_type array[(<span class="keyword">sizeof</span>(T)*N+<span class="keyword">sizeof</span>(element_type)-1)/<span class="keyword">sizeof</span>(element_type)];
+<a name="l00037"></a>00037 <span class="keyword">public</span>:
+<a name="l00039"></a><a class="code" href="a00146.html#0d702fc6b9e9d061ace3501b3c861cdf">00039</a>     T* <a class="code" href="a00146.html#0d702fc6b9e9d061ace3501b3c861cdf">begin</a>() {<span class="keywordflow">return</span> internal::punned_cast&lt;T*&gt;(<span class="keyword">this</span>);}
+<a name="l00040"></a>00040 
+<a name="l00042"></a><a class="code" href="a00146.html#024be075c23c0394c9a2518d993bcd9e">00042</a>     T* <a class="code" href="a00146.html#024be075c23c0394c9a2518d993bcd9e">end</a>() {<span class="keywordflow">return</span> <a class="code" href="a00146.html#0d702fc6b9e9d061ace3501b3c861cdf">begin</a>()+N;}
+<a name="l00043"></a>00043 };
+<a name="l00044"></a>00044 
+<a name="l00045"></a>00045 } <span class="comment">// namespace tbb </span>
+<a name="l00046"></a>00046 
+<a name="l00047"></a>00047 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_aligned_space_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
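aligned_space<T,N> is plain uninitialized storage whose alignment is at least as strict as T (see the __TBB_TypeWithAlignmentAtLeastAsStrict element type above), so the usual pattern is to construct elements lazily with placement new and destroy them by hand; begin() and end() just give typed pointers into that storage. A usage sketch follows; the Widget type and the include path are assumptions for illustration, not part of this commit.

    // Sketch of the usual aligned_space pattern: reserve aligned, uninitialized
    // storage and construct the elements lazily with placement new. Widget and
    // the include path "tbb/aligned_space.h" are assumptions for illustration.
    #include "tbb/aligned_space.h"
    #include <new>
    #include <cstdio>

    struct Widget {
        explicit Widget(int v) : value(v) {}
        int value;
    };

    int main() {
        tbb::aligned_space<Widget, 4> storage;    // no Widget constructors run here

        for (int i = 0; i < 4; ++i)
            new (storage.begin() + i) Widget(i);  // construct in place

        for (Widget* w = storage.begin(); w != storage.end(); ++w)
            std::printf("%d\n", w->value);

        for (Widget* w = storage.begin(); w != storage.end(); ++w)
            w->~Widget();                         // manual destruction required
        return 0;
    }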
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00317.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00317.html
new file mode 100644 (file)
index 0000000..41b5401
--- /dev/null
@@ -0,0 +1,367 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>atomic.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>atomic.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_atomic_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_atomic_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include &lt;cstddef&gt;</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#include "tbb_stddef.h"</span>
+<a name="l00026"></a>00026 
+<a name="l00027"></a>00027 <span class="preprocessor">#if _MSC_VER </span>
+<a name="l00028"></a>00028 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_LONG_LONG __int64</span>
+<a name="l00029"></a>00029 <span class="preprocessor"></span><span class="preprocessor">#else</span>
+<a name="l00030"></a>00030 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_LONG_LONG long long</span>
+<a name="l00031"></a>00031 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* _MSC_VER */</span>
+<a name="l00032"></a>00032 
+<a name="l00033"></a>00033 <span class="preprocessor">#include "tbb_machine.h"</span>
+<a name="l00034"></a>00034 
+<a name="l00035"></a>00035 <span class="preprocessor">#if defined(_MSC_VER) &amp;&amp; !defined(__INTEL_COMPILER)</span>
+<a name="l00036"></a>00036 <span class="preprocessor"></span>    <span class="comment">// Workaround for overzealous compiler warnings </span>
+<a name="l00037"></a>00037 <span class="preprocessor">    #pragma warning (push)</span>
+<a name="l00038"></a>00038 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (disable: 4244 4267)</span>
+<a name="l00039"></a>00039 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00040"></a>00040 <span class="preprocessor"></span>
+<a name="l00041"></a>00041 <span class="keyword">namespace </span>tbb {
+<a name="l00042"></a>00042 
+<a name="l00044"></a><a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fef">00044</a> <span class="keyword">enum</span> <a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fef">memory_semantics</a> {
+<a name="l00046"></a>00046     <a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fefc6db01678b1749dff7554688d079520c">__TBB_full_fence</a>,
+<a name="l00048"></a>00048     <a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fef5f1fafe8d229d348ff91d937f64e79c7">acquire</a>,
+<a name="l00050"></a>00050     <a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fefaa1fa107db0245c41fb109d976ae8d70">release</a>
+<a name="l00051"></a>00051 };
+<a name="l00052"></a>00052 
+<a name="l00054"></a>00054 <span class="keyword">namespace </span>internal {
+<a name="l00055"></a>00055 
+<a name="l00056"></a>00056 <span class="preprocessor">#if __GNUC__ || __SUNPRO_CC</span>
+<a name="l00057"></a>00057 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_DECL_ATOMIC_FIELD(t,f,a) t f  __attribute__ ((aligned(a)));</span>
+<a name="l00058"></a>00058 <span class="preprocessor"></span><span class="preprocessor">#elif defined(__INTEL_COMPILER)||_MSC_VER &gt;= 1300</span>
+<a name="l00059"></a>00059 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_DECL_ATOMIC_FIELD(t,f,a) __declspec(align(a)) t f;</span>
+<a name="l00060"></a>00060 <span class="preprocessor"></span><span class="preprocessor">#else </span>
+<a name="l00061"></a>00061 <span class="preprocessor"></span><span class="preprocessor">#error Do not know syntax for forcing alignment.</span>
+<a name="l00062"></a>00062 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* __GNUC__ */</span>
+<a name="l00063"></a>00063 
+<a name="l00064"></a>00064 <span class="keyword">template</span>&lt;size_t S&gt;
+<a name="l00065"></a>00065 <span class="keyword">struct </span>atomic_rep;           <span class="comment">// Primary template declared, but never defined.</span>
+<a name="l00066"></a>00066 
+<a name="l00067"></a>00067 <span class="keyword">template</span>&lt;&gt;
+<a name="l00068"></a>00068 <span class="keyword">struct </span>atomic_rep&lt;1&gt; {       <span class="comment">// Specialization</span>
+<a name="l00069"></a>00069     <span class="keyword">typedef</span> int8_t word;
+<a name="l00070"></a>00070     int8_t value;
+<a name="l00071"></a>00071 };
+<a name="l00072"></a>00072 <span class="keyword">template</span>&lt;&gt;
+<a name="l00073"></a>00073 <span class="keyword">struct </span>atomic_rep&lt;2&gt; {       <span class="comment">// Specialization</span>
+<a name="l00074"></a>00074     <span class="keyword">typedef</span> int16_t word;
+<a name="l00075"></a>00075     __TBB_DECL_ATOMIC_FIELD(int16_t,value,2)
+<a name="l00076"></a>00076 };
+<a name="l00077"></a>00077 <span class="keyword">template</span>&lt;&gt;
+<a name="l00078"></a>00078 <span class="keyword">struct </span>atomic_rep&lt;4&gt; {       <span class="comment">// Specialization</span>
+<a name="l00079"></a>00079 <span class="preprocessor">#if _MSC_VER &amp;&amp; __TBB_WORDSIZE==4</span>
+<a name="l00080"></a>00080 <span class="preprocessor"></span>    <span class="comment">// Work-around that avoids spurious /Wp64 warnings</span>
+<a name="l00081"></a>00081     <span class="keyword">typedef</span> intptr_t word;
+<a name="l00082"></a>00082 <span class="preprocessor">#else</span>
+<a name="l00083"></a>00083 <span class="preprocessor"></span>    <span class="keyword">typedef</span> int32_t word;
+<a name="l00084"></a>00084 <span class="preprocessor">#endif</span>
+<a name="l00085"></a>00085 <span class="preprocessor"></span>    __TBB_DECL_ATOMIC_FIELD(int32_t,value,4)
+<a name="l00086"></a>00086 };
+<a name="l00087"></a>00087 <span class="keyword">template</span>&lt;&gt;
+<a name="l00088"></a>00088 <span class="keyword">struct </span>atomic_rep&lt;8&gt; {       <span class="comment">// Specialization</span>
+<a name="l00089"></a>00089     <span class="keyword">typedef</span> int64_t word;
+<a name="l00090"></a>00090     __TBB_DECL_ATOMIC_FIELD(int64_t,value,8)
+<a name="l00091"></a>00091 };
+<a name="l00092"></a>00092 
+<a name="l00093"></a>00093 <span class="keyword">template</span>&lt;size_t Size, memory_semantics M&gt;
+<a name="l00094"></a>00094 <span class="keyword">struct </span>atomic_traits;        <span class="comment">// Primary template declared, but not defined.</span>
+<a name="l00095"></a>00095 
+<a name="l00096"></a>00096 <span class="preprocessor">#define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S,M)                         \</span>
+<a name="l00097"></a>00097 <span class="preprocessor">    template&lt;&gt; struct atomic_traits&lt;S,M&gt; {                               \</span>
+<a name="l00098"></a>00098 <span class="preprocessor">        typedef atomic_rep&lt;S&gt;::word word;                               \</span>
+<a name="l00099"></a>00099 <span class="preprocessor">        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) {\</span>
+<a name="l00100"></a>00100 <span class="preprocessor">            return __TBB_CompareAndSwap##S##M(location,new_value,comparand);    \</span>
+<a name="l00101"></a>00101 <span class="preprocessor">        }                                                                       \</span>
+<a name="l00102"></a>00102 <span class="preprocessor">        inline static word fetch_and_add( volatile void* location, word addend ) { \</span>
+<a name="l00103"></a>00103 <span class="preprocessor">            return __TBB_FetchAndAdd##S##M(location,addend);                    \</span>
+<a name="l00104"></a>00104 <span class="preprocessor">        }                                                                       \</span>
+<a name="l00105"></a>00105 <span class="preprocessor">        inline static word fetch_and_store( volatile void* location, word value ) {\</span>
+<a name="l00106"></a>00106 <span class="preprocessor">            return __TBB_FetchAndStore##S##M(location,value);                   \</span>
+<a name="l00107"></a>00107 <span class="preprocessor">        }                                                                       \</span>
+<a name="l00108"></a>00108 <span class="preprocessor">    };</span>
+<a name="l00109"></a>00109 <span class="preprocessor"></span>
+<a name="l00110"></a>00110 <span class="preprocessor">#define __TBB_DECL_ATOMIC_PRIMITIVES(S)                                  \</span>
+<a name="l00111"></a>00111 <span class="preprocessor">    template&lt;memory_semantics M&gt;                                         \</span>
+<a name="l00112"></a>00112 <span class="preprocessor">    struct atomic_traits&lt;S,M&gt; {                                          \</span>
+<a name="l00113"></a>00113 <span class="preprocessor">        typedef atomic_rep&lt;S&gt;::word word;                               \</span>
+<a name="l00114"></a>00114 <span class="preprocessor">        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) {\</span>
+<a name="l00115"></a>00115 <span class="preprocessor">            return __TBB_CompareAndSwap##S(location,new_value,comparand);       \</span>
+<a name="l00116"></a>00116 <span class="preprocessor">        }                                                                       \</span>
+<a name="l00117"></a>00117 <span class="preprocessor">        inline static word fetch_and_add( volatile void* location, word addend ) { \</span>
+<a name="l00118"></a>00118 <span class="preprocessor">            return __TBB_FetchAndAdd##S(location,addend);                       \</span>
+<a name="l00119"></a>00119 <span class="preprocessor">        }                                                                       \</span>
+<a name="l00120"></a>00120 <span class="preprocessor">        inline static word fetch_and_store( volatile void* location, word value ) {\</span>
+<a name="l00121"></a>00121 <span class="preprocessor">            return __TBB_FetchAndStore##S(location,value);                      \</span>
+<a name="l00122"></a>00122 <span class="preprocessor">        }                                                                       \</span>
+<a name="l00123"></a>00123 <span class="preprocessor">    };</span>
+<a name="l00124"></a>00124 <span class="preprocessor"></span>
+<a name="l00125"></a>00125 <span class="preprocessor">#if __TBB_DECL_FENCED_ATOMICS</span>
+<a name="l00126"></a>00126 <span class="preprocessor"></span>__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,<a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fefc6db01678b1749dff7554688d079520c">__TBB_full_fence</a>)
+<a name="l00127"></a>00127 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,<a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fefc6db01678b1749dff7554688d079520c">__TBB_full_fence</a>)
+<a name="l00128"></a>00128 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,<a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fefc6db01678b1749dff7554688d079520c">__TBB_full_fence</a>)
+<a name="l00129"></a>00129 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,<a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fefc6db01678b1749dff7554688d079520c">__TBB_full_fence</a>)
+<a name="l00130"></a>00130 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,<a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fef5f1fafe8d229d348ff91d937f64e79c7">acquire</a>)
+<a name="l00131"></a>00131 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,<a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fef5f1fafe8d229d348ff91d937f64e79c7">acquire</a>)
+<a name="l00132"></a>00132 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,<a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fef5f1fafe8d229d348ff91d937f64e79c7">acquire</a>)
+<a name="l00133"></a>00133 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,<a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fef5f1fafe8d229d348ff91d937f64e79c7">acquire</a>)
+<a name="l00134"></a>00134 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,<a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fefaa1fa107db0245c41fb109d976ae8d70">release</a>)
+<a name="l00135"></a>00135 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,<a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fefaa1fa107db0245c41fb109d976ae8d70">release</a>)
+<a name="l00136"></a>00136 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,<a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fefaa1fa107db0245c41fb109d976ae8d70">release</a>)
+<a name="l00137"></a>00137 __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,<a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fefaa1fa107db0245c41fb109d976ae8d70">release</a>)
+<a name="l00138"></a>00138 <span class="preprocessor">#else</span>
+<a name="l00139"></a>00139 <span class="preprocessor"></span>__TBB_DECL_ATOMIC_PRIMITIVES(1)
+<a name="l00140"></a>00140 __TBB_DECL_ATOMIC_PRIMITIVES(2)
+<a name="l00141"></a>00141 __TBB_DECL_ATOMIC_PRIMITIVES(4)
+<a name="l00142"></a>00142 __TBB_DECL_ATOMIC_PRIMITIVES(8)
+<a name="l00143"></a>00143 <span class="preprocessor">#endif</span>
+<a name="l00144"></a>00144 <span class="preprocessor"></span>
+<a name="l00146"></a>00146 
+<a name="l00148"></a>00148 <span class="preprocessor">#define __TBB_MINUS_ONE(T) (T(T(0)-T(1)))</span>
+<a name="l00149"></a>00149 <span class="preprocessor"></span>
+<a name="l00151"></a>00151 
+<a name="l00153"></a>00153 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00154"></a>00154 <span class="keyword">struct </span>atomic_impl {
+<a name="l00155"></a>00155 <span class="keyword">protected</span>:
+<a name="l00156"></a>00156     atomic_rep&lt;sizeof(T)&gt; rep;
+<a name="l00157"></a>00157 <span class="keyword">private</span>:
+<a name="l00159"></a>00159     <span class="keyword">union </span>converter {
+<a name="l00160"></a>00160         T value;
+<a name="l00161"></a>00161         <span class="keyword">typename</span> atomic_rep&lt;sizeof(T)&gt;::word bits;
+<a name="l00162"></a>00162     };
+<a name="l00163"></a>00163 <span class="keyword">public</span>:
+<a name="l00164"></a>00164     <span class="keyword">typedef</span> T value_type;
+<a name="l00165"></a>00165 
+<a name="l00166"></a>00166     <span class="keyword">template</span>&lt;memory_semantics M&gt;
+<a name="l00167"></a>00167     value_type fetch_and_store( value_type value ) {
+<a name="l00168"></a>00168         converter u, w;
+<a name="l00169"></a>00169         u.value = value;
+<a name="l00170"></a>00170         w.bits = internal::atomic_traits&lt;sizeof(value_type),M&gt;::fetch_and_store(&amp;rep.value,u.bits);
+<a name="l00171"></a>00171         <span class="keywordflow">return</span> w.value;
+<a name="l00172"></a>00172     }
+<a name="l00173"></a>00173 
+<a name="l00174"></a>00174     value_type fetch_and_store( value_type value ) {
+<a name="l00175"></a>00175         <span class="keywordflow">return</span> fetch_and_store&lt;__TBB_full_fence&gt;(value);
+<a name="l00176"></a>00176     }
+<a name="l00177"></a>00177 
+<a name="l00178"></a>00178     <span class="keyword">template</span>&lt;memory_semantics M&gt;
+<a name="l00179"></a>00179     value_type compare_and_swap( value_type value, value_type comparand ) {
+<a name="l00180"></a>00180         converter u, v, w;
+<a name="l00181"></a>00181         u.value = value;
+<a name="l00182"></a>00182         v.value = comparand;
+<a name="l00183"></a>00183         w.bits = internal::atomic_traits&lt;sizeof(value_type),M&gt;::compare_and_swap(&amp;rep.value,u.bits,v.bits);
+<a name="l00184"></a>00184         <span class="keywordflow">return</span> w.value;
+<a name="l00185"></a>00185     }
+<a name="l00186"></a>00186 
+<a name="l00187"></a>00187     value_type compare_and_swap( value_type value, value_type comparand ) {
+<a name="l00188"></a>00188         <span class="keywordflow">return</span> compare_and_swap&lt;__TBB_full_fence&gt;(value,comparand);
+<a name="l00189"></a>00189     }
+<a name="l00190"></a>00190 
+<a name="l00191"></a>00191     operator value_type()<span class="keyword"> const volatile </span>{                <span class="comment">// volatile qualifier here for backwards compatibility </span>
+<a name="l00192"></a>00192         converter w;
+<a name="l00193"></a>00193         w.bits = __TBB_load_with_acquire( rep.value );
+<a name="l00194"></a>00194         <span class="keywordflow">return</span> w.value;
+<a name="l00195"></a>00195     }
+<a name="l00196"></a>00196 
+<a name="l00197"></a>00197 <span class="keyword">protected</span>:
+<a name="l00198"></a>00198     value_type store_with_release( value_type rhs ) {
+<a name="l00199"></a>00199         converter u;
+<a name="l00200"></a>00200         u.value = rhs;
+<a name="l00201"></a>00201         __TBB_store_with_release(rep.value,u.bits);
+<a name="l00202"></a>00202         <span class="keywordflow">return</span> rhs;
+<a name="l00203"></a>00203     }
+<a name="l00204"></a>00204 };
+<a name="l00205"></a>00205 
+<a name="l00207"></a>00207 
+<a name="l00210"></a>00210 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> I, <span class="keyword">typename</span> D, <span class="keyword">typename</span> StepType&gt;
+<a name="l00211"></a>00211 <span class="keyword">struct </span>atomic_impl_with_arithmetic: atomic_impl&lt;I&gt; {
+<a name="l00212"></a>00212 <span class="keyword">public</span>:
+<a name="l00213"></a>00213     <span class="keyword">typedef</span> I value_type;
+<a name="l00214"></a>00214 
+<a name="l00215"></a>00215     <span class="keyword">template</span>&lt;memory_semantics M&gt;
+<a name="l00216"></a>00216     value_type fetch_and_add( D addend ) {
+<a name="l00217"></a>00217         <span class="keywordflow">return</span> value_type(internal::atomic_traits&lt;<span class="keyword">sizeof</span>(value_type),M&gt;::fetch_and_add( &amp;this-&gt;rep.value, addend*<span class="keyword">sizeof</span>(StepType) ));
+<a name="l00218"></a>00218     }
+<a name="l00219"></a>00219 
+<a name="l00220"></a>00220     value_type fetch_and_add( D addend ) {
+<a name="l00221"></a>00221         <span class="keywordflow">return</span> fetch_and_add&lt;__TBB_full_fence&gt;(addend);
+<a name="l00222"></a>00222     }
+<a name="l00223"></a>00223 
+<a name="l00224"></a>00224     <span class="keyword">template</span>&lt;memory_semantics M&gt;
+<a name="l00225"></a>00225     value_type fetch_and_increment() {
+<a name="l00226"></a>00226         <span class="keywordflow">return</span> fetch_and_add&lt;M&gt;(1);
+<a name="l00227"></a>00227     }
+<a name="l00228"></a>00228 
+<a name="l00229"></a>00229     value_type fetch_and_increment() {
+<a name="l00230"></a>00230         <span class="keywordflow">return</span> fetch_and_add(1);
+<a name="l00231"></a>00231     }
+<a name="l00232"></a>00232 
+<a name="l00233"></a>00233     <span class="keyword">template</span>&lt;memory_semantics M&gt;
+<a name="l00234"></a>00234     value_type fetch_and_decrement() {
+<a name="l00235"></a>00235         <span class="keywordflow">return</span> fetch_and_add&lt;M&gt;(__TBB_MINUS_ONE(D));
+<a name="l00236"></a>00236     }
+<a name="l00237"></a>00237 
+<a name="l00238"></a>00238     value_type fetch_and_decrement() {
+<a name="l00239"></a>00239         <span class="keywordflow">return</span> fetch_and_add(__TBB_MINUS_ONE(D));
+<a name="l00240"></a>00240     }
+<a name="l00241"></a>00241 
+<a name="l00242"></a>00242 <span class="keyword">public</span>:
+<a name="l00243"></a>00243     value_type operator+=( D addend ) {
+<a name="l00244"></a>00244         <span class="keywordflow">return</span> fetch_and_add(addend)+addend;
+<a name="l00245"></a>00245     }
+<a name="l00246"></a>00246 
+<a name="l00247"></a>00247     value_type operator-=( D addend ) {
+<a name="l00248"></a>00248         <span class="comment">// Additive inverse of addend computed using binary minus,</span>
+<a name="l00249"></a>00249         <span class="comment">// instead of unary minus, for sake of avoiding compiler warnings.</span>
+<a name="l00250"></a>00250         <span class="keywordflow">return</span> operator+=(D(0)-addend);    
+<a name="l00251"></a>00251     }
+<a name="l00252"></a>00252 
+<a name="l00253"></a>00253     value_type operator++() {
+<a name="l00254"></a>00254         <span class="keywordflow">return</span> fetch_and_add(1)+1;
+<a name="l00255"></a>00255     }
+<a name="l00256"></a>00256 
+<a name="l00257"></a>00257     value_type operator--() {
+<a name="l00258"></a>00258         <span class="keywordflow">return</span> fetch_and_add(__TBB_MINUS_ONE(D))-1;
+<a name="l00259"></a>00259     }
+<a name="l00260"></a>00260 
+<a name="l00261"></a>00261     value_type operator++(<span class="keywordtype">int</span>) {
+<a name="l00262"></a>00262         <span class="keywordflow">return</span> fetch_and_add(1);
+<a name="l00263"></a>00263     }
+<a name="l00264"></a>00264 
+<a name="l00265"></a>00265     value_type operator--(<span class="keywordtype">int</span>) {
+<a name="l00266"></a>00266         <span class="keywordflow">return</span> fetch_and_add(__TBB_MINUS_ONE(D));
+<a name="l00267"></a>00267     }
+<a name="l00268"></a>00268 };
+<a name="l00269"></a>00269 
+<a name="l00270"></a>00270 } <span class="comment">/* Internal */</span>
+<a name="l00272"></a>00272 
+<a name="l00274"></a>00274 
+<a name="l00276"></a>00276 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00277"></a><a class="code" href="a00147.html">00277</a> <span class="keyword">struct </span><a class="code" href="a00147.html">atomic</a>: internal::atomic_impl&lt;T&gt; {
+<a name="l00278"></a>00278     T operator=( T rhs ) {
+<a name="l00279"></a>00279         <span class="comment">// "this" required here in strict ISO C++ because store_with_release is a dependent name</span>
+<a name="l00280"></a>00280         <span class="keywordflow">return</span> this-&gt;store_with_release(rhs);
+<a name="l00281"></a>00281     }
+<a name="l00282"></a>00282     <a class="code" href="a00147.html">atomic&lt;T&gt;</a>&amp; operator=( <span class="keyword">const</span> <a class="code" href="a00147.html">atomic&lt;T&gt;</a>&amp; rhs ) {this-&gt;store_with_release(rhs); <span class="keywordflow">return</span> *<span class="keyword">this</span>;}
+<a name="l00283"></a>00283 };
+<a name="l00284"></a>00284 
+<a name="l00285"></a>00285 <span class="preprocessor">#define __TBB_DECL_ATOMIC(T) \</span>
+<a name="l00286"></a>00286 <span class="preprocessor">    template&lt;&gt; struct atomic&lt;T&gt;: internal::atomic_impl_with_arithmetic&lt;T,T,char&gt; {  \</span>
+<a name="l00287"></a>00287 <span class="preprocessor">        T operator=( T rhs ) {return store_with_release(rhs);}  \</span>
+<a name="l00288"></a>00288 <span class="preprocessor">        atomic&lt;T&gt;&amp; operator=( const atomic&lt;T&gt;&amp; rhs ) {store_with_release(rhs); return *this;}  \</span>
+<a name="l00289"></a>00289 <span class="preprocessor">    };</span>
+<a name="l00290"></a>00290 <span class="preprocessor"></span>
+<a name="l00291"></a>00291 __TBB_DECL_ATOMIC(__TBB_LONG_LONG)
+<a name="l00292"></a>00292 __TBB_DECL_ATOMIC(<span class="keywordtype">unsigned</span> __TBB_LONG_LONG)
+<a name="l00293"></a>00293 __TBB_DECL_ATOMIC(<span class="keywordtype">long</span>)
+<a name="l00294"></a>00294 __TBB_DECL_ATOMIC(<span class="keywordtype">unsigned</span> <span class="keywordtype">long</span>)
+<a name="l00295"></a>00295 
+<a name="l00296"></a>00296 <span class="preprocessor">#if defined(_MSC_VER) &amp;&amp; __TBB_WORDSIZE==4</span>
+<a name="l00297"></a>00297 <span class="preprocessor"></span><span class="comment">/* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from cl /Wp64 option. </span>
+<a name="l00298"></a>00298 <span class="comment">   It is identical to __TBB_DECL_ATOMIC(unsigned) except that it replaces operator=(T) </span>
+<a name="l00299"></a>00299 <span class="comment">   with an operator=(U) that explicitly converts the U to a T.  Types T and U should be</span>
+<a name="l00300"></a>00300 <span class="comment">   type synonyms on the platform.  Type U should be the wider variant of T from the</span>
+<a name="l00301"></a>00301 <span class="comment">   perspective of /Wp64. */</span>
+<a name="l00302"></a>00302 <span class="preprocessor">#define __TBB_DECL_ATOMIC_ALT(T,U) \</span>
+<a name="l00303"></a>00303 <span class="preprocessor">    template&lt;&gt; struct atomic&lt;T&gt;: internal::atomic_impl_with_arithmetic&lt;T,T,char&gt; {  \</span>
+<a name="l00304"></a>00304 <span class="preprocessor">        T operator=( U rhs ) {return store_with_release(T(rhs));}  \</span>
+<a name="l00305"></a>00305 <span class="preprocessor">        atomic&lt;T&gt;&amp; operator=( const atomic&lt;T&gt;&amp; rhs ) {store_with_release(rhs); return *this;}  \</span>
+<a name="l00306"></a>00306 <span class="preprocessor">    };</span>
+<a name="l00307"></a>00307 <span class="preprocessor"></span><a class="code" href="a00272.html#ad165cf61abbe349d413df2589679add">__TBB_DECL_ATOMIC_ALT</a>(<span class="keywordtype">unsigned</span>,size_t)
+<a name="l00308"></a><a class="code" href="a00272.html#ad165cf61abbe349d413df2589679add">00308</a> <a class="code" href="a00272.html#ad165cf61abbe349d413df2589679add">__TBB_DECL_ATOMIC_ALT</a>(<span class="keywordtype">int</span>,ptrdiff_t)
+<a name="l00309"></a>00309 <span class="preprocessor">#else</span>
+<a name="l00310"></a>00310 <span class="preprocessor"></span>__TBB_DECL_ATOMIC(<span class="keywordtype">unsigned</span>)
+<a name="l00311"></a>00311 __TBB_DECL_ATOMIC(<span class="keywordtype">int</span>)
+<a name="l00312"></a>00312 <span class="preprocessor">#endif </span><span class="comment">/* defined(_MSC_VER) &amp;&amp; __TBB_WORDSIZE==4 */</span>
+<a name="l00313"></a>00313 
+<a name="l00314"></a>00314 __TBB_DECL_ATOMIC(<span class="keywordtype">unsigned</span> <span class="keywordtype">short</span>)
+<a name="l00315"></a>00315 __TBB_DECL_ATOMIC(<span class="keywordtype">short</span>)
+<a name="l00316"></a>00316 __TBB_DECL_ATOMIC(<span class="keywordtype">char</span>)
+<a name="l00317"></a>00317 __TBB_DECL_ATOMIC(<span class="keywordtype">signed</span> <span class="keywordtype">char</span>)
+<a name="l00318"></a>00318 __TBB_DECL_ATOMIC(<span class="keywordtype">unsigned</span> <span class="keywordtype">char</span>)
+<a name="l00319"></a>00319 
+<a name="l00320"></a>00320 <span class="preprocessor">#if !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED) </span>
+<a name="l00321"></a>00321 <span class="preprocessor"></span>__TBB_DECL_ATOMIC(<span class="keywordtype">wchar_t</span>)
+<a name="l00322"></a>00322 <span class="preprocessor">#endif </span><span class="comment">/* _MSC_VER||!defined(_NATIVE_WCHAR_T_DEFINED) */</span>
+<a name="l00323"></a>00323 
+<a name="l00325"></a>00325 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt; <span class="keyword">struct </span><a class="code" href="a00147.html">atomic</a>&lt;T*&gt;: internal::atomic_impl_with_arithmetic&lt;T*,ptrdiff_t,T&gt; {
+<a name="l00326"></a>00326     T* operator=( T* rhs ) {
+<a name="l00327"></a>00327         <span class="comment">// "this" required here in strict ISO C++ because store_with_release is a dependent name</span>
+<a name="l00328"></a>00328         <span class="keywordflow">return</span> this-&gt;store_with_release(rhs);
+<a name="l00329"></a>00329     }
+<a name="l00330"></a>00330     <a class="code" href="a00147.html">atomic&lt;T*&gt;</a>&amp; operator=( <span class="keyword">const</span> <a class="code" href="a00147.html">atomic&lt;T*&gt;</a>&amp; rhs ) {
+<a name="l00331"></a>00331         this-&gt;store_with_release(rhs); <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00332"></a>00332     }
+<a name="l00333"></a>00333     T* operator-&gt;()<span class="keyword"> const </span>{
+<a name="l00334"></a>00334         <span class="keywordflow">return</span> (*<span class="keyword">this</span>);
+<a name="l00335"></a>00335     }
+<a name="l00336"></a>00336 };
+<a name="l00337"></a>00337 
+<a name="l00339"></a><a class="code" href="a00148.html">00339</a> <span class="keyword">template</span>&lt;&gt; <span class="keyword">struct </span><a class="code" href="a00147.html">atomic</a>&lt;void*&gt;: internal::atomic_impl&lt;void*&gt; {
+<a name="l00340"></a>00340     <span class="keywordtype">void</span>* operator=( <span class="keywordtype">void</span>* rhs ) {
+<a name="l00341"></a>00341         <span class="comment">// "this" required here in strict ISO C++ because store_with_release is a dependent name</span>
+<a name="l00342"></a>00342         <span class="keywordflow">return</span> this-&gt;store_with_release(rhs);
+<a name="l00343"></a>00343     }
+<a name="l00344"></a>00344     <a class="code" href="a00147.html">atomic&lt;void*&gt;</a>&amp; operator=( <span class="keyword">const</span> <a class="code" href="a00147.html">atomic&lt;void*&gt;</a>&amp; rhs ) {
+<a name="l00345"></a>00345         this-&gt;store_with_release(rhs); <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00346"></a>00346     }
+<a name="l00347"></a>00347 };
+<a name="l00348"></a>00348 
+<a name="l00349"></a>00349 } <span class="comment">// namespace tbb</span>
+<a name="l00350"></a>00350 
+<a name="l00351"></a>00351 <span class="preprocessor">#if defined(_MSC_VER) &amp;&amp; !defined(__INTEL_COMPILER)</span>
+<a name="l00352"></a>00352 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (pop)</span>
+<a name="l00353"></a>00353 <span class="preprocessor"></span><span class="preprocessor">#endif // warnings 4244, 4267 are back</span>
+<a name="l00354"></a>00354 <span class="preprocessor"></span>
+<a name="l00355"></a>00355 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_atomic_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
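The listing above is the tail of tbb/atomic.h as added by this commit. A minimal usage sketch of the operations it declares (the arithmetic operators over fetch_and_add, compare_and_swap, fetch_and_store); illustrative only, not part of the patch, and the counter name and concrete values are assumed:

    #include <cassert>
    #include "tbb/atomic.h"

    int main() {
        tbb::atomic<int> counter;
        counter = 0;                  // operator= goes through store_with_release

        counter += 5;                 // fetch_and_add underneath
        ++counter;                    // fetch_and_increment; returns the new value
        assert(counter == 6);

        // compare_and_swap returns the value observed before the operation;
        // the store happened only if that value equals the comparand.
        int seen = counter.compare_and_swap(10 /*value*/, 6 /*comparand*/);
        assert(seen == 6 && counter == 10);

        int old = counter.fetch_and_store(0);   // unconditional exchange
        assert(old == 10 && counter == 0);
        return 0;
    }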
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00325.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00325.html
new file mode 100644 (file)
index 0000000..3da0cf9
--- /dev/null
@@ -0,0 +1,127 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>blocked_range.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>blocked_range.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_blocked_range_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_blocked_range_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "tbb_stddef.h"</span>
+<a name="l00025"></a>00025 
+<a name="l00026"></a>00026 <span class="keyword">namespace </span>tbb {
+<a name="l00027"></a>00027 
+<a name="l00037"></a>00037 
+<a name="l00038"></a>00038 
+<a name="l00039"></a>00039 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Value&gt;
+<a name="l00040"></a><a class="code" href="a00152.html">00040</a> <span class="keyword">class </span><a class="code" href="a00152.html">blocked_range</a> {
+<a name="l00041"></a>00041 <span class="keyword">public</span>:
+<a name="l00043"></a>00043 
+<a name="l00045"></a><a class="code" href="a00152.html#1a8d05842c2b3dfc177bc4d347e4cef7">00045</a>     <span class="keyword">typedef</span> Value <a class="code" href="a00152.html#1a8d05842c2b3dfc177bc4d347e4cef7">const_iterator</a>;
+<a name="l00046"></a>00046 
+<a name="l00048"></a><a class="code" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">00048</a>     <span class="keyword">typedef</span> std::size_t <a class="code" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">size_type</a>;
+<a name="l00049"></a>00049 
+<a name="l00051"></a>00051 
+<a name="l00052"></a><a class="code" href="a00152.html#94607755c5110d199202234d58d022ac">00052</a>     <a class="code" href="a00152.html#94607755c5110d199202234d58d022ac">blocked_range</a>() : my_end(), my_begin() {}
+<a name="l00053"></a>00053 
+<a name="l00055"></a><a class="code" href="a00152.html#14795a36ead1414b4371dbe1a4656359">00055</a>     <a class="code" href="a00152.html#94607755c5110d199202234d58d022ac">blocked_range</a>( Value begin_, Value end_, <a class="code" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">size_type</a> grainsize_=1 ) : 
+<a name="l00056"></a>00056         my_end(end_), my_begin(begin_), my_grainsize(grainsize_) 
+<a name="l00057"></a>00057     {
+<a name="l00058"></a>00058         __TBB_ASSERT( my_grainsize&gt;0, <span class="stringliteral">"grainsize must be positive"</span> );
+<a name="l00059"></a>00059     }
+<a name="l00060"></a>00060 
+<a name="l00062"></a><a class="code" href="a00152.html#18d2258400756ac1446dac7676b18df3">00062</a>     <a class="code" href="a00152.html#1a8d05842c2b3dfc177bc4d347e4cef7">const_iterator</a> <a class="code" href="a00152.html#18d2258400756ac1446dac7676b18df3">begin</a>()<span class="keyword"> const </span>{<span class="keywordflow">return</span> my_begin;}
+<a name="l00063"></a>00063 
+<a name="l00065"></a><a class="code" href="a00152.html#8b929d93ddc13f148b11bceef3a3bdf8">00065</a>     <a class="code" href="a00152.html#1a8d05842c2b3dfc177bc4d347e4cef7">const_iterator</a> <a class="code" href="a00152.html#8b929d93ddc13f148b11bceef3a3bdf8">end</a>()<span class="keyword"> const </span>{<span class="keywordflow">return</span> my_end;}
+<a name="l00066"></a>00066 
+<a name="l00068"></a>00068 
+<a name="l00069"></a><a class="code" href="a00152.html#9eaa0b6beff1420f688570bbf6b8c462">00069</a>     <a class="code" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">size_type</a> <a class="code" href="a00152.html#9eaa0b6beff1420f688570bbf6b8c462">size</a>()<span class="keyword"> const </span>{
+<a name="l00070"></a>00070         __TBB_ASSERT( !(<a class="code" href="a00152.html#8b929d93ddc13f148b11bceef3a3bdf8">end</a>()&lt;<a class="code" href="a00152.html#18d2258400756ac1446dac7676b18df3">begin</a>()), <span class="stringliteral">"size() unspecified if end()&lt;begin()"</span> );
+<a name="l00071"></a>00071         <span class="keywordflow">return</span> <a class="code" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">size_type</a>(my_end-my_begin);
+<a name="l00072"></a>00072     }
+<a name="l00073"></a>00073 
+<a name="l00075"></a><a class="code" href="a00152.html#fcd2e5b8b6c11fd3f20fc0aa9f11bbc2">00075</a>     <a class="code" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">size_type</a> <a class="code" href="a00152.html#fcd2e5b8b6c11fd3f20fc0aa9f11bbc2">grainsize</a>()<span class="keyword"> const </span>{<span class="keywordflow">return</span> my_grainsize;}
+<a name="l00076"></a>00076 
+<a name="l00077"></a>00077     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00078"></a>00078     <span class="comment">// Methods that implement Range concept</span>
+<a name="l00079"></a>00079     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00080"></a>00080 
+<a name="l00082"></a><a class="code" href="a00152.html#8f4f02f530eb3f2e7ea26e06f76aef9d">00082</a>     <span class="keywordtype">bool</span> <a class="code" href="a00152.html#8f4f02f530eb3f2e7ea26e06f76aef9d">empty</a>()<span class="keyword"> const </span>{<span class="keywordflow">return</span> !(my_begin&lt;my_end);}
+<a name="l00083"></a>00083 
+<a name="l00085"></a>00085 
+<a name="l00086"></a><a class="code" href="a00152.html#41a58b703d574b6e1ca155df3576f578">00086</a>     <span class="keywordtype">bool</span> <a class="code" href="a00152.html#41a58b703d574b6e1ca155df3576f578">is_divisible</a>()<span class="keyword"> const </span>{<span class="keywordflow">return</span> my_grainsize&lt;<a class="code" href="a00152.html#9eaa0b6beff1420f688570bbf6b8c462">size</a>();}
+<a name="l00087"></a>00087 
+<a name="l00089"></a>00089 
+<a name="l00091"></a><a class="code" href="a00152.html#4c0efd2be3f96a0ab3ba5085e8b3fcc7">00091</a>     <a class="code" href="a00152.html#94607755c5110d199202234d58d022ac">blocked_range</a>( <a class="code" href="a00152.html">blocked_range</a>&amp; r, <a class="code" href="a00203.html">split</a> ) : 
+<a name="l00092"></a>00092         my_end(r.my_end),
+<a name="l00093"></a>00093         my_begin(do_split(r)),
+<a name="l00094"></a>00094         my_grainsize(r.my_grainsize)
+<a name="l00095"></a>00095     {}
+<a name="l00096"></a>00096 
+<a name="l00097"></a>00097 <span class="keyword">private</span>:
+<a name="l00099"></a>00099     Value my_end;
+<a name="l00100"></a>00100     Value my_begin;
+<a name="l00101"></a>00101     <a class="code" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">size_type</a> my_grainsize;
+<a name="l00102"></a>00102 
+<a name="l00104"></a>00104 
+<a name="l00105"></a>00105     <span class="keyword">static</span> Value do_split( <a class="code" href="a00152.html">blocked_range</a>&amp; r ) {
+<a name="l00106"></a>00106         __TBB_ASSERT( r.<a class="code" href="a00152.html#41a58b703d574b6e1ca155df3576f578">is_divisible</a>(), <span class="stringliteral">"cannot split blocked_range that is not divisible"</span> );
+<a name="l00107"></a>00107         Value middle = r.<a class="code" href="a00152.html#e02559f817d8352c7bdf41dfddc53958">my_begin</a> + (r.<a class="code" href="a00152.html#1ec95c8988b50064dd603998b16f3930">my_end</a>-r.<a class="code" href="a00152.html#e02559f817d8352c7bdf41dfddc53958">my_begin</a>)/2u;
+<a name="l00108"></a>00108         r.<a class="code" href="a00152.html#1ec95c8988b50064dd603998b16f3930">my_end</a> = middle;
+<a name="l00109"></a>00109         <span class="keywordflow">return</span> middle;
+<a name="l00110"></a>00110     }
+<a name="l00111"></a>00111 
+<a name="l00112"></a>00112     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> RowValue, <span class="keyword">typename</span> ColValue&gt;
+<a name="l00113"></a>00113     <span class="keyword">friend</span> <span class="keyword">class </span>blocked_range2d;
+<a name="l00114"></a>00114 
+<a name="l00115"></a>00115     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> RowValue, <span class="keyword">typename</span> ColValue, <span class="keyword">typename</span> PageValue&gt;
+<a name="l00116"></a>00116     <span class="keyword">friend</span> <span class="keyword">class </span>blocked_range3d;
+<a name="l00117"></a>00117 };
+<a name="l00118"></a>00118 
+<a name="l00119"></a>00119 } <span class="comment">// namespace tbb </span>
+<a name="l00120"></a>00120 
+<a name="l00121"></a>00121 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_blocked_range_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
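The blocked_range.h listing above is the 1D range that parallel_for and related algorithms recursively subdivide. A small sketch of the splitting constructor blocked_range(blocked_range&, split); illustrative only, not part of the patch, with assumed bounds and grainsize:

    #include <cassert>
    #include "tbb/blocked_range.h"   // also provides tbb::split via tbb_stddef.h

    int main() {
        tbb::blocked_range<size_t> r(0, 100, /*grainsize*/ 10);
        assert(r.size() == 100 && r.is_divisible());

        // The splitting constructor takes the upper half of r
        // and shrinks r to the lower half.
        tbb::blocked_range<size_t> upper(r, tbb::split());
        assert(r.begin() == 0 && r.end() == 50);
        assert(upper.begin() == 50 && upper.end() == 100);

        // Splitting stops once size() is no larger than grainsize().
        tbb::blocked_range<size_t> tiny(0, 5, 10);
        assert(!tiny.is_divisible());
        return 0;
    }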
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00326.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00326.html
new file mode 100644 (file)
index 0000000..958ef10
--- /dev/null
@@ -0,0 +1,113 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>blocked_range2d.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>blocked_range2d.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_blocked_range2d_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_blocked_range2d_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "tbb_stddef.h"</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#include "blocked_range.h"</span>
+<a name="l00026"></a>00026 
+<a name="l00027"></a>00027 <span class="keyword">namespace </span>tbb {
+<a name="l00028"></a>00028 
+<a name="l00030"></a>00030 
+<a name="l00031"></a>00031 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> RowValue, <span class="keyword">typename</span> ColValue=RowValue&gt;
+<a name="l00032"></a><a class="code" href="a00153.html">00032</a> <span class="keyword">class </span><a class="code" href="a00153.html">blocked_range2d</a> {
+<a name="l00033"></a>00033 <span class="keyword">public</span>:
+<a name="l00035"></a><a class="code" href="a00153.html#a807a22fe658ec38b8edfd69521d0383">00035</a>     <span class="keyword">typedef</span> <a class="code" href="a00152.html">blocked_range&lt;RowValue&gt;</a> <a class="code" href="a00152.html">row_range_type</a>;
+<a name="l00036"></a>00036     <span class="keyword">typedef</span> <a class="code" href="a00152.html">blocked_range&lt;ColValue&gt;</a> <a class="code" href="a00152.html">col_range_type</a>;
+<a name="l00037"></a>00037  
+<a name="l00038"></a>00038 <span class="keyword">private</span>:
+<a name="l00039"></a>00039     <a class="code" href="a00152.html">row_range_type</a> my_rows;
+<a name="l00040"></a>00040     <a class="code" href="a00152.html">col_range_type</a> my_cols;
+<a name="l00041"></a>00041 
+<a name="l00042"></a>00042 <span class="keyword">public</span>:
+<a name="l00043"></a>00043 
+<a name="l00044"></a>00044     <a class="code" href="a00153.html">blocked_range2d</a>( RowValue row_begin, RowValue row_end, <span class="keyword">typename</span> <a class="code" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">row_range_type::size_type</a> row_grainsize,
+<a name="l00045"></a>00045                      ColValue col_begin, ColValue col_end, <span class="keyword">typename</span> <a class="code" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">col_range_type::size_type</a> col_grainsize ) : 
+<a name="l00046"></a>00046         my_rows(row_begin,row_end,row_grainsize),
+<a name="l00047"></a>00047         my_cols(col_begin,col_end,col_grainsize)
+<a name="l00048"></a>00048     {
+<a name="l00049"></a>00049     }
+<a name="l00050"></a>00050 
+<a name="l00051"></a>00051     <a class="code" href="a00153.html">blocked_range2d</a>( RowValue row_begin, RowValue row_end,
+<a name="l00052"></a>00052                      ColValue col_begin, ColValue col_end ) : 
+<a name="l00053"></a>00053         my_rows(row_begin,row_end),
+<a name="l00054"></a>00054         my_cols(col_begin,col_end)
+<a name="l00055"></a>00055     {
+<a name="l00056"></a>00056     }
+<a name="l00057"></a>00057 
+<a name="l00059"></a><a class="code" href="a00153.html#d144cb2d88cef553420311aca8667a44">00059</a>     <span class="keywordtype">bool</span> <a class="code" href="a00153.html#d144cb2d88cef553420311aca8667a44">empty</a>()<span class="keyword"> const </span>{
+<a name="l00060"></a>00060         <span class="comment">// Yes, it is a logical OR here, not AND.</span>
+<a name="l00061"></a>00061         <span class="keywordflow">return</span> my_rows.<a class="code" href="a00152.html#8f4f02f530eb3f2e7ea26e06f76aef9d">empty</a>() || my_cols.<a class="code" href="a00152.html#8f4f02f530eb3f2e7ea26e06f76aef9d">empty</a>();
+<a name="l00062"></a>00062     }
+<a name="l00063"></a>00063 
+<a name="l00065"></a><a class="code" href="a00153.html#ad36a9b38e4fef26d376f99552ce2d92">00065</a>     <span class="keywordtype">bool</span> <a class="code" href="a00153.html#ad36a9b38e4fef26d376f99552ce2d92">is_divisible</a>()<span class="keyword"> const </span>{
+<a name="l00066"></a>00066         <span class="keywordflow">return</span> my_rows.<a class="code" href="a00152.html#41a58b703d574b6e1ca155df3576f578">is_divisible</a>() || my_cols.<a class="code" href="a00152.html#41a58b703d574b6e1ca155df3576f578">is_divisible</a>();
+<a name="l00067"></a>00067     }
+<a name="l00068"></a>00068 
+<a name="l00069"></a>00069     <a class="code" href="a00153.html">blocked_range2d</a>( <a class="code" href="a00153.html">blocked_range2d</a>&amp; r, <a class="code" href="a00203.html">split</a> ) : 
+<a name="l00070"></a>00070         my_rows(r.my_rows),
+<a name="l00071"></a>00071         my_cols(r.my_cols)
+<a name="l00072"></a>00072     {
+<a name="l00073"></a>00073         <span class="keywordflow">if</span>( my_rows.<a class="code" href="a00152.html#9eaa0b6beff1420f688570bbf6b8c462">size</a>()*double(my_cols.<a class="code" href="a00152.html#fcd2e5b8b6c11fd3f20fc0aa9f11bbc2">grainsize</a>()) &lt; my_cols.<a class="code" href="a00152.html#9eaa0b6beff1420f688570bbf6b8c462">size</a>()*double(my_rows.<a class="code" href="a00152.html#fcd2e5b8b6c11fd3f20fc0aa9f11bbc2">grainsize</a>()) ) {
+<a name="l00074"></a>00074             my_cols.<a class="code" href="a00152.html#e02559f817d8352c7bdf41dfddc53958">my_begin</a> = col_range_type::do_split(r.<a class="code" href="a00153.html#9837bbaabc5836741fde79512ea1131d">my_cols</a>);
+<a name="l00075"></a>00075         } <span class="keywordflow">else</span> {
+<a name="l00076"></a>00076             my_rows.<a class="code" href="a00152.html#e02559f817d8352c7bdf41dfddc53958">my_begin</a> = row_range_type::do_split(r.<a class="code" href="a00153.html#4be7c7b39da256afe9c67be735b6f788">my_rows</a>);
+<a name="l00077"></a>00077         }
+<a name="l00078"></a>00078     }
+<a name="l00079"></a>00079 
+<a name="l00081"></a><a class="code" href="a00153.html#f496e7348a82652fba581203477cc07c">00081</a>     <span class="keyword">const</span> <a class="code" href="a00152.html">row_range_type</a>&amp; <a class="code" href="a00153.html#f496e7348a82652fba581203477cc07c">rows</a>()<span class="keyword"> const </span>{<span class="keywordflow">return</span> my_rows;}
+<a name="l00082"></a>00082 
+<a name="l00084"></a><a class="code" href="a00153.html#392a46759af2c884957115771affa7f4">00084</a>     <span class="keyword">const</span> <a class="code" href="a00152.html">col_range_type</a>&amp; <a class="code" href="a00153.html#392a46759af2c884957115771affa7f4">cols</a>()<span class="keyword"> const </span>{<span class="keywordflow">return</span> my_cols;}
+<a name="l00085"></a>00085 };
+<a name="l00086"></a>00086 
+<a name="l00087"></a>00087 } <span class="comment">// namespace tbb </span>
+<a name="l00088"></a>00088 
+<a name="l00089"></a>00089 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_blocked_range2d_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
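blocked_range2d.h above composes two blocked_range objects, and its splitting constructor halves whichever dimension is larger relative to its grainsize. A sketch of that choice; illustrative only, not part of the patch, with assumed extents and grainsizes:

    #include <cassert>
    #include "tbb/blocked_range2d.h"

    int main() {
        // 200 rows by 100 columns, grainsize 10 in each dimension.
        tbb::blocked_range2d<size_t> r(0, 200, 10, 0, 100, 10);
        assert(r.is_divisible());

        // rows().size()*cols().grainsize() = 2000 is not less than
        // cols().size()*rows().grainsize() = 1000, so the row dimension is halved.
        tbb::blocked_range2d<size_t> upper(r, tbb::split());
        assert(r.rows().end() == 100 && upper.rows().begin() == 100);
        assert(r.cols().size() == 100 && upper.cols().size() == 100);
        return 0;
    }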
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00327.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00327.html
new file mode 100644 (file)
index 0000000..f6f141c
--- /dev/null
@@ -0,0 +1,131 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>blocked_range3d.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>blocked_range3d.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_blocked_range3d_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_blocked_range3d_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "tbb_stddef.h"</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#include "blocked_range.h"</span>
+<a name="l00026"></a>00026 
+<a name="l00027"></a>00027 <span class="keyword">namespace </span>tbb {
+<a name="l00028"></a>00028 
+<a name="l00030"></a>00030 
+<a name="l00031"></a>00031 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> PageValue, <span class="keyword">typename</span> RowValue=PageValue, <span class="keyword">typename</span> ColValue=RowValue&gt;
+<a name="l00032"></a><a class="code" href="a00154.html">00032</a> <span class="keyword">class </span><a class="code" href="a00154.html">blocked_range3d</a> {
+<a name="l00033"></a>00033 <span class="keyword">public</span>:
+<a name="l00035"></a><a class="code" href="a00154.html#b8ebf17a552ba47825e9b3887855b719">00035</a>     <span class="keyword">typedef</span> <a class="code" href="a00152.html">blocked_range&lt;PageValue&gt;</a> <a class="code" href="a00152.html">page_range_type</a>;
+<a name="l00036"></a>00036     <span class="keyword">typedef</span> <a class="code" href="a00152.html">blocked_range&lt;RowValue&gt;</a>  <a class="code" href="a00152.html">row_range_type</a>;
+<a name="l00037"></a>00037     <span class="keyword">typedef</span> <a class="code" href="a00152.html">blocked_range&lt;ColValue&gt;</a>  <a class="code" href="a00152.html">col_range_type</a>;
+<a name="l00038"></a>00038  
+<a name="l00039"></a>00039 <span class="keyword">private</span>:
+<a name="l00040"></a>00040     <a class="code" href="a00152.html">page_range_type</a> my_pages;
+<a name="l00041"></a>00041     <a class="code" href="a00152.html">row_range_type</a>  my_rows;
+<a name="l00042"></a>00042     <a class="code" href="a00152.html">col_range_type</a>  my_cols;
+<a name="l00043"></a>00043 
+<a name="l00044"></a>00044 <span class="keyword">public</span>:
+<a name="l00045"></a>00045 
+<a name="l00046"></a>00046     <a class="code" href="a00154.html">blocked_range3d</a>( PageValue page_begin, PageValue page_end,
+<a name="l00047"></a>00047                      RowValue  row_begin,  RowValue row_end,
+<a name="l00048"></a>00048                      ColValue  col_begin,  ColValue col_end ) : 
+<a name="l00049"></a>00049         my_pages(page_begin,page_end),
+<a name="l00050"></a>00050         my_rows(row_begin,row_end),
+<a name="l00051"></a>00051         my_cols(col_begin,col_end)
+<a name="l00052"></a>00052     {
+<a name="l00053"></a>00053     }
+<a name="l00054"></a>00054 
+<a name="l00055"></a>00055     <a class="code" href="a00154.html">blocked_range3d</a>( PageValue page_begin, PageValue page_end, <span class="keyword">typename</span> <a class="code" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">page_range_type::size_type</a> page_grainsize, 
+<a name="l00056"></a>00056                      RowValue  row_begin,  RowValue row_end,   <span class="keyword">typename</span> <a class="code" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">row_range_type::size_type</a> row_grainsize,
+<a name="l00057"></a>00057                      ColValue  col_begin,  ColValue col_end,   <span class="keyword">typename</span> <a class="code" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">col_range_type::size_type</a> col_grainsize ) :  
+<a name="l00058"></a>00058         my_pages(page_begin,page_end,page_grainsize),
+<a name="l00059"></a>00059         my_rows(row_begin,row_end,row_grainsize),
+<a name="l00060"></a>00060         my_cols(col_begin,col_end,col_grainsize)
+<a name="l00061"></a>00061     {
+<a name="l00062"></a>00062     }
+<a name="l00063"></a>00063 
+<a name="l00065"></a><a class="code" href="a00154.html#356860e1c977d91711e8216bd55c0b25">00065</a>     <span class="keywordtype">bool</span> <a class="code" href="a00154.html#356860e1c977d91711e8216bd55c0b25">empty</a>()<span class="keyword"> const </span>{
+<a name="l00066"></a>00066         <span class="comment">// Yes, it is a logical OR here, not AND.</span>
+<a name="l00067"></a>00067         <span class="keywordflow">return</span> my_pages.<a class="code" href="a00152.html#8f4f02f530eb3f2e7ea26e06f76aef9d">empty</a>() || my_rows.<a class="code" href="a00152.html#8f4f02f530eb3f2e7ea26e06f76aef9d">empty</a>() || my_cols.<a class="code" href="a00152.html#8f4f02f530eb3f2e7ea26e06f76aef9d">empty</a>();
+<a name="l00068"></a>00068     }
+<a name="l00069"></a>00069 
+<a name="l00071"></a><a class="code" href="a00154.html#39d69191721c488e737ae5d9c5336b9c">00071</a>     <span class="keywordtype">bool</span> <a class="code" href="a00154.html#39d69191721c488e737ae5d9c5336b9c">is_divisible</a>()<span class="keyword"> const </span>{
+<a name="l00072"></a>00072         <span class="keywordflow">return</span>  my_pages.<a class="code" href="a00152.html#41a58b703d574b6e1ca155df3576f578">is_divisible</a>() || my_rows.<a class="code" href="a00152.html#41a58b703d574b6e1ca155df3576f578">is_divisible</a>() || my_cols.<a class="code" href="a00152.html#41a58b703d574b6e1ca155df3576f578">is_divisible</a>();
+<a name="l00073"></a>00073     }
+<a name="l00074"></a>00074 
+<a name="l00075"></a>00075     <a class="code" href="a00154.html">blocked_range3d</a>( <a class="code" href="a00154.html">blocked_range3d</a>&amp; r, <a class="code" href="a00203.html">split</a> ) : 
+<a name="l00076"></a>00076         my_pages(r.my_pages),
+<a name="l00077"></a>00077         my_rows(r.my_rows),
+<a name="l00078"></a>00078         my_cols(r.my_cols)
+<a name="l00079"></a>00079     {
+<a name="l00080"></a>00080         <span class="keywordflow">if</span>( my_pages.<a class="code" href="a00152.html#9eaa0b6beff1420f688570bbf6b8c462">size</a>()*double(my_rows.<a class="code" href="a00152.html#fcd2e5b8b6c11fd3f20fc0aa9f11bbc2">grainsize</a>()) &lt; my_rows.<a class="code" href="a00152.html#9eaa0b6beff1420f688570bbf6b8c462">size</a>()*double(my_pages.<a class="code" href="a00152.html#fcd2e5b8b6c11fd3f20fc0aa9f11bbc2">grainsize</a>()) ) {
+<a name="l00081"></a>00081             <span class="keywordflow">if</span> ( my_rows.<a class="code" href="a00152.html#9eaa0b6beff1420f688570bbf6b8c462">size</a>()*double(my_cols.<a class="code" href="a00152.html#fcd2e5b8b6c11fd3f20fc0aa9f11bbc2">grainsize</a>()) &lt; my_cols.<a class="code" href="a00152.html#9eaa0b6beff1420f688570bbf6b8c462">size</a>()*double(my_rows.<a class="code" href="a00152.html#fcd2e5b8b6c11fd3f20fc0aa9f11bbc2">grainsize</a>()) ) {
+<a name="l00082"></a>00082                 my_cols.<a class="code" href="a00152.html#e02559f817d8352c7bdf41dfddc53958">my_begin</a> = col_range_type::do_split(r.<a class="code" href="a00154.html#ef7143ddf8f5b1265b5a6d05be00cde5">my_cols</a>);
+<a name="l00083"></a>00083             } <span class="keywordflow">else</span> {
+<a name="l00084"></a>00084                 my_rows.<a class="code" href="a00152.html#e02559f817d8352c7bdf41dfddc53958">my_begin</a> = row_range_type::do_split(r.<a class="code" href="a00154.html#76ead25c280ef630b88a1c8846471707">my_rows</a>);
+<a name="l00085"></a>00085             }
+<a name="l00086"></a>00086         } <span class="keywordflow">else</span> {
+<a name="l00087"></a>00087             <span class="keywordflow">if</span> ( my_pages.<a class="code" href="a00152.html#9eaa0b6beff1420f688570bbf6b8c462">size</a>()*double(my_cols.<a class="code" href="a00152.html#fcd2e5b8b6c11fd3f20fc0aa9f11bbc2">grainsize</a>()) &lt; my_cols.<a class="code" href="a00152.html#9eaa0b6beff1420f688570bbf6b8c462">size</a>()*double(my_pages.<a class="code" href="a00152.html#fcd2e5b8b6c11fd3f20fc0aa9f11bbc2">grainsize</a>()) ) {
+<a name="l00088"></a>00088                 my_cols.<a class="code" href="a00152.html#e02559f817d8352c7bdf41dfddc53958">my_begin</a> = col_range_type::do_split(r.<a class="code" href="a00154.html#ef7143ddf8f5b1265b5a6d05be00cde5">my_cols</a>);
+<a name="l00089"></a>00089             } <span class="keywordflow">else</span> {
+<a name="l00090"></a>00090                     my_pages.<a class="code" href="a00152.html#e02559f817d8352c7bdf41dfddc53958">my_begin</a> = page_range_type::do_split(r.<a class="code" href="a00154.html#b18ae46a01d42745078d9e287ab87baa">my_pages</a>);
+<a name="l00091"></a>00091             }
+<a name="l00092"></a>00092         }
+<a name="l00093"></a>00093     }
+<a name="l00094"></a>00094 
+<a name="l00096"></a><a class="code" href="a00154.html#cf971430aa12361d3ed245344b7c6764">00096</a>     <span class="keyword">const</span> <a class="code" href="a00152.html">page_range_type</a>&amp; <a class="code" href="a00154.html#cf971430aa12361d3ed245344b7c6764">pages</a>()<span class="keyword"> const </span>{<span class="keywordflow">return</span> my_pages;}
+<a name="l00097"></a>00097 
+<a name="l00099"></a><a class="code" href="a00154.html#1584623e59ff32a8aa82006827508be4">00099</a>     <span class="keyword">const</span> <a class="code" href="a00152.html">row_range_type</a>&amp; <a class="code" href="a00154.html#1584623e59ff32a8aa82006827508be4">rows</a>()<span class="keyword"> const </span>{<span class="keywordflow">return</span> my_rows;}
+<a name="l00100"></a>00100 
+<a name="l00102"></a><a class="code" href="a00154.html#3336ba9480fd6c43e158f9beb024c050">00102</a>     <span class="keyword">const</span> <a class="code" href="a00152.html">col_range_type</a>&amp; <a class="code" href="a00154.html#3336ba9480fd6c43e158f9beb024c050">cols</a>()<span class="keyword"> const </span>{<span class="keywordflow">return</span> my_cols;}
+<a name="l00103"></a>00103 
+<a name="l00104"></a>00104 };
+<a name="l00105"></a>00105 
+<a name="l00106"></a>00106 } <span class="comment">// namespace tbb </span>
+<a name="l00107"></a>00107 
+<a name="l00108"></a>00108 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_blocked_range3d_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
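blocked_range3d.h above extends the same idea to pages, rows and columns; the nested comparisons in its splitting constructor pick the dimension that is largest relative to its grainsize. A sketch, illustrative only and not part of the patch, with assumed extents and the default grainsize of 1:

    #include <cassert>
    #include "tbb/blocked_range3d.h"

    int main() {
        // 8 pages by 64 rows by 32 columns, grainsize 1 everywhere.
        tbb::blocked_range3d<size_t> r(0, 8, 0, 64, 0, 32);
        assert(r.is_divisible());

        // Rows are the largest dimension relative to their grainsize,
        // so the splitting constructor halves the row range only.
        tbb::blocked_range3d<size_t> upper(r, tbb::split());
        assert(r.rows().end() == 32 && upper.rows().begin() == 32);
        assert(r.pages().size() == 8 && upper.pages().size() == 8);
        assert(r.cols().size() == 32 && upper.cols().size() == 32);
        return 0;
    }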
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00328.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00328.html
new file mode 100644 (file)
index 0000000..21a990e
--- /dev/null
@@ -0,0 +1,140 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>cache_aligned_allocator.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>cache_aligned_allocator.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_cache_aligned_allocator_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_cache_aligned_allocator_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include &lt;new&gt;</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#include "tbb_stddef.h"</span>
+<a name="l00026"></a>00026 
+<a name="l00027"></a>00027 <span class="keyword">namespace </span>tbb {
+<a name="l00028"></a>00028 
+<a name="l00030"></a>00030 <span class="keyword">namespace </span>internal {
+<a name="l00032"></a>00032 
+<a name="l00033"></a>00033     size_t __TBB_EXPORTED_FUNC NFS_GetLineSize();
+<a name="l00034"></a>00034 
+<a name="l00036"></a>00036 
+<a name="l00037"></a>00037     <span class="keywordtype">void</span>* __TBB_EXPORTED_FUNC NFS_Allocate( size_t n_element, size_t element_size, <span class="keywordtype">void</span>* hint );
+<a name="l00038"></a>00038 
+<a name="l00040"></a>00040 
+<a name="l00042"></a>00042     <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC NFS_Free( <span class="keywordtype">void</span>* );
+<a name="l00043"></a>00043 }
+<a name="l00045"></a>00045 
+<a name="l00046"></a>00046 <span class="preprocessor">#if _MSC_VER &amp;&amp; !defined(__INTEL_COMPILER)</span>
+<a name="l00047"></a>00047 <span class="preprocessor"></span>    <span class="comment">// Workaround for erroneous "unreferenced parameter" warning in method destroy.</span>
+<a name="l00048"></a>00048 <span class="preprocessor">    #pragma warning (push)</span>
+<a name="l00049"></a>00049 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (disable: 4100)</span>
+<a name="l00050"></a>00050 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00051"></a>00051 <span class="preprocessor"></span>
+<a name="l00053"></a>00053 
+<a name="l00056"></a>00056 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00057"></a><a class="code" href="a00155.html">00057</a> <span class="keyword">class </span><a class="code" href="a00155.html">cache_aligned_allocator</a> {
+<a name="l00058"></a>00058 <span class="keyword">public</span>:
+<a name="l00059"></a>00059     <span class="keyword">typedef</span> <span class="keyword">typename</span> internal::allocator_type&lt;T&gt;::value_type value_type;
+<a name="l00060"></a>00060     <span class="keyword">typedef</span> value_type* pointer;
+<a name="l00061"></a>00061     <span class="keyword">typedef</span> <span class="keyword">const</span> value_type* const_pointer;
+<a name="l00062"></a>00062     <span class="keyword">typedef</span> value_type&amp; reference;
+<a name="l00063"></a>00063     <span class="keyword">typedef</span> <span class="keyword">const</span> value_type&amp; const_reference;
+<a name="l00064"></a>00064     <span class="keyword">typedef</span> size_t size_type;
+<a name="l00065"></a>00065     <span class="keyword">typedef</span> ptrdiff_t difference_type;
+<a name="l00066"></a>00066     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> U&gt; <span class="keyword">struct </span>rebind {
+<a name="l00067"></a>00067         <span class="keyword">typedef</span> <a class="code" href="a00155.html">cache_aligned_allocator&lt;U&gt;</a> other;
+<a name="l00068"></a>00068     };
+<a name="l00069"></a>00069 
+<a name="l00070"></a>00070     <a class="code" href="a00155.html">cache_aligned_allocator</a>() <span class="keywordflow">throw</span>() {}
+<a name="l00071"></a>00071     <a class="code" href="a00155.html">cache_aligned_allocator</a>( <span class="keyword">const</span> <a class="code" href="a00155.html">cache_aligned_allocator</a>&amp; ) <span class="keywordflow">throw</span>() {}
+<a name="l00072"></a>00072     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> U&gt; <a class="code" href="a00155.html">cache_aligned_allocator</a>(<span class="keyword">const</span> <a class="code" href="a00155.html">cache_aligned_allocator&lt;U&gt;</a>&amp;) <span class="keywordflow">throw</span>() {}
+<a name="l00073"></a>00073 
+<a name="l00074"></a>00074     pointer address(reference x)<span class="keyword"> const </span>{<span class="keywordflow">return</span> &amp;x;}
+<a name="l00075"></a>00075     const_pointer address(const_reference x)<span class="keyword"> const </span>{<span class="keywordflow">return</span> &amp;x;}
+<a name="l00076"></a>00076     
+<a name="l00078"></a><a class="code" href="a00155.html#4cdeea67af6c1fcd8f1d5e9c4cab61e8">00078</a>     pointer <a class="code" href="a00155.html#4cdeea67af6c1fcd8f1d5e9c4cab61e8">allocate</a>( size_type n, <span class="keyword">const</span> <span class="keywordtype">void</span>* hint=0 ) {
+<a name="l00079"></a>00079         <span class="comment">// The "hint" argument is always ignored in NFS_Allocate thus const_cast shouldn't hurt</span>
+<a name="l00080"></a>00080         <span class="keywordflow">return</span> pointer(internal::NFS_Allocate( n, <span class="keyword">sizeof</span>(value_type), const_cast&lt;void*&gt;(hint) ));
+<a name="l00081"></a>00081     }
+<a name="l00082"></a>00082 
+<a name="l00084"></a><a class="code" href="a00155.html#3d4eadf188f7d85d3805ae534e0b8e1c">00084</a>     <span class="keywordtype">void</span> <a class="code" href="a00155.html#3d4eadf188f7d85d3805ae534e0b8e1c">deallocate</a>( pointer p, size_type ) {
+<a name="l00085"></a>00085         internal::NFS_Free(p);
+<a name="l00086"></a>00086     }
+<a name="l00087"></a>00087 
+<a name="l00089"></a><a class="code" href="a00155.html#fb23b687b4c0429dab4c7f8017266cf0">00089</a>     size_type <a class="code" href="a00155.html#fb23b687b4c0429dab4c7f8017266cf0">max_size</a>() const throw() {
+<a name="l00090"></a>00090         <span class="keywordflow">return</span> (~size_t(0)-internal::NFS_MaxLineSize)/<span class="keyword">sizeof</span>(value_type);
+<a name="l00091"></a>00091     }
+<a name="l00092"></a>00092 
+<a name="l00094"></a><a class="code" href="a00155.html#958ee8745c86c275bfc9533af565e017">00094</a>     <span class="keywordtype">void</span> <a class="code" href="a00155.html#958ee8745c86c275bfc9533af565e017">construct</a>( pointer p, <span class="keyword">const</span> value_type&amp; value ) {::new((<span class="keywordtype">void</span>*)(p)) value_type(value);}
+<a name="l00095"></a>00095 
+<a name="l00097"></a><a class="code" href="a00155.html#cd298895a4f1654b5149ec84b591ecb5">00097</a>     <span class="keywordtype">void</span> <a class="code" href="a00155.html#cd298895a4f1654b5149ec84b591ecb5">destroy</a>( pointer p ) {p-&gt;~value_type();}
+<a name="l00098"></a>00098 };
+<a name="l00099"></a>00099 
+<a name="l00100"></a>00100 <span class="preprocessor">#if _MSC_VER &amp;&amp; !defined(__INTEL_COMPILER)</span>
+<a name="l00101"></a>00101 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (pop)</span>
+<a name="l00102"></a>00102 <span class="preprocessor"></span><span class="preprocessor">#endif // warning 4100 is back</span>
+<a name="l00103"></a>00103 <span class="preprocessor"></span>
+<a name="l00105"></a>00105 
+<a name="l00106"></a>00106 <span class="keyword">template</span>&lt;&gt; 
+<a name="l00107"></a><a class="code" href="a00156.html">00107</a> <span class="keyword">class </span><a class="code" href="a00155.html">cache_aligned_allocator</a>&lt;void&gt; {
+<a name="l00108"></a>00108 <span class="keyword">public</span>:
+<a name="l00109"></a>00109     <span class="keyword">typedef</span> <span class="keywordtype">void</span>* pointer;
+<a name="l00110"></a>00110     <span class="keyword">typedef</span> <span class="keyword">const</span> <span class="keywordtype">void</span>* const_pointer;
+<a name="l00111"></a>00111     <span class="keyword">typedef</span> <span class="keywordtype">void</span> value_type;
+<a name="l00112"></a>00112     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> U&gt; <span class="keyword">struct </span>rebind {
+<a name="l00113"></a>00113         <span class="keyword">typedef</span> <a class="code" href="a00155.html">cache_aligned_allocator&lt;U&gt;</a> other;
+<a name="l00114"></a>00114     };
+<a name="l00115"></a>00115 };
+<a name="l00116"></a>00116 
+<a name="l00117"></a>00117 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00118"></a>00118 <span class="keyword">inline</span> <span class="keywordtype">bool</span> operator==( <span class="keyword">const</span> <a class="code" href="a00155.html">cache_aligned_allocator&lt;T&gt;</a>&amp;, <span class="keyword">const</span> <a class="code" href="a00155.html">cache_aligned_allocator&lt;U&gt;</a>&amp; ) {<span class="keywordflow">return</span> <span class="keyword">true</span>;}
+<a name="l00119"></a>00119 
+<a name="l00120"></a>00120 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00121"></a>00121 <span class="keyword">inline</span> <span class="keywordtype">bool</span> operator!=( <span class="keyword">const</span> cache_aligned_allocator&lt;T&gt;&amp;, <span class="keyword">const</span> cache_aligned_allocator&lt;U&gt;&amp; ) {<span class="keywordflow">return</span> <span class="keyword">false</span>;}
+<a name="l00122"></a>00122 
+<a name="l00123"></a>00123 } <span class="comment">// namespace tbb</span>
+<a name="l00124"></a>00124 
+<a name="l00125"></a>00125 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_cache_aligned_allocator_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
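
The file above adds tbb::cache_aligned_allocator, a drop-in STL-style allocator whose allocate() forwards to internal::NFS_Allocate, so every allocation starts on a cache-line boundary and is padded to a multiple of the line size reported by NFS_GetLineSize(). A minimal usage sketch follows; it is an illustration only, not part of the commit, and the "tbb/" include prefix is an assumption, since this diff shows only the generated documentation page.

    // Sketch only (assumed include path): cache-line-aligned storage for a
    // small array of counters, so the buffer does not share a cache line
    // with unrelated heap data (reducing false sharing).
    #include <vector>
    #include "tbb/cache_aligned_allocator.h"

    int main() {
        // allocate() returns memory aligned to the cache line and padded to a
        // multiple of it; the vector simply plugs the allocator in as its
        // second template argument.
        std::vector<double, tbb::cache_aligned_allocator<double> > counters( 8, 0.0 );
        counters[0] += 1.0;
        return counters[0] > 0.0 ? 0 : 1;
    }

The trade-off is extra memory per allocation; the header's max_size() reflects this by subtracting NFS_MaxLineSize from the addressable range.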
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00330.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00330.html
new file mode 100644 (file)
index 0000000..bb792be
--- /dev/null
@@ -0,0 +1,98 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>combinable.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>combinable.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_combinable_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_combinable_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "enumerable_thread_specific.h"</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#include "cache_aligned_allocator.h"</span>
+<a name="l00026"></a>00026 
+<a name="l00027"></a>00027 <span class="keyword">namespace </span>tbb {
+<a name="l00031"></a>00031 
+<a name="l00032"></a>00032 
+<a name="l00033"></a>00033     <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;
+<a name="l00034"></a><a class="code" href="a00158.html">00034</a>         <span class="keyword">class </span><a class="code" href="a00158.html">combinable</a> {
+<a name="l00035"></a>00035     <span class="keyword">private</span>:
+<a name="l00036"></a>00036         <span class="keyword">typedef</span> <span class="keyword">typename</span> <a class="code" href="a00155.html">tbb::cache_aligned_allocator&lt;T&gt;</a> <a class="code" href="a00155.html">my_alloc</a>;
+<a name="l00037"></a>00037 
+<a name="l00038"></a>00038         <span class="keyword">typedef</span> <span class="keyword">typename</span> tbb::enumerable_thread_specific&lt;T, my_alloc, ets_no_key&gt; my_ets_type;
+<a name="l00039"></a>00039         my_ets_type my_ets; 
+<a name="l00040"></a>00040  
+<a name="l00041"></a>00041     <span class="keyword">public</span>:
+<a name="l00042"></a>00042 
+<a name="l00043"></a>00043         <a class="code" href="a00158.html">combinable</a>() { }
+<a name="l00044"></a>00044 
+<a name="l00045"></a>00045         <span class="keyword">template</span> &lt;<span class="keyword">typename</span> finit&gt;
+<a name="l00046"></a>00046         <a class="code" href="a00158.html">combinable</a>( finit _finit) : my_ets(_finit) { }
+<a name="l00047"></a>00047 
+<a name="l00049"></a><a class="code" href="a00158.html#2c87e79ae98588a5780f708773388843">00049</a>         <a class="code" href="a00158.html#2c87e79ae98588a5780f708773388843">~combinable</a>() { 
+<a name="l00050"></a>00050         }
+<a name="l00051"></a>00051 
+<a name="l00052"></a>00052         <a class="code" href="a00158.html">combinable</a>(<span class="keyword">const</span> <a class="code" href="a00158.html">combinable</a>&amp; other) : my_ets(other.my_ets) { }
+<a name="l00053"></a>00053 
+<a name="l00054"></a>00054         combinable &amp; operator=( <span class="keyword">const</span> combinable &amp; other) { my_ets = other.my_ets; <span class="keywordflow">return</span> *<span class="keyword">this</span>; }
+<a name="l00055"></a>00055 
+<a name="l00056"></a>00056         <span class="keywordtype">void</span> clear() { my_ets.clear(); }
+<a name="l00057"></a>00057 
+<a name="l00058"></a>00058         T&amp; local() { <span class="keywordflow">return</span> my_ets.local(); }
+<a name="l00059"></a>00059 
+<a name="l00060"></a>00060         T&amp; local(<span class="keywordtype">bool</span> &amp; exists) { <span class="keywordflow">return</span> my_ets.local(exists); }
+<a name="l00061"></a>00061 
+<a name="l00062"></a>00062         <span class="comment">// combine_func_t has signature T(T,T) or T(const T&amp;, const T&amp;)</span>
+<a name="l00063"></a>00063         <span class="keyword">template</span> &lt;<span class="keyword">typename</span> combine_func_t&gt;
+<a name="l00064"></a>00064         T combine(combine_func_t f_combine) { <span class="keywordflow">return</span> my_ets.combine(f_combine); }
+<a name="l00065"></a>00065 
+<a name="l00066"></a>00066         <span class="comment">// combine_func_t has signature void(T) or void(const T&amp;)</span>
+<a name="l00067"></a>00067         <span class="keyword">template</span> &lt;<span class="keyword">typename</span> combine_func_t&gt;
+<a name="l00068"></a>00068         <span class="keywordtype">void</span> combine_each(combine_func_t f_combine) { my_ets.combine_each(f_combine); }
+<a name="l00069"></a>00069 
+<a name="l00070"></a>00070     };
+<a name="l00071"></a>00071 } <span class="comment">// namespace tbb</span>
+<a name="l00072"></a>00072 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_combinable_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
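
The combinable&lt;T&gt; class added above wraps enumerable_thread_specific&lt;T&gt;: local() hands each thread its own lazily-created copy, and combine()/combine_each() reduce the per-thread copies afterwards. A minimal usage sketch follows; it is an illustration only, not part of the commit, and the include paths plus the blocked_range form of parallel_for are assumptions drawn from the other headers in this import.

    // Sketch only: per-thread partial sums updated without locking,
    // then reduced with combine().
    #include <functional>
    #include <iostream>
    #include "tbb/blocked_range.h"
    #include "tbb/combinable.h"
    #include "tbb/parallel_for.h"

    struct SumBody {
        tbb::combinable<long>* sums;
        explicit SumBody( tbb::combinable<long>* s ) : sums(s) {}
        void operator()( const tbb::blocked_range<int>& r ) const {
            long& local = sums->local();          // this thread's private copy
            for( int i = r.begin(); i != r.end(); ++i )
                local += i;
        }
    };

    int main() {
        tbb::combinable<long> sums;
        tbb::parallel_for( tbb::blocked_range<int>(0, 1000), SumBody(&sums) );
        // combine() expects a binary functor with signature T(T,T) or
        // T(const T&, const T&), as noted in the combinable.h comments above.
        std::cout << sums.combine( std::plus<long>() ) << std::endl;   // prints 499500
        return 0;
    }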
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00331.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00331.html
new file mode 100644 (file)
index 0000000..a4074e1
--- /dev/null
@@ -0,0 +1,1284 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>concurrent_hash_map.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>concurrent_hash_map.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_concurrent_hash_map_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_concurrent_hash_map_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "tbb_stddef.h"</span>
+<a name="l00025"></a>00025 
+<a name="l00026"></a>00026 <span class="preprocessor">#if !TBB_USE_EXCEPTIONS &amp;&amp; _MSC_VER</span>
+<a name="l00027"></a>00027 <span class="preprocessor"></span>    <span class="comment">// Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers</span>
+<a name="l00028"></a>00028 <span class="preprocessor">    #pragma warning (push)</span>
+<a name="l00029"></a>00029 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (disable: 4530)</span>
+<a name="l00030"></a>00030 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00031"></a>00031 <span class="preprocessor"></span>
+<a name="l00032"></a>00032 <span class="preprocessor">#include &lt;iterator&gt;</span>
+<a name="l00033"></a>00033 <span class="preprocessor">#include &lt;utility&gt;</span>      <span class="comment">// Need std::pair</span>
+<a name="l00034"></a>00034 <span class="preprocessor">#include &lt;cstring&gt;</span>      <span class="comment">// Need std::memset</span>
+<a name="l00035"></a>00035 
+<a name="l00036"></a>00036 <span class="preprocessor">#if !TBB_USE_EXCEPTIONS &amp;&amp; _MSC_VER</span>
+<a name="l00037"></a>00037 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (pop)</span>
+<a name="l00038"></a>00038 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00039"></a>00039 <span class="preprocessor"></span>
+<a name="l00040"></a>00040 <span class="preprocessor">#include "cache_aligned_allocator.h"</span>
+<a name="l00041"></a>00041 <span class="preprocessor">#include "tbb_allocator.h"</span>
+<a name="l00042"></a>00042 <span class="preprocessor">#include "spin_rw_mutex.h"</span>
+<a name="l00043"></a>00043 <span class="preprocessor">#include "atomic.h"</span>
+<a name="l00044"></a>00044 <span class="preprocessor">#include "aligned_space.h"</span>
+<a name="l00045"></a>00045 <span class="preprocessor">#include "tbb_exception.h"</span>
+<a name="l00046"></a>00046 <span class="preprocessor">#include "_concurrent_unordered_internal.h"</span> <span class="comment">// Need tbb_hasher</span>
+<a name="l00047"></a>00047 <span class="preprocessor">#if TBB_USE_PERFORMANCE_WARNINGS</span>
+<a name="l00048"></a>00048 <span class="preprocessor"></span><span class="preprocessor">#include &lt;typeinfo&gt;</span>
+<a name="l00049"></a>00049 <span class="preprocessor">#endif</span>
+<a name="l00050"></a>00050 <span class="preprocessor"></span>
+<a name="l00051"></a>00051 <span class="keyword">namespace </span>tbb {
+<a name="l00052"></a>00052 
+<a name="l00054"></a>00054 <span class="keyword">namespace </span>internal {
+<a name="l00056"></a>00056     <span class="keywordtype">void</span>* __TBB_EXPORTED_FUNC itt_load_pointer_with_acquire_v3( <span class="keyword">const</span> <span class="keywordtype">void</span>* src );
+<a name="l00058"></a>00058     <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC itt_store_pointer_with_release_v3( <span class="keywordtype">void</span>* dst, <span class="keywordtype">void</span>* src );
+<a name="l00060"></a>00060     <span class="keywordtype">void</span>* __TBB_EXPORTED_FUNC itt_load_pointer_v3( <span class="keyword">const</span> <span class="keywordtype">void</span>* src );
+<a name="l00061"></a>00061 }
+<a name="l00063"></a>00063 
+<a name="l00065"></a>00065 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Key&gt;
+<a name="l00066"></a><a class="code" href="a00213.html">00066</a> <span class="keyword">struct </span><a class="code" href="a00213.html">tbb_hash_compare</a> {
+<a name="l00067"></a>00067     <span class="keyword">static</span> size_t hash( <span class="keyword">const</span> Key&amp; a ) { <span class="keywordflow">return</span> tbb_hasher(a); }
+<a name="l00068"></a>00068     <span class="keyword">static</span> <span class="keywordtype">bool</span> equal( <span class="keyword">const</span> Key&amp; a, <span class="keyword">const</span> Key&amp; b ) { <span class="keywordflow">return</span> a == b; }
+<a name="l00069"></a>00069 };
+<a name="l00070"></a>00070 
+<a name="l00071"></a>00071 <span class="keyword">namespace </span>interface4 {
+<a name="l00072"></a>00072 
+<a name="l00073"></a>00073     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Key, <span class="keyword">typename</span> T, <span class="keyword">typename</span> HashCompare = tbb_hash_compare&lt;Key&gt;, <span class="keyword">typename</span> A = tbb_allocator&lt;std::pair&lt;Key, T&gt; &gt; &gt;
+<a name="l00074"></a>00074     <span class="keyword">class </span><a class="code" href="a00160.html">concurrent_hash_map</a>;
+<a name="l00075"></a>00075 
+<a name="l00077"></a>00077     <span class="keyword">namespace </span>internal {
+<a name="l00078"></a>00078 
+<a name="l00079"></a>00079 
+<a name="l00081"></a>00081     <span class="keyword">typedef</span> size_t hashcode_t;
+<a name="l00083"></a>00083     <span class="keyword">struct </span>hash_map_node_base : tbb::internal::no_copy {
+<a name="l00085"></a>00085         <span class="keyword">typedef</span> <a class="code" href="a00201.html">spin_rw_mutex</a> mutex_t;
+<a name="l00087"></a>00087         <span class="keyword">typedef</span> mutex_t::scoped_lock scoped_t;
+<a name="l00089"></a>00089         hash_map_node_base *next;
+<a name="l00090"></a>00090         mutex_t <a class="code" href="a00177.html">mutex</a>;
+<a name="l00091"></a>00091     };
+<a name="l00093"></a>00093     <span class="keyword">static</span> hash_map_node_base *<span class="keyword">const</span> rehash_req = reinterpret_cast&lt;hash_map_node_base*&gt;(size_t(3));
+<a name="l00095"></a>00095     <span class="keyword">static</span> hash_map_node_base *<span class="keyword">const</span> empty_rehashed = reinterpret_cast&lt;hash_map_node_base*&gt;(size_t(0));
+<a name="l00097"></a>00097     <span class="keyword">class </span>hash_map_base {
+<a name="l00098"></a>00098     <span class="keyword">public</span>:
+<a name="l00100"></a>00100         <span class="keyword">typedef</span> size_t size_type;
+<a name="l00102"></a>00102         <span class="keyword">typedef</span> size_t hashcode_t;
+<a name="l00104"></a>00104         <span class="keyword">typedef</span> size_t segment_index_t;
+<a name="l00106"></a>00106         <span class="keyword">typedef</span> hash_map_node_base node_base;
+<a name="l00108"></a>00108         <span class="keyword">struct </span>bucket : tbb::internal::no_copy {
+<a name="l00110"></a>00110             <span class="keyword">typedef</span> spin_rw_mutex mutex_t;
+<a name="l00112"></a>00112             <span class="keyword">typedef</span> mutex_t::scoped_lock scoped_t;
+<a name="l00113"></a>00113             mutex_t mutex;
+<a name="l00114"></a>00114             node_base *node_list;
+<a name="l00115"></a>00115         };
+<a name="l00117"></a>00117         <span class="keyword">static</span> size_type <span class="keyword">const</span> embedded_block = 1;
+<a name="l00119"></a>00119         <span class="keyword">static</span> size_type <span class="keyword">const</span> embedded_buckets = 1&lt;&lt;embedded_block;
+<a name="l00121"></a>00121         <span class="keyword">static</span> size_type <span class="keyword">const</span> first_block = 8; <span class="comment">//including embedded_block. perfect with bucket size 16, so the allocations are power of 4096</span>
+<a name="l00123"></a>00123 <span class="comment"></span>        <span class="keyword">static</span> size_type <span class="keyword">const</span> pointers_per_table = <span class="keyword">sizeof</span>(segment_index_t) * 8; <span class="comment">// one segment per bit</span>
+<a name="l00125"></a>00125 <span class="comment"></span>        <span class="keyword">typedef</span> bucket *segment_ptr_t;
+<a name="l00127"></a>00127         <span class="keyword">typedef</span> segment_ptr_t segments_table_t[pointers_per_table];
+<a name="l00129"></a>00129         atomic&lt;hashcode_t&gt; my_mask;
+<a name="l00131"></a>00131         segments_table_t my_table;
+<a name="l00133"></a>00133         atomic&lt;size_type&gt; my_size; <span class="comment">// It must be in separate cache line from my_mask due to performance effects</span>
+<a name="l00135"></a>00135 <span class="comment"></span>        bucket my_embedded_segment[embedded_buckets];
+<a name="l00136"></a>00136 <span class="preprocessor">#if __TBB_STATISTICS</span>
+<a name="l00137"></a>00137 <span class="preprocessor"></span>        atomic&lt;unsigned&gt; my_info_resizes; <span class="comment">// concurrent ones</span>
+<a name="l00138"></a>00138         <span class="keyword">mutable</span> atomic&lt;unsigned&gt; my_info_restarts; <span class="comment">// race collisions</span>
+<a name="l00139"></a>00139         atomic&lt;unsigned&gt; my_info_rehashes;  <span class="comment">// invocations of rehash_bucket</span>
+<a name="l00140"></a>00140 <span class="preprocessor">        #if !TBB_USE_PERFORMANCE_WARNINGS</span>
+<a name="l00141"></a>00141 <span class="preprocessor"></span><span class="preprocessor">        #error Please enable TBB_USE_PERFORMANCE_WARNINGS as well</span>
+<a name="l00142"></a>00142 <span class="preprocessor"></span><span class="preprocessor">        #endif</span>
+<a name="l00143"></a>00143 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00145"></a>00145 <span class="preprocessor">        hash_map_base() {</span>
+<a name="l00146"></a>00146 <span class="preprocessor"></span>            std::memset( <span class="keyword">this</span>, 0, pointers_per_table*<span class="keyword">sizeof</span>(segment_ptr_t) <span class="comment">// 32*4=128   or 64*8=512</span>
+<a name="l00147"></a>00147                 + <span class="keyword">sizeof</span>(my_size) + <span class="keyword">sizeof</span>(my_mask)  <span class="comment">// 4+4 or 8+8</span>
+<a name="l00148"></a>00148                 + embedded_buckets*<span class="keyword">sizeof</span>(bucket) ); <span class="comment">// n*8 or n*16</span>
+<a name="l00149"></a>00149             <span class="keywordflow">for</span>( size_type i = 0; i &lt; embedded_block; i++ ) <span class="comment">// fill the table</span>
+<a name="l00150"></a>00150                 my_table[i] = my_embedded_segment + segment_base(i);
+<a name="l00151"></a>00151             my_mask = embedded_buckets - 1;
+<a name="l00152"></a>00152             __TBB_ASSERT( embedded_block &lt;= first_block, <span class="stringliteral">"The first block number must include embedded blocks"</span>);
+<a name="l00153"></a>00153 <span class="preprocessor">#if __TBB_STATISTICS</span>
+<a name="l00154"></a>00154 <span class="preprocessor"></span>            my_info_resizes = 0; <span class="comment">// concurrent ones</span>
+<a name="l00155"></a>00155             my_info_restarts = 0; <span class="comment">// race collisions</span>
+<a name="l00156"></a>00156             my_info_rehashes = 0;  <span class="comment">// invocations of rehash_bucket</span>
+<a name="l00157"></a>00157 <span class="preprocessor">#endif</span>
+<a name="l00158"></a>00158 <span class="preprocessor"></span>        }
+<a name="l00159"></a>00159 
+<a name="l00161"></a>00161         <span class="keyword">static</span> segment_index_t segment_index_of( size_type index ) {
+<a name="l00162"></a>00162             <span class="keywordflow">return</span> segment_index_t( __TBB_Log2( index|1 ) );
+<a name="l00163"></a>00163         }
+<a name="l00164"></a>00164 
+<a name="l00166"></a>00166         <span class="keyword">static</span> segment_index_t segment_base( segment_index_t k ) {
+<a name="l00167"></a>00167             <span class="keywordflow">return</span> (segment_index_t(1)&lt;&lt;k &amp; ~segment_index_t(1));
+<a name="l00168"></a>00168         }
+<a name="l00169"></a>00169 
+<a name="l00171"></a>00171         <span class="keyword">static</span> size_type segment_size( segment_index_t k ) {
+<a name="l00172"></a>00172             <span class="keywordflow">return</span> size_type(1)&lt;&lt;k; <span class="comment">// fake value for k==0</span>
+<a name="l00173"></a>00173         }
+<a name="l00174"></a>00174         
+<a name="l00176"></a>00176         <span class="keyword">static</span> <span class="keywordtype">bool</span> is_valid( <span class="keywordtype">void</span> *ptr ) {
+<a name="l00177"></a>00177             <span class="keywordflow">return</span> reinterpret_cast&lt;size_t&gt;(ptr) &gt; size_t(63);
+<a name="l00178"></a>00178         }
+<a name="l00179"></a>00179 
+<a name="l00181"></a>00181         <span class="keyword">static</span> <span class="keywordtype">void</span> init_buckets( segment_ptr_t ptr, size_type sz, <span class="keywordtype">bool</span> is_initial ) {
+<a name="l00182"></a>00182             <span class="keywordflow">if</span>( is_initial ) std::memset(ptr, 0, sz*<span class="keyword">sizeof</span>(bucket) );
+<a name="l00183"></a>00183             <span class="keywordflow">else</span> <span class="keywordflow">for</span>(size_type i = 0; i &lt; sz; i++, ptr++) {
+<a name="l00184"></a>00184                     *reinterpret_cast&lt;intptr_t*&gt;(&amp;ptr-&gt;mutex) = 0;
+<a name="l00185"></a>00185                     ptr-&gt;node_list = rehash_req;
+<a name="l00186"></a>00186                 }
+<a name="l00187"></a>00187         }
+<a name="l00188"></a>00188         
+<a name="l00190"></a>00190         <span class="keyword">static</span> <span class="keywordtype">void</span> add_to_bucket( bucket *b, node_base *n ) {
+<a name="l00191"></a>00191             __TBB_ASSERT(b-&gt;node_list != rehash_req, NULL);
+<a name="l00192"></a>00192             n-&gt;next = b-&gt;node_list;
+<a name="l00193"></a>00193             b-&gt;node_list = n; <span class="comment">// its under lock and flag is set</span>
+<a name="l00194"></a>00194         }
+<a name="l00195"></a>00195 
+<a name="l00197"></a>00197         <span class="keyword">struct </span>enable_segment_failsafe {
+<a name="l00198"></a>00198             segment_ptr_t *my_segment_ptr;
+<a name="l00199"></a>00199             enable_segment_failsafe(segments_table_t &amp;table, segment_index_t k) : my_segment_ptr(&amp;table[k]) {}
+<a name="l00200"></a>00200             ~enable_segment_failsafe() {
+<a name="l00201"></a>00201                 <span class="keywordflow">if</span>( my_segment_ptr ) *my_segment_ptr = 0; <span class="comment">// indicate no allocation in progress</span>
+<a name="l00202"></a>00202             }
+<a name="l00203"></a>00203         };
+<a name="l00204"></a>00204 
+<a name="l00206"></a>00206         <span class="keywordtype">void</span> enable_segment( segment_index_t k, <span class="keywordtype">bool</span> is_initial = <span class="keyword">false</span> ) {
+<a name="l00207"></a>00207             __TBB_ASSERT( k, <span class="stringliteral">"Zero segment must be embedded"</span> );
+<a name="l00208"></a>00208             enable_segment_failsafe watchdog( my_table, k );
+<a name="l00209"></a>00209             cache_aligned_allocator&lt;bucket&gt; alloc;
+<a name="l00210"></a>00210             size_type sz;
+<a name="l00211"></a>00211             __TBB_ASSERT( !is_valid(my_table[k]), <span class="stringliteral">"Wrong concurrent assignment"</span>);
+<a name="l00212"></a>00212             <span class="keywordflow">if</span>( k &gt;= first_block ) {
+<a name="l00213"></a>00213                 sz = segment_size( k );
+<a name="l00214"></a>00214                 segment_ptr_t ptr = alloc.allocate( sz );
+<a name="l00215"></a>00215                 init_buckets( ptr, sz, is_initial );
+<a name="l00216"></a>00216 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS</span>
+<a name="l00217"></a>00217 <span class="preprocessor"></span>                <span class="comment">// TODO: actually, fence and notification are unnecessary here and below</span>
+<a name="l00218"></a>00218                 itt_store_pointer_with_release_v3( my_table + k, ptr );
+<a name="l00219"></a>00219 <span class="preprocessor">#else</span>
+<a name="l00220"></a>00220 <span class="preprocessor"></span>                my_table[k] = ptr;<span class="comment">// my_mask has release fence</span>
+<a name="l00221"></a>00221 <span class="preprocessor">#endif</span>
+<a name="l00222"></a>00222 <span class="preprocessor"></span>                sz &lt;&lt;= 1;<span class="comment">// double it to get entire capacity of the container</span>
+<a name="l00223"></a>00223             } <span class="keywordflow">else</span> { <span class="comment">// the first block</span>
+<a name="l00224"></a>00224                 __TBB_ASSERT( k == embedded_block, <span class="stringliteral">"Wrong segment index"</span> );
+<a name="l00225"></a>00225                 sz = segment_size( first_block );
+<a name="l00226"></a>00226                 segment_ptr_t ptr = alloc.allocate( sz - embedded_buckets );
+<a name="l00227"></a>00227                 init_buckets( ptr, sz - embedded_buckets, is_initial );
+<a name="l00228"></a>00228                 ptr -= segment_base(embedded_block);
+<a name="l00229"></a>00229                 <span class="keywordflow">for</span>(segment_index_t i = embedded_block; i &lt; first_block; i++) <span class="comment">// calc the offsets</span>
+<a name="l00230"></a>00230 #<span class="keywordflow">if</span> TBB_USE_THREADING_TOOLS
+<a name="l00231"></a>00231                     itt_store_pointer_with_release_v3( my_table + i, ptr + segment_base(i) );
+<a name="l00232"></a>00232 <span class="preprocessor">#else</span>
+<a name="l00233"></a>00233 <span class="preprocessor"></span>                    my_table[i] = ptr + segment_base(i);
+<a name="l00234"></a>00234 <span class="preprocessor">#endif</span>
+<a name="l00235"></a>00235 <span class="preprocessor"></span>            }
+<a name="l00236"></a>00236 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS</span>
+<a name="l00237"></a>00237 <span class="preprocessor"></span>            itt_store_pointer_with_release_v3( &amp;my_mask, (<span class="keywordtype">void</span>*)(sz-1) );
+<a name="l00238"></a>00238 <span class="preprocessor">#else</span>
+<a name="l00239"></a>00239 <span class="preprocessor"></span>            my_mask = sz - 1;
+<a name="l00240"></a>00240 <span class="preprocessor">#endif</span>
+<a name="l00241"></a>00241 <span class="preprocessor"></span>            watchdog.my_segment_ptr = 0;
+<a name="l00242"></a>00242         }
+<a name="l00243"></a>00243 
+<a name="l00245"></a>00245         bucket *get_bucket( hashcode_t h ) <span class="keyword">const</span> <span class="keywordflow">throw</span>() { <span class="comment">// TODO: add throw() everywhere?</span>
+<a name="l00246"></a>00246             segment_index_t s = segment_index_of( h );
+<a name="l00247"></a>00247             h -= segment_base(s);
+<a name="l00248"></a>00248             segment_ptr_t seg = my_table[s];
+<a name="l00249"></a>00249             __TBB_ASSERT( is_valid(seg), <span class="stringliteral">"hashcode must be cut by valid mask for allocated segments"</span> );
+<a name="l00250"></a>00250             <span class="keywordflow">return</span> &amp;seg[h];
+<a name="l00251"></a>00251         }
+<a name="l00252"></a>00252 
+<a name="l00253"></a>00253         <span class="comment">// internal serial rehashing helper</span>
+<a name="l00254"></a>00254         <span class="keywordtype">void</span> mark_rehashed_levels( hashcode_t h ) <span class="keywordflow">throw</span> () {
+<a name="l00255"></a>00255             segment_index_t s = segment_index_of( h );
+<a name="l00256"></a>00256             <span class="keywordflow">while</span>( segment_ptr_t seg = my_table[++s] )
+<a name="l00257"></a>00257                 <span class="keywordflow">if</span>( seg[h].node_list == rehash_req ) {
+<a name="l00258"></a>00258                     seg[h].node_list = empty_rehashed;
+<a name="l00259"></a>00259                     mark_rehashed_levels( h + segment_base(s) );
+<a name="l00260"></a>00260                 }
+<a name="l00261"></a>00261         }
+<a name="l00262"></a>00262 
+<a name="l00264"></a>00264         <span class="comment">// Splitting into two functions should help inlining</span>
+<a name="l00265"></a>00265         <span class="keyword">inline</span> <span class="keywordtype">bool</span> check_mask_race( <span class="keyword">const</span> hashcode_t h, hashcode_t &amp;m )<span class="keyword"> const </span>{
+<a name="l00266"></a>00266             hashcode_t m_now, m_old = m;
+<a name="l00267"></a>00267 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS</span>
+<a name="l00268"></a>00268 <span class="preprocessor"></span>            m_now = (hashcode_t) itt_load_pointer_with_acquire_v3( &amp;my_mask );
+<a name="l00269"></a>00269 <span class="preprocessor">#else</span>
+<a name="l00270"></a>00270 <span class="preprocessor"></span>            m_now = my_mask;
+<a name="l00271"></a>00271 <span class="preprocessor">#endif</span>
+<a name="l00272"></a>00272 <span class="preprocessor"></span>            <span class="keywordflow">if</span>( m_old != m_now )
+<a name="l00273"></a>00273                 <span class="keywordflow">return</span> check_rehashing_collision( h, m_old, m = m_now );
+<a name="l00274"></a>00274             <span class="keywordflow">return</span> <span class="keyword">false</span>;
+<a name="l00275"></a>00275         }
+<a name="l00276"></a>00276 
+<a name="l00278"></a>00278         <span class="keywordtype">bool</span> check_rehashing_collision( <span class="keyword">const</span> hashcode_t h, hashcode_t m_old, hashcode_t m )<span class="keyword"> const </span>{
+<a name="l00279"></a>00279             __TBB_ASSERT(m_old != m, NULL); <span class="comment">// TODO?: m arg could be optimized out by passing h = h&amp;m</span>
+<a name="l00280"></a>00280             <span class="keywordflow">if</span>( (h &amp; m_old) != (h &amp; m) ) { <span class="comment">// mask changed for this hashcode, rare event</span>
+<a name="l00281"></a>00281                 <span class="comment">// condition above proves that 'h' has some other bits set beside 'm_old'</span>
+<a name="l00282"></a>00282                 <span class="comment">// find next applicable mask after m_old    //TODO: look at bsl instruction</span>
+<a name="l00283"></a>00283                 <span class="keywordflow">for</span>( ++m_old; !(h &amp; m_old); m_old &lt;&lt;= 1 ) <span class="comment">// at maximum few rounds depending on the first block size</span>
+<a name="l00284"></a>00284                     ;
+<a name="l00285"></a>00285                 m_old = (m_old&lt;&lt;1) - 1; <span class="comment">// get full mask from a bit</span>
+<a name="l00286"></a>00286                 __TBB_ASSERT((m_old&amp;(m_old+1))==0 &amp;&amp; m_old &lt;= m, NULL);
+<a name="l00287"></a>00287                 <span class="comment">// check whether it is rehashing/ed</span>
+<a name="l00288"></a>00288 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS</span>
+<a name="l00289"></a>00289 <span class="preprocessor"></span>                <span class="keywordflow">if</span>( itt_load_pointer_with_acquire_v3(&amp;( get_bucket(h &amp; m_old)-&gt;node_list )) != rehash_req )
+<a name="l00290"></a>00290 <span class="preprocessor">#else</span>
+<a name="l00291"></a>00291 <span class="preprocessor"></span>                <span class="keywordflow">if</span>( __TBB_load_with_acquire(get_bucket( h &amp; m_old )-&gt;node_list) != rehash_req )
+<a name="l00292"></a>00292 <span class="preprocessor">#endif</span>
+<a name="l00293"></a>00293 <span class="preprocessor"></span>                {
+<a name="l00294"></a>00294 <span class="preprocessor">#if __TBB_STATISTICS</span>
+<a name="l00295"></a>00295 <span class="preprocessor"></span>                    my_info_restarts++; <span class="comment">// race collisions</span>
+<a name="l00296"></a>00296 <span class="preprocessor">#endif</span>
+<a name="l00297"></a>00297 <span class="preprocessor"></span>                    <span class="keywordflow">return</span> <span class="keyword">true</span>;
+<a name="l00298"></a>00298                 }
+<a name="l00299"></a>00299             }
+<a name="l00300"></a>00300             <span class="keywordflow">return</span> <span class="keyword">false</span>;
+<a name="l00301"></a>00301         }
+<a name="l00302"></a>00302 
+<a name="l00304"></a>00304         segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) {
+<a name="l00305"></a>00305             size_type sz = ++my_size; <span class="comment">// prefix form is to enforce allocation after the first item inserted</span>
+<a name="l00306"></a>00306             add_to_bucket( b, n );
+<a name="l00307"></a>00307             <span class="comment">// check load factor</span>
+<a name="l00308"></a>00308             <span class="keywordflow">if</span>( sz &gt;= mask ) { <span class="comment">// TODO: add custom load_factor </span>
+<a name="l00309"></a>00309                 segment_index_t new_seg = segment_index_of( mask+1 );
+<a name="l00310"></a>00310                 __TBB_ASSERT( is_valid(my_table[new_seg-1]), <span class="stringliteral">"new allocations must not publish new mask until segment has allocated"</span>);
+<a name="l00311"></a>00311 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS</span>
+<a name="l00312"></a>00312 <span class="preprocessor"></span>                <span class="keywordflow">if</span>( !itt_load_pointer_v3(my_table+new_seg)
+<a name="l00313"></a>00313 #<span class="keywordflow">else</span>
+<a name="l00314"></a>00314                 <span class="keywordflow">if</span>( !my_table[new_seg]
+<a name="l00315"></a>00315 #endif
+<a name="l00316"></a>00316                   &amp;&amp; __TBB_CompareAndSwapW(&amp;my_table[new_seg], 2, 0) == 0 )
+<a name="l00317"></a>00317                     <span class="keywordflow">return</span> new_seg; <span class="comment">// The value must be processed</span>
+<a name="l00318"></a>00318             }
+<a name="l00319"></a>00319             <span class="keywordflow">return</span> 0;
+<a name="l00320"></a>00320         }
+<a name="l00321"></a>00321 
+<a name="l00323"></a>00323         <span class="keywordtype">void</span> reserve(size_type buckets) {
+<a name="l00324"></a>00324             <span class="keywordflow">if</span>( !buckets-- ) <span class="keywordflow">return</span>;
+<a name="l00325"></a>00325             <span class="keywordtype">bool</span> is_initial = !my_size;
+<a name="l00326"></a>00326             <span class="keywordflow">for</span>( size_type m = my_mask; buckets &gt; m; m = my_mask )
+<a name="l00327"></a>00327                 enable_segment( segment_index_of( m+1 ), is_initial );
+<a name="l00328"></a>00328         }
+<a name="l00330"></a>00330         <span class="keywordtype">void</span> internal_swap(hash_map_base &amp;table) {
+<a name="l00331"></a>00331             std::swap(this-&gt;my_mask, table.my_mask);
+<a name="l00332"></a>00332             std::swap(this-&gt;my_size, table.my_size);
+<a name="l00333"></a>00333             <span class="keywordflow">for</span>(size_type i = 0; i &lt; embedded_buckets; i++)
+<a name="l00334"></a>00334                 std::swap(this-&gt;my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list);
+<a name="l00335"></a>00335             <span class="keywordflow">for</span>(size_type i = embedded_block; i &lt; pointers_per_table; i++)
+<a name="l00336"></a>00336                 std::swap(this-&gt;my_table[i], table.my_table[i]);
+<a name="l00337"></a>00337         }
+<a name="l00338"></a>00338     };
+<a name="l00339"></a>00339 
+<a name="l00340"></a>00340     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Iterator&gt;
+<a name="l00341"></a>00341     <span class="keyword">class </span>hash_map_range;
+<a name="l00342"></a>00342 
+<a name="l00344"></a>00344 
+<a name="l00346"></a>00346     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> Value&gt;
+<a name="l00347"></a>00347     <span class="keyword">class </span>hash_map_iterator
+<a name="l00348"></a>00348         : <span class="keyword">public</span> std::iterator&lt;std::forward_iterator_tag,Value&gt;
+<a name="l00349"></a>00349     {
+<a name="l00350"></a>00350         <span class="keyword">typedef</span> Container map_type;
+<a name="l00351"></a>00351         <span class="keyword">typedef</span> <span class="keyword">typename</span> Container::node node;
+<a name="l00352"></a>00352         <span class="keyword">typedef</span> hash_map_base::node_base node_base;
+<a name="l00353"></a>00353         <span class="keyword">typedef</span> hash_map_base::bucket bucket;
+<a name="l00354"></a>00354 
+<a name="l00355"></a>00355         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00356"></a>00356         <span class="keyword">friend</span> <span class="keywordtype">bool</span> operator==( <span class="keyword">const</span> hash_map_iterator&lt;C,T&gt;&amp; i, <span class="keyword">const</span> hash_map_iterator&lt;C,U&gt;&amp; j );
+<a name="l00357"></a>00357 
+<a name="l00358"></a>00358         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00359"></a>00359         <span class="keyword">friend</span> <span class="keywordtype">bool</span> operator!=( <span class="keyword">const</span> hash_map_iterator&lt;C,T&gt;&amp; i, <span class="keyword">const</span> hash_map_iterator&lt;C,U&gt;&amp; j );
+<a name="l00360"></a>00360 
+<a name="l00361"></a>00361         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00362"></a>00362         <span class="keyword">friend</span> ptrdiff_t operator-( <span class="keyword">const</span> hash_map_iterator&lt;C,T&gt;&amp; i, <span class="keyword">const</span> hash_map_iterator&lt;C,U&gt;&amp; j );
+<a name="l00363"></a>00363     
+<a name="l00364"></a>00364         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> U&gt;
+<a name="l00365"></a>00365         <span class="keyword">friend</span> <span class="keyword">class </span>hash_map_iterator;
+<a name="l00366"></a>00366 
+<a name="l00367"></a>00367         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> I&gt;
+<a name="l00368"></a>00368         <span class="keyword">friend</span> <span class="keyword">class </span>hash_map_range;
+<a name="l00369"></a>00369 
+<a name="l00370"></a>00370         <span class="keywordtype">void</span> advance_to_next_bucket() { <span class="comment">// TODO?: refactor to iterator_base class</span>
+<a name="l00371"></a>00371             size_t k = my_index+1;
+<a name="l00372"></a>00372             <span class="keywordflow">while</span>( my_bucket &amp;&amp; k &lt;= my_map-&gt;my_mask ) {
+<a name="l00373"></a>00373                 <span class="comment">// Following test uses 2's-complement wizardry</span>
+<a name="l00374"></a>00374                 <span class="keywordflow">if</span>( k&amp; (k-2) ) <span class="comment">// not the beginning of a segment</span>
+<a name="l00375"></a>00375                     ++my_bucket;
+<a name="l00376"></a>00376                 <span class="keywordflow">else</span> my_bucket = my_map-&gt;get_bucket( k );
+<a name="l00377"></a>00377                 my_node = static_cast&lt;node*&gt;( my_bucket-&gt;node_list );
+<a name="l00378"></a>00378                 <span class="keywordflow">if</span>( hash_map_base::is_valid(my_node) ) {
+<a name="l00379"></a>00379                     my_index = k; <span class="keywordflow">return</span>;
+<a name="l00380"></a>00380                 }
+<a name="l00381"></a>00381                 ++k;
+<a name="l00382"></a>00382             }
+<a name="l00383"></a>00383             my_bucket = 0; my_node = 0; my_index = k; <span class="comment">// the end</span>
+<a name="l00384"></a>00384         }
+<a name="l00385"></a>00385 <span class="preprocessor">#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)</span>
+<a name="l00386"></a>00386 <span class="preprocessor"></span>        <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Key, <span class="keyword">typename</span> T, <span class="keyword">typename</span> HashCompare, <span class="keyword">typename</span> A&gt;
+<a name="l00387"></a>00387         <span class="keyword">friend</span> <span class="keyword">class </span>interface4::concurrent_hash_map;
+<a name="l00388"></a>00388 <span class="preprocessor">#else</span>
+<a name="l00389"></a>00389 <span class="preprocessor"></span>    <span class="keyword">public</span>: <span class="comment">// workaround</span>
+<a name="l00390"></a>00390 <span class="preprocessor">#endif</span>
+<a name="l00392"></a>00392 <span class="preprocessor">        const Container *my_map;</span>
+<a name="l00393"></a>00393 <span class="preprocessor"></span>
+<a name="l00395"></a>00395         size_t my_index;
+<a name="l00396"></a>00396 
+<a name="l00398"></a>00398         <span class="keyword">const</span> bucket *my_bucket;
+<a name="l00399"></a>00399 
+<a name="l00401"></a>00401         node *my_node;
+<a name="l00402"></a>00402 
+<a name="l00403"></a>00403         hash_map_iterator( <span class="keyword">const</span> Container &amp;map, size_t index, <span class="keyword">const</span> bucket *b, node_base *n );
+<a name="l00404"></a>00404 
+<a name="l00405"></a>00405     <span class="keyword">public</span>:
+<a name="l00407"></a>00407         hash_map_iterator() {}
+<a name="l00408"></a>00408         hash_map_iterator( <span class="keyword">const</span> hash_map_iterator&lt;Container,typename Container::value_type&gt; &amp;other ) :
+<a name="l00409"></a>00409             my_map(other.my_map),
+<a name="l00410"></a>00410             my_index(other.my_index),
+<a name="l00411"></a>00411             my_bucket(other.my_bucket),
+<a name="l00412"></a>00412             my_node(other.my_node)
+<a name="l00413"></a>00413         {}
+<a name="l00414"></a>00414         Value&amp; operator*()<span class="keyword"> const </span>{
+<a name="l00415"></a>00415             __TBB_ASSERT( hash_map_base::is_valid(my_node), <span class="stringliteral">"iterator uninitialized or at end of container?"</span> );
+<a name="l00416"></a>00416             <span class="keywordflow">return</span> my_node-&gt;item;
+<a name="l00417"></a>00417         }
+<a name="l00418"></a>00418         Value* operator-&gt;()<span class="keyword"> const </span>{<span class="keywordflow">return</span> &amp;operator*();}
+<a name="l00419"></a>00419         hash_map_iterator&amp; operator++();
+<a name="l00420"></a>00420         
+<a name="l00422"></a>00422         hash_map_iterator operator++(<span class="keywordtype">int</span>) {
+<a name="l00423"></a>00423             hash_map_iterator old(*<span class="keyword">this</span>);
+<a name="l00424"></a>00424             operator++();
+<a name="l00425"></a>00425             <span class="keywordflow">return</span> old;
+<a name="l00426"></a>00426         }
+<a name="l00427"></a>00427     };
+<a name="l00428"></a>00428 
+<a name="l00429"></a>00429     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> Value&gt;
+<a name="l00430"></a>00430     hash_map_iterator&lt;Container,Value&gt;::hash_map_iterator( <span class="keyword">const</span> Container &amp;map, size_t index, <span class="keyword">const</span> bucket *b, node_base *n ) :
+<a name="l00431"></a>00431         my_map(&amp;map),
+<a name="l00432"></a>00432         my_index(index),
+<a name="l00433"></a>00433         my_bucket(b),
+<a name="l00434"></a>00434         my_node( static_cast&lt;node*&gt;(n) )
+<a name="l00435"></a>00435     {
+<a name="l00436"></a>00436         <span class="keywordflow">if</span>( b &amp;&amp; !hash_map_base::is_valid(n) )
+<a name="l00437"></a>00437             advance_to_next_bucket();
+<a name="l00438"></a>00438     }
+<a name="l00439"></a>00439 
+<a name="l00440"></a>00440     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> Value&gt;
+<a name="l00441"></a>00441     hash_map_iterator&lt;Container,Value&gt;&amp; hash_map_iterator&lt;Container,Value&gt;::operator++() {
+<a name="l00442"></a>00442         my_node = static_cast&lt;node*&gt;( my_node-&gt;next );
+<a name="l00443"></a>00443         <span class="keywordflow">if</span>( !my_node ) advance_to_next_bucket();
+<a name="l00444"></a>00444         <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00445"></a>00445     }
+<a name="l00446"></a>00446 
+<a name="l00447"></a>00447     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00448"></a>00448     <span class="keywordtype">bool</span> operator==( <span class="keyword">const</span> hash_map_iterator&lt;Container,T&gt;&amp; i, <span class="keyword">const</span> hash_map_iterator&lt;Container,U&gt;&amp; j ) {
+<a name="l00449"></a>00449         <span class="keywordflow">return</span> i.my_node == j.my_node &amp;&amp; i.my_map == j.my_map;
+<a name="l00450"></a>00450     }
+<a name="l00451"></a>00451 
+<a name="l00452"></a>00452     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00453"></a>00453     <span class="keywordtype">bool</span> operator!=( <span class="keyword">const</span> hash_map_iterator&lt;Container,T&gt;&amp; i, <span class="keyword">const</span> hash_map_iterator&lt;Container,U&gt;&amp; j ) {
+<a name="l00454"></a>00454         <span class="keywordflow">return</span> i.my_node != j.my_node || i.my_map != j.my_map;
+<a name="l00455"></a>00455     }
+<a name="l00456"></a>00456 
+<a name="l00458"></a>00458 
+<a name="l00459"></a>00459     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Iterator&gt;
+<a name="l00460"></a>00460     <span class="keyword">class </span>hash_map_range {
+<a name="l00461"></a>00461         <span class="keyword">typedef</span> <span class="keyword">typename</span> Iterator::map_type map_type;
+<a name="l00462"></a>00462         Iterator my_begin;
+<a name="l00463"></a>00463         Iterator my_end;
+<a name="l00464"></a>00464         <span class="keyword">mutable</span> Iterator my_midpoint;
+<a name="l00465"></a>00465         size_t my_grainsize;
+<a name="l00467"></a>00467         <span class="keywordtype">void</span> set_midpoint() <span class="keyword">const</span>;
+<a name="l00468"></a>00468         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> U&gt; <span class="keyword">friend</span> <span class="keyword">class </span>hash_map_range;
+<a name="l00469"></a>00469     <span class="keyword">public</span>:
+<a name="l00471"></a>00471         <span class="keyword">typedef</span> std::size_t size_type;
+<a name="l00472"></a>00472         <span class="keyword">typedef</span> <span class="keyword">typename</span> Iterator::value_type value_type;
+<a name="l00473"></a>00473         <span class="keyword">typedef</span> <span class="keyword">typename</span> Iterator::reference reference;
+<a name="l00474"></a>00474         <span class="keyword">typedef</span> <span class="keyword">typename</span> Iterator::difference_type difference_type;
+<a name="l00475"></a>00475         <span class="keyword">typedef</span> Iterator iterator;
+<a name="l00476"></a>00476 
+<a name="l00478"></a>00478         <span class="keywordtype">bool</span> empty()<span class="keyword"> const </span>{<span class="keywordflow">return</span> my_begin==my_end;}
+<a name="l00479"></a>00479 
+<a name="l00481"></a>00481         <span class="keywordtype">bool</span> is_divisible()<span class="keyword"> const </span>{
+<a name="l00482"></a>00482             <span class="keywordflow">return</span> my_midpoint!=my_end;
+<a name="l00483"></a>00483         }
+<a name="l00485"></a>00485         hash_map_range( hash_map_range&amp; r, split ) : 
+<a name="l00486"></a>00486             my_end(r.my_end),
+<a name="l00487"></a>00487             my_grainsize(r.my_grainsize)
+<a name="l00488"></a>00488         {
+<a name="l00489"></a>00489             r.my_end = my_begin = r.my_midpoint;
+<a name="l00490"></a>00490             __TBB_ASSERT( !empty(), <span class="stringliteral">"Splitting despite the range is not divisible"</span> );
+<a name="l00491"></a>00491             __TBB_ASSERT( !r.empty(), <span class="stringliteral">"Splitting despite the range is not divisible"</span> );
+<a name="l00492"></a>00492             set_midpoint();
+<a name="l00493"></a>00493             r.set_midpoint();
+<a name="l00494"></a>00494         }
+<a name="l00496"></a>00496         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> U&gt;
+<a name="l00497"></a>00497         hash_map_range( hash_map_range&lt;U&gt;&amp; r) : 
+<a name="l00498"></a>00498             my_begin(r.my_begin),
+<a name="l00499"></a>00499             my_end(r.my_end),
+<a name="l00500"></a>00500             my_midpoint(r.my_midpoint),
+<a name="l00501"></a>00501             my_grainsize(r.my_grainsize)
+<a name="l00502"></a>00502         {}
+<a name="l00503"></a>00503 <span class="preprocessor">#if TBB_DEPRECATED</span>
+<a name="l00505"></a>00505 <span class="preprocessor">        hash_map_range( const Iterator&amp; begin_, const Iterator&amp; end_, size_type grainsize_ = 1 ) : </span>
+<a name="l00506"></a>00506 <span class="preprocessor"></span>            my_begin(begin_), 
+<a name="l00507"></a>00507             my_end(end_),
+<a name="l00508"></a>00508             my_grainsize(grainsize_)
+<a name="l00509"></a>00509         {
+<a name="l00510"></a>00510             <span class="keywordflow">if</span>(!my_end.my_index &amp;&amp; !my_end.my_bucket) <span class="comment">// end</span>
+<a name="l00511"></a>00511                 my_end.my_index = my_end.my_map-&gt;my_mask + 1;
+<a name="l00512"></a>00512             set_midpoint();
+<a name="l00513"></a>00513             __TBB_ASSERT( grainsize_&gt;0, <span class="stringliteral">"grainsize must be positive"</span> );
+<a name="l00514"></a>00514         }
+<a name="l00515"></a>00515 <span class="preprocessor">#endif</span>
+<a name="l00517"></a>00517 <span class="preprocessor">        hash_map_range( const map_type &amp;map, size_type grainsize_ = 1 ) : </span>
+<a name="l00518"></a>00518 <span class="preprocessor"></span>            my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment-&gt;node_list ) ),
+<a name="l00519"></a>00519             my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
+<a name="l00520"></a>00520             my_grainsize( grainsize_ )
+<a name="l00521"></a>00521         {
+<a name="l00522"></a>00522             __TBB_ASSERT( grainsize_&gt;0, <span class="stringliteral">"grainsize must be positive"</span> );
+<a name="l00523"></a>00523             set_midpoint();
+<a name="l00524"></a>00524         }
+<a name="l00525"></a>00525         <span class="keyword">const</span> Iterator&amp; begin()<span class="keyword"> const </span>{<span class="keywordflow">return</span> my_begin;}
+<a name="l00526"></a>00526         <span class="keyword">const</span> Iterator&amp; end()<span class="keyword"> const </span>{<span class="keywordflow">return</span> my_end;}
+<a name="l00528"></a>00528         size_type grainsize()<span class="keyword"> const </span>{<span class="keywordflow">return</span> my_grainsize;}
+<a name="l00529"></a>00529     };
+<a name="l00530"></a>00530 
+<a name="l00531"></a>00531     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Iterator&gt;
+<a name="l00532"></a>00532     <span class="keywordtype">void</span> hash_map_range&lt;Iterator&gt;::set_midpoint()<span class="keyword"> const </span>{
+<a name="l00533"></a>00533         <span class="comment">// Split by groups of nodes</span>
+<a name="l00534"></a>00534         size_t m = my_end.my_index-my_begin.my_index;
+<a name="l00535"></a>00535         <span class="keywordflow">if</span>( m &gt; my_grainsize ) {
+<a name="l00536"></a>00536             m = my_begin.my_index + m/2u;
+<a name="l00537"></a>00537             hash_map_base::bucket *b = my_begin.my_map-&gt;get_bucket(m);
+<a name="l00538"></a>00538             my_midpoint = Iterator(*my_begin.my_map,m,b,b-&gt;node_list);
+<a name="l00539"></a>00539         } <span class="keywordflow">else</span> {
+<a name="l00540"></a>00540             my_midpoint = my_end;
+<a name="l00541"></a>00541         }
+<a name="l00542"></a>00542         __TBB_ASSERT( my_begin.my_index &lt;= my_midpoint.my_index,
+<a name="l00543"></a>00543             <span class="stringliteral">"my_begin is after my_midpoint"</span> );
+<a name="l00544"></a>00544         __TBB_ASSERT( my_midpoint.my_index &lt;= my_end.my_index,
+<a name="l00545"></a>00545             <span class="stringliteral">"my_midpoint is after my_end"</span> );
+<a name="l00546"></a>00546         __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
+<a name="l00547"></a>00547             <span class="stringliteral">"[my_begin, my_midpoint) range should not be empty"</span> );
+<a name="l00548"></a>00548     }
+<a name="l00549"></a>00549 
+<a name="l00550"></a>00550     } <span class="comment">// internal</span>
+<a name="l00552"></a>00552 <span class="comment"></span>
+<a name="l00554"></a>00554 
+<a name="l00583"></a>00583 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Key, <span class="keyword">typename</span> T, <span class="keyword">typename</span> HashCompare, <span class="keyword">typename</span> Allocator&gt;
+<a name="l00584"></a><a class="code" href="a00160.html">00584</a> <span class="keyword">class </span><a class="code" href="a00160.html">concurrent_hash_map</a> : <span class="keyword">protected</span> internal::hash_map_base {
+<a name="l00585"></a>00585     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> Value&gt;
+<a name="l00586"></a>00586     <span class="keyword">friend</span> <span class="keyword">class </span>internal::hash_map_iterator;
+<a name="l00587"></a>00587 
+<a name="l00588"></a>00588     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> I&gt;
+<a name="l00589"></a>00589     <span class="keyword">friend</span> <span class="keyword">class </span>internal::hash_map_range;
+<a name="l00590"></a>00590 
+<a name="l00591"></a>00591 <span class="keyword">public</span>:
+<a name="l00592"></a>00592     <span class="keyword">typedef</span> Key key_type;
+<a name="l00593"></a>00593     <span class="keyword">typedef</span> T mapped_type;
+<a name="l00594"></a>00594     <span class="keyword">typedef</span> std::pair&lt;const Key,T&gt; value_type;
+<a name="l00595"></a>00595     <span class="keyword">typedef</span> hash_map_base::size_type size_type;
+<a name="l00596"></a>00596     <span class="keyword">typedef</span> ptrdiff_t difference_type;
+<a name="l00597"></a>00597     <span class="keyword">typedef</span> value_type *pointer;
+<a name="l00598"></a>00598     <span class="keyword">typedef</span> <span class="keyword">const</span> value_type *const_pointer;
+<a name="l00599"></a>00599     <span class="keyword">typedef</span> value_type &amp;reference;
+<a name="l00600"></a>00600     <span class="keyword">typedef</span> <span class="keyword">const</span> value_type &amp;const_reference;
+<a name="l00601"></a>00601     <span class="keyword">typedef</span> internal::hash_map_iterator&lt;concurrent_hash_map,value_type&gt; iterator;
+<a name="l00602"></a>00602     <span class="keyword">typedef</span> internal::hash_map_iterator&lt;concurrent_hash_map,const value_type&gt; const_iterator;
+<a name="l00603"></a>00603     <span class="keyword">typedef</span> internal::hash_map_range&lt;iterator&gt; range_type;
+<a name="l00604"></a>00604     <span class="keyword">typedef</span> internal::hash_map_range&lt;const_iterator&gt; const_range_type;
+<a name="l00605"></a>00605     <span class="keyword">typedef</span> Allocator allocator_type;
+<a name="l00606"></a>00606 
+<a name="l00607"></a>00607 <span class="keyword">protected</span>:
+<a name="l00608"></a>00608     <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00163.html">const_accessor</a>;
+<a name="l00609"></a>00609     <span class="keyword">struct </span>node;
+<a name="l00610"></a>00610     <span class="keyword">typedef</span> <span class="keyword">typename</span> Allocator::template rebind&lt;node&gt;::other node_allocator_type;
+<a name="l00611"></a>00611     node_allocator_type my_allocator;
+<a name="l00612"></a>00612     HashCompare my_hash_compare;
+<a name="l00613"></a>00613 
+<a name="l00614"></a>00614     <span class="keyword">struct </span>node : <span class="keyword">public</span> node_base {
+<a name="l00615"></a>00615         value_type item;
+<a name="l00616"></a>00616         node( <span class="keyword">const</span> Key &amp;key ) : item(key, T()) {}
+<a name="l00617"></a>00617         node( <span class="keyword">const</span> Key &amp;key, <span class="keyword">const</span> T &amp;t ) : item(key, t) {}
+<a name="l00618"></a>00618         <span class="comment">// exception-safe allocation, see C++ Standard 2003, clause 5.3.4p17</span>
+<a name="l00619"></a>00619         <span class="keywordtype">void</span> *operator new( size_t <span class="comment">/*size*/</span>, node_allocator_type &amp;a ) {
+<a name="l00620"></a>00620             <span class="keywordtype">void</span> *ptr = a.allocate(1);
+<a name="l00621"></a>00621             <span class="keywordflow">if</span>(!ptr) 
+<a name="l00622"></a>00622                 tbb::internal::throw_exception(tbb::internal::eid_bad_alloc);
+<a name="l00623"></a>00623             <span class="keywordflow">return</span> ptr;
+<a name="l00624"></a>00624         }
+<a name="l00625"></a>00625         <span class="comment">// match placement-new form above to be called if exception thrown in constructor</span>
+<a name="l00626"></a>00626         <span class="keywordtype">void</span> operator delete( <span class="keywordtype">void</span> *ptr, node_allocator_type &amp;a ) {<span class="keywordflow">return</span> a.deallocate(static_cast&lt;node*&gt;(ptr),1); }
+<a name="l00627"></a>00627     };
+<a name="l00628"></a>00628 
+<a name="l00629"></a>00629     <span class="keywordtype">void</span> delete_node( node_base *n ) {
+<a name="l00630"></a>00630         my_allocator.destroy( static_cast&lt;node*&gt;(n) );
+<a name="l00631"></a>00631         my_allocator.deallocate( static_cast&lt;node*&gt;(n), 1);
+<a name="l00632"></a>00632     }
+<a name="l00633"></a>00633 
+<a name="l00634"></a>00634     node *search_bucket( <span class="keyword">const</span> key_type &amp;key, bucket *b )<span class="keyword"> const </span>{
+<a name="l00635"></a>00635         node *n = static_cast&lt;node*&gt;( b-&gt;node_list );
+<a name="l00636"></a>00636         <span class="keywordflow">while</span>( is_valid(n) &amp;&amp; !my_hash_compare.equal(key, n-&gt;item.first) )
+<a name="l00637"></a>00637             n = static_cast&lt;node*&gt;( n-&gt;next );
+<a name="l00638"></a>00638         __TBB_ASSERT(n != internal::rehash_req, <span class="stringliteral">"Search can be executed only for rehashed bucket"</span>);
+<a name="l00639"></a>00639         <span class="keywordflow">return</span> n;
+<a name="l00640"></a>00640     }
+<a name="l00641"></a>00641 
+<a name="l00643"></a><a class="code" href="a00162.html">00643</a>     <span class="keyword">class </span><a class="code" href="a00162.html">bucket_accessor</a> : <span class="keyword">public</span> bucket::scoped_t {
+<a name="l00644"></a>00644         <span class="keywordtype">bool</span> my_is_writer; <span class="comment">// TODO: use it from base type</span>
+<a name="l00645"></a>00645         bucket *my_b;
+<a name="l00646"></a>00646     <span class="keyword">public</span>:
+<a name="l00647"></a>00647         <a class="code" href="a00162.html">bucket_accessor</a>( <a class="code" href="a00160.html">concurrent_hash_map</a> *base, <span class="keyword">const</span> hashcode_t h, <span class="keywordtype">bool</span> writer = <span class="keyword">false</span> ) { <a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fef5f1fafe8d229d348ff91d937f64e79c7">acquire</a>( base, h, writer ); }
+<a name="l00649"></a><a class="code" href="a00162.html#26b4fe0ca87a7ad4852cb787db880119">00649</a>         <span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fef5f1fafe8d229d348ff91d937f64e79c7">acquire</a>( <a class="code" href="a00160.html">concurrent_hash_map</a> *base, <span class="keyword">const</span> hashcode_t h, <span class="keywordtype">bool</span> writer = <span class="keyword">false</span> ) {
+<a name="l00650"></a>00650             my_b = base-&gt;get_bucket( h );
+<a name="l00651"></a>00651 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS</span>
+<a name="l00652"></a>00652 <span class="preprocessor"></span>            <span class="comment">// TODO: actually, notification is unnecessary here, just hiding double-check</span>
+<a name="l00653"></a>00653             <span class="keywordflow">if</span>( itt_load_pointer_with_acquire_v3(&amp;my_b-&gt;node_list) == internal::rehash_req
+<a name="l00654"></a>00654 #<span class="keywordflow">else</span>
+<a name="l00655"></a>00655             <span class="keywordflow">if</span>( __TBB_load_with_acquire(my_b-&gt;node_list) == internal::rehash_req
+<a name="l00656"></a>00656 #endif
+<a name="l00657"></a>00657                 &amp;&amp; try_acquire( my_b-&gt;mutex, <span class="comment">/*write=*/</span><span class="keyword">true</span> ) )
+<a name="l00658"></a>00658             {
+<a name="l00659"></a>00659                 <span class="keywordflow">if</span>( my_b-&gt;node_list == internal::rehash_req ) base-&gt;<a class="code" href="a00160.html#571d635fd206d9985cf20a1a659ea476">rehash_bucket</a>( my_b, h ); <span class="comment">//recursive rehashing</span>
+<a name="l00660"></a>00660                 my_is_writer = <span class="keyword">true</span>;
+<a name="l00661"></a>00661             }
+<a name="l00662"></a>00662             <span class="keywordflow">else</span> bucket::scoped_t::acquire( my_b-&gt;mutex, <span class="comment">/*write=*/</span>my_is_writer = writer );
+<a name="l00663"></a>00663             __TBB_ASSERT( my_b-&gt;node_list != internal::rehash_req, NULL);
+<a name="l00664"></a>00664         }
+<a name="l00666"></a><a class="code" href="a00162.html#fc194e3a186dc935a5fb513cc9f8e898">00666</a>         <span class="keywordtype">bool</span> is_writer() { <span class="keywordflow">return</span> my_is_writer; }
+<a name="l00668"></a><a class="code" href="a00162.html#57c6110bd20e95c06de5a199de988941">00668</a>         bucket *operator() () { <span class="keywordflow">return</span> my_b; }
+<a name="l00669"></a>00669         <span class="comment">// TODO: optimize out</span>
+<a name="l00670"></a>00670         <span class="keywordtype">bool</span> upgrade_to_writer() { my_is_writer = <span class="keyword">true</span>; <span class="keywordflow">return</span> bucket::scoped_t::upgrade_to_writer(); }
+<a name="l00671"></a>00671     };
+<a name="l00672"></a>00672 
+<a name="l00673"></a>00673     <span class="comment">// TODO refactor to hash_base</span>
+<a name="l00674"></a>00674     <span class="keywordtype">void</span> rehash_bucket( bucket *b_new, <span class="keyword">const</span> hashcode_t h ) {
+<a name="l00675"></a>00675         __TBB_ASSERT( *(intptr_t*)(&amp;b_new-&gt;mutex), <span class="stringliteral">"b_new must be locked (for write)"</span>);
+<a name="l00676"></a>00676         __TBB_ASSERT( h &gt; 1, <span class="stringliteral">"The lowermost buckets can't be rehashed"</span> );
+<a name="l00677"></a>00677         __TBB_store_with_release(b_new-&gt;node_list, internal::empty_rehashed); <span class="comment">// mark rehashed</span>
+<a name="l00678"></a>00678         hashcode_t mask = ( 1u&lt;&lt;__TBB_Log2( h ) ) - 1; <span class="comment">// get parent mask from the topmost bit</span>
+<a name="l00679"></a>00679 <span class="preprocessor">#if __TBB_STATISTICS</span>
+<a name="l00680"></a>00680 <span class="preprocessor"></span>        my_info_rehashes++; <span class="comment">// invocations of rehash_bucket</span>
+<a name="l00681"></a>00681 <span class="preprocessor">#endif</span>
+<a name="l00682"></a>00682 <span class="preprocessor"></span>
+<a name="l00683"></a>00683         bucket_accessor b_old( <span class="keyword">this</span>, h &amp; mask );
+<a name="l00684"></a>00684 
+<a name="l00685"></a>00685         mask = (mask&lt;&lt;1) | 1; <span class="comment">// get full mask for new bucket</span>
+<a name="l00686"></a>00686         __TBB_ASSERT( (mask&amp;(mask+1))==0 &amp;&amp; (h &amp; mask) == h, NULL );
+<a name="l00687"></a>00687     restart:
+<a name="l00688"></a>00688         <span class="keywordflow">for</span>( node_base **p = &amp;b_old()-&gt;node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) {
+<a name="l00689"></a>00689             hashcode_t c = my_hash_compare.hash( static_cast&lt;node*&gt;(n)-&gt;item.first );
+<a name="l00690"></a>00690 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00691"></a>00691 <span class="preprocessor"></span>            hashcode_t bmask = h &amp; (mask&gt;&gt;1);
+<a name="l00692"></a>00692             bmask = bmask==0? 1 : ( 1u&lt;&lt;(__TBB_Log2( bmask )+1 ) ) - 1; <span class="comment">// minimal mask of parent bucket</span>
+<a name="l00693"></a>00693             __TBB_ASSERT( (c &amp; bmask) == (h &amp; bmask), <span class="stringliteral">"hash() function changed for key in table"</span> );
+<a name="l00694"></a>00694 <span class="preprocessor">#endif</span>
+<a name="l00695"></a>00695 <span class="preprocessor"></span>            <span class="keywordflow">if</span>( (c &amp; mask) == h ) {
+<a name="l00696"></a>00696                 <span class="keywordflow">if</span>( !b_old.is_writer() )
+<a name="l00697"></a>00697                     <span class="keywordflow">if</span>( !b_old.upgrade_to_writer() ) {
+<a name="l00698"></a>00698                         <span class="keywordflow">goto</span> restart; <span class="comment">// node ptr can be invalid due to concurrent erase</span>
+<a name="l00699"></a>00699                     }
+<a name="l00700"></a>00700                 *p = n-&gt;next; <span class="comment">// exclude from b_old</span>
+<a name="l00701"></a>00701                 add_to_bucket( b_new, n );
+<a name="l00702"></a>00702             } <span class="keywordflow">else</span> p = &amp;n-&gt;next; <span class="comment">// iterate to next item</span>
+<a name="l00703"></a>00703         }
+<a name="l00704"></a>00704     }
+<a name="l00705"></a>00705 
+<a name="l00706"></a>00706 <span class="keyword">public</span>:
+<a name="l00707"></a>00707     
+<a name="l00708"></a>00708     <span class="keyword">class </span>accessor;
+<a name="l00710"></a><a class="code" href="a00163.html">00710</a>     <span class="keyword">class </span><a class="code" href="a00163.html">const_accessor</a> {
+<a name="l00711"></a>00711         <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00160.html">concurrent_hash_map</a>&lt;Key,T,HashCompare,Allocator&gt;;
+<a name="l00712"></a>00712         <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00161.html">accessor</a>;
+<a name="l00713"></a>00713         <span class="keywordtype">void</span> operator=( <span class="keyword">const</span> <a class="code" href="a00161.html">accessor</a> &amp; ) <span class="keyword">const</span>; <span class="comment">// Deny access</span>
+<a name="l00714"></a>00714         <a class="code" href="a00163.html">const_accessor</a>( <span class="keyword">const</span> <a class="code" href="a00161.html">accessor</a> &amp; );       <span class="comment">// Deny access</span>
+<a name="l00715"></a>00715     <span class="keyword">public</span>:
+<a name="l00717"></a><a class="code" href="a00163.html#48647ca0d79c1233b997f5768403c926">00717</a>         <span class="keyword">typedef</span> <span class="keyword">const</span> <span class="keyword">typename</span> concurrent_hash_map::value_type value_type;
+<a name="l00718"></a>00718 
+<a name="l00720"></a><a class="code" href="a00163.html#5cce3104cb0a52e08d2131370871c614">00720</a>         <span class="keywordtype">bool</span> empty()<span class="keyword"> const </span>{<span class="keywordflow">return</span> !my_node;}
+<a name="l00721"></a>00721 
+<a name="l00723"></a><a class="code" href="a00163.html#d5ce4f88d8870290238a8ad621e6f270">00723</a>         <span class="keywordtype">void</span> <a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fefaa1fa107db0245c41fb109d976ae8d70">release</a>() {
+<a name="l00724"></a>00724             <span class="keywordflow">if</span>( my_node ) {
+<a name="l00725"></a>00725                 my_lock.release();
+<a name="l00726"></a>00726                 my_node = 0;
+<a name="l00727"></a>00727             }
+<a name="l00728"></a>00728         }
+<a name="l00729"></a>00729 
+<a name="l00731"></a><a class="code" href="a00163.html#30f31106840700a4c3664b9cb1c31ca7">00731</a>         const_reference operator*()<span class="keyword"> const </span>{
+<a name="l00732"></a>00732             __TBB_ASSERT( my_node, <span class="stringliteral">"attempt to dereference empty accessor"</span> );
+<a name="l00733"></a>00733             <span class="keywordflow">return</span> my_node-&gt;item;
+<a name="l00734"></a>00734         }
+<a name="l00735"></a>00735 
+<a name="l00737"></a><a class="code" href="a00163.html#3d03a48ecb8cd9549bd8be64b09c9b0d">00737</a>         const_pointer operator-&gt;()<span class="keyword"> const </span>{
+<a name="l00738"></a>00738             <span class="keywordflow">return</span> &amp;operator*();
+<a name="l00739"></a>00739         }
+<a name="l00740"></a>00740 
+<a name="l00742"></a><a class="code" href="a00163.html#a9ead65cca68d4c49c7ef64d7899a4c8">00742</a>         <a class="code" href="a00163.html">const_accessor</a>() : my_node(NULL) {}
+<a name="l00743"></a>00743 
+<a name="l00745"></a><a class="code" href="a00163.html#752b0c1ec74b94786403a75e42917d01">00745</a>         ~<a class="code" href="a00163.html">const_accessor</a>() {
+<a name="l00746"></a>00746             my_node = NULL; <span class="comment">// my_lock.release() is called in scoped_lock destructor</span>
+<a name="l00747"></a>00747         }
+<a name="l00748"></a>00748     <span class="keyword">private</span>:
+<a name="l00749"></a>00749         node *my_node;
+<a name="l00750"></a>00750         <span class="keyword">typename</span> node::scoped_t my_lock;
+<a name="l00751"></a>00751         hashcode_t my_hash;
+<a name="l00752"></a>00752     };
+<a name="l00753"></a>00753 
+<a name="l00755"></a><a class="code" href="a00161.html">00755</a>     <span class="keyword">class </span><a class="code" href="a00161.html">accessor</a>: <span class="keyword">public</span> <a class="code" href="a00163.html">const_accessor</a> {
+<a name="l00756"></a>00756     <span class="keyword">public</span>:
+<a name="l00758"></a><a class="code" href="a00161.html#49eec74f272bab187d176c0d9d16a7fe">00758</a>         <span class="keyword">typedef</span> <span class="keyword">typename</span> concurrent_hash_map::value_type value_type;
+<a name="l00759"></a>00759 
+<a name="l00761"></a><a class="code" href="a00161.html#e8938f0cd1211e88a1d73527ed3636c4">00761</a>         reference operator*()<span class="keyword"> const </span>{
+<a name="l00762"></a>00762             __TBB_ASSERT( this-&gt;my_node, <span class="stringliteral">"attempt to dereference empty accessor"</span> );
+<a name="l00763"></a>00763             <span class="keywordflow">return</span> this-&gt;my_node-&gt;item;
+<a name="l00764"></a>00764         }
+<a name="l00765"></a>00765 
+<a name="l00767"></a><a class="code" href="a00161.html#fcebc32c020202cc37e60eadef157569">00767</a>         pointer operator-&gt;()<span class="keyword"> const </span>{
+<a name="l00768"></a>00768             <span class="keywordflow">return</span> &amp;operator*();
+<a name="l00769"></a>00769         }
+<a name="l00770"></a>00770     };
+<a name="l00771"></a>00771 
+<a name="l00773"></a><a class="code" href="a00160.html#d827bb5e4f61de1916ab67d51c7c6e60">00773</a>     <a class="code" href="a00160.html">concurrent_hash_map</a>(<span class="keyword">const</span> allocator_type &amp;a = allocator_type())
+<a name="l00774"></a>00774         : internal::hash_map_base(), my_allocator(a)
+<a name="l00775"></a>00775     {}
+<a name="l00776"></a>00776 
+<a name="l00778"></a><a class="code" href="a00160.html#a4612d5c7233712d455496641e9b31ff">00778</a>     <a class="code" href="a00160.html">concurrent_hash_map</a>(size_type n, <span class="keyword">const</span> allocator_type &amp;a = allocator_type())
+<a name="l00779"></a>00779         : my_allocator(a)
+<a name="l00780"></a>00780     {
+<a name="l00781"></a>00781         reserve( n );
+<a name="l00782"></a>00782     }
+<a name="l00783"></a>00783 
+<a name="l00785"></a><a class="code" href="a00160.html#df0cd14eaddb17f10929c91519e65be9">00785</a>     <a class="code" href="a00160.html">concurrent_hash_map</a>( <span class="keyword">const</span> <a class="code" href="a00160.html">concurrent_hash_map</a>&amp; table, <span class="keyword">const</span> allocator_type &amp;a = allocator_type())
+<a name="l00786"></a>00786         : internal::hash_map_base(), my_allocator(a)
+<a name="l00787"></a>00787     {
+<a name="l00788"></a>00788         internal_copy(table);
+<a name="l00789"></a>00789     }
+<a name="l00790"></a>00790 
+<a name="l00792"></a>00792     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> I&gt;
+<a name="l00793"></a><a class="code" href="a00160.html#3bfe75fcb350ce39cf610c164f233edc">00793</a>     <a class="code" href="a00160.html">concurrent_hash_map</a>(I first, I last, <span class="keyword">const</span> allocator_type &amp;a = allocator_type())
+<a name="l00794"></a>00794         : my_allocator(a)
+<a name="l00795"></a>00795     {
+<a name="l00796"></a>00796         reserve( std::distance(first, last) ); <span class="comment">// TODO: load_factor?</span>
+<a name="l00797"></a>00797         internal_copy(first, last);
+<a name="l00798"></a>00798     }
+<a name="l00799"></a>00799 
+<a name="l00801"></a><a class="code" href="a00160.html#2c0c42a2e1b5282b6739157df9ce2304">00801</a>     <a class="code" href="a00160.html">concurrent_hash_map</a>&amp; operator=( <span class="keyword">const</span> <a class="code" href="a00160.html">concurrent_hash_map</a>&amp; table ) {
+<a name="l00802"></a>00802         <span class="keywordflow">if</span>( <span class="keyword">this</span>!=&amp;table ) {
+<a name="l00803"></a>00803             clear();
+<a name="l00804"></a>00804             internal_copy(table);
+<a name="l00805"></a>00805         } 
+<a name="l00806"></a>00806         <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00807"></a>00807     }
+<a name="l00808"></a>00808 
+<a name="l00809"></a>00809 
+<a name="l00811"></a>00811 
+<a name="l00813"></a>00813     <span class="keywordtype">void</span> rehash(size_type n = 0);
+<a name="l00814"></a>00814     
+<a name="l00816"></a>00816     <span class="keywordtype">void</span> clear();
+<a name="l00817"></a>00817 
+<a name="l00819"></a><a class="code" href="a00160.html#a1ac58997d8fbf242b266e3691573481">00819</a>     ~<a class="code" href="a00160.html">concurrent_hash_map</a>() { clear(); }
+<a name="l00820"></a>00820 
+<a name="l00821"></a>00821     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00822"></a>00822     <span class="comment">// Parallel algorithm support</span>
+<a name="l00823"></a>00823     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00824"></a>00824     range_type range( size_type grainsize=1 ) {
+<a name="l00825"></a>00825         <span class="keywordflow">return</span> range_type( *<span class="keyword">this</span>, grainsize );
+<a name="l00826"></a>00826     }
+<a name="l00827"></a>00827     const_range_type range( size_type grainsize=1 )<span class="keyword"> const </span>{
+<a name="l00828"></a>00828         <span class="keywordflow">return</span> const_range_type( *<span class="keyword">this</span>, grainsize );
+<a name="l00829"></a>00829     }
+<a name="l00830"></a>00830 
+<a name="l00831"></a>00831     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00832"></a>00832     <span class="comment">// STL support - not thread-safe methods</span>
+<a name="l00833"></a>00833     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00834"></a>00834     iterator begin() {<span class="keywordflow">return</span> iterator(*<span class="keyword">this</span>,0,my_embedded_segment,my_embedded_segment-&gt;node_list);}
+<a name="l00835"></a>00835     iterator end() {<span class="keywordflow">return</span> iterator(*<span class="keyword">this</span>,0,0,0);}
+<a name="l00836"></a>00836     const_iterator begin()<span class="keyword"> const </span>{<span class="keywordflow">return</span> const_iterator(*<span class="keyword">this</span>,0,my_embedded_segment,my_embedded_segment-&gt;node_list);}
+<a name="l00837"></a>00837     const_iterator end()<span class="keyword"> const </span>{<span class="keywordflow">return</span> const_iterator(*<span class="keyword">this</span>,0,0,0);}
+<a name="l00838"></a>00838     std::pair&lt;iterator, iterator&gt; equal_range( <span class="keyword">const</span> Key&amp; key ) { <span class="keywordflow">return</span> internal_equal_range(key, end()); }
+<a name="l00839"></a>00839     std::pair&lt;const_iterator, const_iterator&gt; equal_range( <span class="keyword">const</span> Key&amp; key )<span class="keyword"> const </span>{ <span class="keywordflow">return</span> internal_equal_range(key, end()); }
+<a name="l00840"></a>00840     
+<a name="l00842"></a><a class="code" href="a00160.html#17fd8c5fe8c6a86075f34aa4e8412ba3">00842</a>     size_type size()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> my_size; }
+<a name="l00843"></a>00843 
+<a name="l00845"></a><a class="code" href="a00160.html#6cab7d029a3e73a653ef0faeac4d1586">00845</a>     <span class="keywordtype">bool</span> empty()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> my_size == 0; }
+<a name="l00846"></a>00846 
+<a name="l00848"></a><a class="code" href="a00160.html#1e45d3cbd1e2ae06f365f1b48e0df0b5">00848</a>     size_type max_size()<span class="keyword"> const </span>{<span class="keywordflow">return</span> (~size_type(0))/<span class="keyword">sizeof</span>(node);}
+<a name="l00849"></a>00849 
+<a name="l00851"></a><a class="code" href="a00160.html#414d15033d36c63aa3a40666dc4d6f5e">00851</a>     size_type bucket_count()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> my_mask+1; }
+<a name="l00852"></a>00852 
+<a name="l00854"></a><a class="code" href="a00160.html#199208eed6f09e200cda364f906be0fe">00854</a>     allocator_type get_allocator()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> this-&gt;my_allocator; }
+<a name="l00855"></a>00855 
+<a name="l00857"></a>00857     <span class="keywordtype">void</span> swap(<a class="code" href="a00160.html">concurrent_hash_map</a> &amp;table);
+<a name="l00858"></a>00858 
+<a name="l00859"></a>00859     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00860"></a>00860     <span class="comment">// concurrent map operations</span>
+<a name="l00861"></a>00861     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00862"></a>00862 
+<a name="l00864"></a><a class="code" href="a00160.html#74f5ef06a06c5e619f156a1c76c04969">00864</a>     size_type count( <span class="keyword">const</span> Key &amp;key )<span class="keyword"> const </span>{
+<a name="l00865"></a>00865         <span class="keywordflow">return</span> const_cast&lt;concurrent_hash_map*&gt;(<span class="keyword">this</span>)-&gt;lookup(<span class="comment">/*insert*/</span><span class="keyword">false</span>, key, NULL, NULL, <span class="comment">/*write=*/</span><span class="keyword">false</span> );
+<a name="l00866"></a>00866     }
+<a name="l00867"></a>00867 
+<a name="l00869"></a>00869 
+<a name="l00870"></a><a class="code" href="a00160.html#64338d7f2e35df586af4cb0145cd910f">00870</a>     <span class="keywordtype">bool</span> find( <a class="code" href="a00163.html">const_accessor</a> &amp;result, <span class="keyword">const</span> Key &amp;key )<span class="keyword"> const </span>{
+<a name="l00871"></a>00871         result.<a class="code" href="a00163.html#d5ce4f88d8870290238a8ad621e6f270">release</a>();
+<a name="l00872"></a>00872         <span class="keywordflow">return</span> const_cast&lt;concurrent_hash_map*&gt;(<span class="keyword">this</span>)-&gt;lookup(<span class="comment">/*insert*/</span><span class="keyword">false</span>, key, NULL, &amp;result, <span class="comment">/*write=*/</span><span class="keyword">false</span> );
+<a name="l00873"></a>00873     }
+<a name="l00874"></a>00874 
+<a name="l00876"></a>00876 
+<a name="l00877"></a><a class="code" href="a00160.html#bce7bdf46435115a95cca2aa73c5da83">00877</a>     <span class="keywordtype">bool</span> find( <a class="code" href="a00161.html">accessor</a> &amp;result, <span class="keyword">const</span> Key &amp;key ) {
+<a name="l00878"></a>00878         result.<a class="code" href="a00163.html#d5ce4f88d8870290238a8ad621e6f270">release</a>();
+<a name="l00879"></a>00879         <span class="keywordflow">return</span> lookup(<span class="comment">/*insert*/</span><span class="keyword">false</span>, key, NULL, &amp;result, <span class="comment">/*write=*/</span><span class="keyword">true</span> );
+<a name="l00880"></a>00880     }
+<a name="l00881"></a>00881         
+<a name="l00883"></a>00883 
+<a name="l00884"></a><a class="code" href="a00160.html#58c38b27273de6c670568633c0931854">00884</a>     <span class="keywordtype">bool</span> insert( <a class="code" href="a00163.html">const_accessor</a> &amp;result, <span class="keyword">const</span> Key &amp;key ) {
+<a name="l00885"></a>00885         result.<a class="code" href="a00163.html#d5ce4f88d8870290238a8ad621e6f270">release</a>();
+<a name="l00886"></a>00886         <span class="keywordflow">return</span> lookup(<span class="comment">/*insert*/</span><span class="keyword">true</span>, key, NULL, &amp;result, <span class="comment">/*write=*/</span><span class="keyword">false</span> );
+<a name="l00887"></a>00887     }
+<a name="l00888"></a>00888 
+<a name="l00890"></a>00890 
+<a name="l00891"></a><a class="code" href="a00160.html#ccfecaa3e71d92be61fb3d811dd264eb">00891</a>     <span class="keywordtype">bool</span> insert( <a class="code" href="a00161.html">accessor</a> &amp;result, <span class="keyword">const</span> Key &amp;key ) {
+<a name="l00892"></a>00892         result.<a class="code" href="a00163.html#d5ce4f88d8870290238a8ad621e6f270">release</a>();
+<a name="l00893"></a>00893         <span class="keywordflow">return</span> lookup(<span class="comment">/*insert*/</span><span class="keyword">true</span>, key, NULL, &amp;result, <span class="comment">/*write=*/</span><span class="keyword">true</span> );
+<a name="l00894"></a>00894     }
+<a name="l00895"></a>00895 
+<a name="l00897"></a>00897 
+<a name="l00898"></a><a class="code" href="a00160.html#d4a2816129e38c53128c6d0c7b6b7370">00898</a>     <span class="keywordtype">bool</span> insert( <a class="code" href="a00163.html">const_accessor</a> &amp;result, <span class="keyword">const</span> value_type &amp;value ) {
+<a name="l00899"></a>00899         result.<a class="code" href="a00163.html#d5ce4f88d8870290238a8ad621e6f270">release</a>();
+<a name="l00900"></a>00900         <span class="keywordflow">return</span> lookup(<span class="comment">/*insert*/</span><span class="keyword">true</span>, value.first, &amp;value.second, &amp;result, <span class="comment">/*write=*/</span><span class="keyword">false</span> );
+<a name="l00901"></a>00901     }
+<a name="l00902"></a>00902 
+<a name="l00904"></a>00904 
+<a name="l00905"></a><a class="code" href="a00160.html#a657e61cd2b13164764ca2708875784a">00905</a>     <span class="keywordtype">bool</span> insert( <a class="code" href="a00161.html">accessor</a> &amp;result, <span class="keyword">const</span> value_type &amp;value ) {
+<a name="l00906"></a>00906         result.<a class="code" href="a00163.html#d5ce4f88d8870290238a8ad621e6f270">release</a>();
+<a name="l00907"></a>00907         <span class="keywordflow">return</span> lookup(<span class="comment">/*insert*/</span><span class="keyword">true</span>, value.first, &amp;value.second, &amp;result, <span class="comment">/*write=*/</span><span class="keyword">true</span> );
+<a name="l00908"></a>00908     }
+<a name="l00909"></a>00909 
+<a name="l00911"></a>00911 
+<a name="l00912"></a><a class="code" href="a00160.html#52bffd1066b3d7b793945bc6fa1a71a1">00912</a>     <span class="keywordtype">bool</span> insert( <span class="keyword">const</span> value_type &amp;value ) {
+<a name="l00913"></a>00913         <span class="keywordflow">return</span> lookup(<span class="comment">/*insert*/</span><span class="keyword">true</span>, value.first, &amp;value.second, NULL, <span class="comment">/*write=*/</span><span class="keyword">false</span> );
+<a name="l00914"></a>00914     }
+<a name="l00915"></a>00915 
+<a name="l00917"></a>00917     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> I&gt;
+<a name="l00918"></a><a class="code" href="a00160.html#cfe172677e5987004ef4a03e22fa338a">00918</a>     <span class="keywordtype">void</span> insert(I first, I last) {
+<a name="l00919"></a>00919         <span class="keywordflow">for</span>(; first != last; ++first)
+<a name="l00920"></a>00920             insert( *first );
+<a name="l00921"></a>00921     }
+<a name="l00922"></a>00922 
+<a name="l00924"></a>00924 
+<a name="l00925"></a>00925     <span class="keywordtype">bool</span> erase( <span class="keyword">const</span> Key&amp; key );
+<a name="l00926"></a>00926 
+<a name="l00928"></a>00928 
+<a name="l00929"></a><a class="code" href="a00160.html#da7e4a50f6bb06191817425ec85fe760">00929</a>     <span class="keywordtype">bool</span> erase( <a class="code" href="a00163.html">const_accessor</a>&amp; item_accessor ) {
+<a name="l00930"></a>00930         <span class="keywordflow">return</span> exclude( item_accessor, <span class="comment">/*readonly=*/</span> <span class="keyword">true</span> );
+<a name="l00931"></a>00931     }
+<a name="l00932"></a>00932 
+<a name="l00934"></a>00934 
+<a name="l00935"></a><a class="code" href="a00160.html#0f500842d0cf791f8fa61662edb1b311">00935</a>     <span class="keywordtype">bool</span> erase( <a class="code" href="a00161.html">accessor</a>&amp; item_accessor ) {
+<a name="l00936"></a>00936         <span class="keywordflow">return</span> exclude( item_accessor, <span class="comment">/*readonly=*/</span> <span class="keyword">false</span> );
+<a name="l00937"></a>00937     }
+<a name="l00938"></a>00938 
+<a name="l00939"></a>00939 <span class="keyword">protected</span>:
+<a name="l00941"></a>00941     <span class="keywordtype">bool</span> lookup( <span class="keywordtype">bool</span> op_insert, <span class="keyword">const</span> Key &amp;key, <span class="keyword">const</span> T *t, const_accessor *result, <span class="keywordtype">bool</span> write );
+<a name="l00942"></a>00942 
+<a name="l00944"></a>00944     <span class="keywordtype">bool</span> exclude( const_accessor &amp;item_accessor, <span class="keywordtype">bool</span> readonly );
+<a name="l00945"></a>00945 
+<a name="l00947"></a>00947     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> I&gt;
+<a name="l00948"></a>00948     std::pair&lt;I, I&gt; internal_equal_range( <span class="keyword">const</span> Key&amp; key, I end ) <span class="keyword">const</span>;
+<a name="l00949"></a>00949 
+<a name="l00951"></a>00951     <span class="keywordtype">void</span> internal_copy( <span class="keyword">const</span> <a class="code" href="a00160.html">concurrent_hash_map</a>&amp; source );
+<a name="l00952"></a>00952 
+<a name="l00953"></a>00953     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> I&gt;
+<a name="l00954"></a>00954     <span class="keywordtype">void</span> internal_copy(I first, I last);
+<a name="l00955"></a>00955 
+<a name="l00957"></a>00957 
+<a name="l00959"></a><a class="code" href="a00160.html#2f76ed101a0ccc8875b846c2f747897e">00959</a>     const_pointer internal_fast_find( <span class="keyword">const</span> Key&amp; key )<span class="keyword"> const </span>{
+<a name="l00960"></a>00960         hashcode_t h = my_hash_compare.hash( key );
+<a name="l00961"></a>00961 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS</span>
+<a name="l00962"></a>00962 <span class="preprocessor"></span>        hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &amp;my_mask );
+<a name="l00963"></a>00963 <span class="preprocessor">#else</span>
+<a name="l00964"></a>00964 <span class="preprocessor"></span>        hashcode_t m = my_mask;
+<a name="l00965"></a>00965 <span class="preprocessor">#endif</span>
+<a name="l00966"></a>00966 <span class="preprocessor"></span>        node *n;
+<a name="l00967"></a>00967     restart:
+<a name="l00968"></a>00968         __TBB_ASSERT((m&amp;(m+1))==0, NULL);
+<a name="l00969"></a>00969         bucket *b = get_bucket( h &amp; m );
+<a name="l00970"></a>00970 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS</span>
+<a name="l00971"></a>00971 <span class="preprocessor"></span>        <span class="comment">// TODO: actually, notification is unnecessary here, just hiding double-check</span>
+<a name="l00972"></a>00972         <span class="keywordflow">if</span>( itt_load_pointer_with_acquire_v3(&amp;b-&gt;node_list) == internal::rehash_req )
+<a name="l00973"></a>00973 <span class="preprocessor">#else</span>
+<a name="l00974"></a>00974 <span class="preprocessor"></span>        <span class="keywordflow">if</span>( __TBB_load_with_acquire(b-&gt;node_list) == internal::rehash_req )
+<a name="l00975"></a>00975 <span class="preprocessor">#endif</span>
+<a name="l00976"></a>00976 <span class="preprocessor"></span>        {
+<a name="l00977"></a>00977             bucket::scoped_t lock;
+<a name="l00978"></a>00978             <span class="keywordflow">if</span>( lock.try_acquire( b-&gt;mutex, <span class="comment">/*write=*/</span><span class="keyword">true</span> ) ) {
+<a name="l00979"></a>00979                 <span class="keywordflow">if</span>( b-&gt;node_list == internal::rehash_req)
+<a name="l00980"></a>00980                     const_cast&lt;concurrent_hash_map*&gt;(<span class="keyword">this</span>)-&gt;rehash_bucket( b, h &amp; m ); <span class="comment">//recursive rehashing</span>
+<a name="l00981"></a>00981             }
+<a name="l00982"></a>00982             <span class="keywordflow">else</span> lock.acquire( b-&gt;mutex, <span class="comment">/*write=*/</span><span class="keyword">false</span> );
+<a name="l00983"></a>00983             __TBB_ASSERT(b-&gt;node_list!=internal::rehash_req,NULL);
+<a name="l00984"></a>00984         }
+<a name="l00985"></a>00985         n = search_bucket( key, b );
+<a name="l00986"></a>00986         <span class="keywordflow">if</span>( n )
+<a name="l00987"></a>00987             <span class="keywordflow">return</span> &amp;n-&gt;item;
+<a name="l00988"></a>00988         <span class="keywordflow">else</span> <span class="keywordflow">if</span>( check_mask_race( h, m ) )
+<a name="l00989"></a>00989             <span class="keywordflow">goto</span> restart;
+<a name="l00990"></a>00990         <span class="keywordflow">return</span> 0;
+<a name="l00991"></a>00991     }
+<a name="l00992"></a>00992 };
+<a name="l00993"></a>00993 
+<a name="l00994"></a>00994 <span class="preprocessor">#if _MSC_VER &amp;&amp; !defined(__INTEL_COMPILER)</span>
+<a name="l00995"></a>00995 <span class="preprocessor"></span>    <span class="comment">// Suppress "conditional expression is constant" warning.</span>
+<a name="l00996"></a>00996 <span class="preprocessor">    #pragma warning( push )</span>
+<a name="l00997"></a>00997 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning( disable: 4127 )</span>
+<a name="l00998"></a>00998 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00999"></a>00999 <span class="preprocessor"></span>
+<a name="l01000"></a>01000 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Key, <span class="keyword">typename</span> T, <span class="keyword">typename</span> HashCompare, <span class="keyword">typename</span> A&gt;
+<a name="l01001"></a><a class="code" href="a00160.html#1f22480a290ddc6c145888d8f985531a">01001</a> <span class="keywordtype">bool</span> <a class="code" href="a00160.html">concurrent_hash_map&lt;Key,T,HashCompare,A&gt;::lookup</a>( <span class="keywordtype">bool</span> op_insert, <span class="keyword">const</span> Key &amp;key, <span class="keyword">const</span> T *t, <a class="code" href="a00163.html">const_accessor</a> *result, <span class="keywordtype">bool</span> write ) {
+<a name="l01002"></a>01002     __TBB_ASSERT( !result || !result-&gt;<a class="code" href="a00163.html#74e5fd476c306e90361f3377f0fec6d8">my_node</a>, NULL );
+<a name="l01003"></a>01003     <span class="keywordtype">bool</span> return_value;
+<a name="l01004"></a>01004     hashcode_t <span class="keyword">const</span> h = my_hash_compare.hash( key );
+<a name="l01005"></a>01005 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS</span>
+<a name="l01006"></a>01006 <span class="preprocessor"></span>    hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &amp;my_mask );
+<a name="l01007"></a>01007 <span class="preprocessor">#else</span>
+<a name="l01008"></a>01008 <span class="preprocessor"></span>    hashcode_t m = my_mask;
+<a name="l01009"></a>01009 <span class="preprocessor">#endif</span>
+<a name="l01010"></a>01010 <span class="preprocessor"></span>    segment_index_t grow_segment = 0;
+<a name="l01011"></a>01011     node *n, *tmp_n = 0;
+<a name="l01012"></a>01012     restart:
+<a name="l01013"></a>01013     {<span class="comment">//lock scope</span>
+<a name="l01014"></a>01014         __TBB_ASSERT((m&amp;(m+1))==0, NULL);
+<a name="l01015"></a>01015         return_value = <span class="keyword">false</span>;
+<a name="l01016"></a>01016         <span class="comment">// get bucket</span>
+<a name="l01017"></a>01017         <a class="code" href="a00162.html">bucket_accessor</a> b( <span class="keyword">this</span>, h &amp; m );
+<a name="l01018"></a>01018 
+<a name="l01019"></a>01019         <span class="comment">// find a node</span>
+<a name="l01020"></a>01020         n = search_bucket( key, b() );
+<a name="l01021"></a>01021         <span class="keywordflow">if</span>( op_insert ) {
+<a name="l01022"></a>01022             <span class="comment">// [opt] insert a key</span>
+<a name="l01023"></a>01023             <span class="keywordflow">if</span>( !n ) {
+<a name="l01024"></a>01024                 <span class="keywordflow">if</span>( !tmp_n ) {
+<a name="l01025"></a>01025                     <span class="keywordflow">if</span>(t) tmp_n = <span class="keyword">new</span>( my_allocator ) node(key, *t);
+<a name="l01026"></a>01026                     <span class="keywordflow">else</span>  tmp_n = <span class="keyword">new</span>( my_allocator ) node(key);
+<a name="l01027"></a>01027                 }
+<a name="l01028"></a>01028                 <span class="keywordflow">if</span>( !b.<a class="code" href="a00162.html#fc194e3a186dc935a5fb513cc9f8e898">is_writer</a>() &amp;&amp; !b.<a class="code" href="a00162.html#8f7f0dc61f528de29d06e6054b4a9835">upgrade_to_writer</a>() ) { <span class="comment">// TODO: improved insertion</span>
+<a name="l01029"></a>01029                     <span class="comment">// Rerun search_list, in case another thread inserted the item during the upgrade.</span>
+<a name="l01030"></a>01030                     n = search_bucket( key, b() );
+<a name="l01031"></a>01031                     <span class="keywordflow">if</span>( is_valid(n) ) { <span class="comment">// unfortunately, it did</span>
+<a name="l01032"></a>01032                         b.downgrade_to_reader();
+<a name="l01033"></a>01033                         <span class="keywordflow">goto</span> exists;
+<a name="l01034"></a>01034                     }
+<a name="l01035"></a>01035                 }
+<a name="l01036"></a>01036                 <span class="keywordflow">if</span>( check_mask_race(h, m) )
+<a name="l01037"></a>01037                     <span class="keywordflow">goto</span> restart; <span class="comment">// b.release() is done in ~b().</span>
+<a name="l01038"></a>01038                 <span class="comment">// insert and set flag to grow the container</span>
+<a name="l01039"></a>01039                 grow_segment = insert_new_node( b(), n = tmp_n, m );
+<a name="l01040"></a>01040                 tmp_n = 0;
+<a name="l01041"></a>01041                 return_value = <span class="keyword">true</span>;
+<a name="l01042"></a>01042             }
+<a name="l01043"></a>01043         } <span class="keywordflow">else</span> { <span class="comment">// find or count</span>
+<a name="l01044"></a>01044             <span class="keywordflow">if</span>( !n ) {
+<a name="l01045"></a>01045                 <span class="keywordflow">if</span>( check_mask_race( h, m ) )
+<a name="l01046"></a>01046                     <span class="keywordflow">goto</span> restart; <span class="comment">// b.release() is done in ~b(). TODO: replace by continue</span>
+<a name="l01047"></a>01047                 <span class="keywordflow">return</span> <span class="keyword">false</span>;
+<a name="l01048"></a>01048             }
+<a name="l01049"></a>01049             return_value = <span class="keyword">true</span>;
+<a name="l01050"></a>01050         }
+<a name="l01051"></a>01051     exists:
+<a name="l01052"></a>01052         <span class="keywordflow">if</span>( !result ) <span class="keywordflow">goto</span> check_growth;
+<a name="l01053"></a>01053         <span class="comment">// TODO: the following seems as generic/regular operation</span>
+<a name="l01054"></a>01054         <span class="comment">// acquire the item</span>
+<a name="l01055"></a>01055         <span class="keywordflow">if</span>( !result-&gt;<a class="code" href="a00163.html#b57d7e72014cd2ae5a915606410615ce">my_lock</a>.try_acquire( n-&gt;mutex, write ) ) {
+<a name="l01056"></a>01056             <span class="comment">// we are unlucky, prepare for longer wait</span>
+<a name="l01057"></a>01057             <a class="code" href="a00149.html">tbb::internal::atomic_backoff</a> trials;
+<a name="l01058"></a>01058             <span class="keywordflow">do</span> {
+<a name="l01059"></a>01059                 <span class="keywordflow">if</span>( !trials.bounded_pause() ) {
+<a name="l01060"></a>01060                     <span class="comment">// the wait takes really long, restart the operation</span>
+<a name="l01061"></a>01061                     b.release();
+<a name="l01062"></a>01062                     __TBB_ASSERT( !op_insert || !return_value, <span class="stringliteral">"Can't acquire new item in locked bucket?"</span> );
+<a name="l01063"></a>01063                     __TBB_Yield();
+<a name="l01064"></a>01064 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS</span>
+<a name="l01065"></a>01065 <span class="preprocessor"></span>                    m = (hashcode_t) itt_load_pointer_with_acquire_v3( &amp;my_mask );
+<a name="l01066"></a>01066 <span class="preprocessor">#else</span>
+<a name="l01067"></a>01067 <span class="preprocessor"></span>                    m = my_mask;
+<a name="l01068"></a>01068 <span class="preprocessor">#endif</span>
+<a name="l01069"></a>01069 <span class="preprocessor"></span>                    <span class="keywordflow">goto</span> restart;
+<a name="l01070"></a>01070                 }
+<a name="l01071"></a>01071             } <span class="keywordflow">while</span>( !result-&gt;<a class="code" href="a00163.html#b57d7e72014cd2ae5a915606410615ce">my_lock</a>.try_acquire( n-&gt;mutex, write ) );
+<a name="l01072"></a>01072         }
+<a name="l01073"></a>01073     }<span class="comment">//lock scope</span>
+<a name="l01074"></a>01074     result-&gt;<a class="code" href="a00163.html#74e5fd476c306e90361f3377f0fec6d8">my_node</a> = n;
+<a name="l01075"></a>01075     result-&gt;<a class="code" href="a00163.html#88e677d4e26a67d65d81af524ddd483a">my_hash</a> = h;
+<a name="l01076"></a>01076 check_growth:
+<a name="l01077"></a>01077     <span class="comment">// [opt] grow the container</span>
+<a name="l01078"></a>01078     <span class="keywordflow">if</span>( grow_segment ) {
+<a name="l01079"></a>01079 <span class="preprocessor">#if __TBB_STATISTICS</span>
+<a name="l01080"></a>01080 <span class="preprocessor"></span>        my_info_resizes++; <span class="comment">// concurrent ones</span>
+<a name="l01081"></a>01081 <span class="preprocessor">#endif</span>
+<a name="l01082"></a>01082 <span class="preprocessor"></span>        enable_segment( grow_segment );
+<a name="l01083"></a>01083     }
+<a name="l01084"></a>01084     <span class="keywordflow">if</span>( tmp_n ) <span class="comment">// if op_insert only</span>
+<a name="l01085"></a>01085         delete_node( tmp_n );
+<a name="l01086"></a>01086     <span class="keywordflow">return</span> return_value;
+<a name="l01087"></a>01087 }
+<a name="l01088"></a>01088 
+<a name="l01089"></a>01089 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Key, <span class="keyword">typename</span> T, <span class="keyword">typename</span> HashCompare, <span class="keyword">typename</span> A&gt;
+<a name="l01090"></a>01090 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> I&gt;
+<a name="l01091"></a><a class="code" href="a00160.html#976c57edfb7f22b9f91a2e11f141eb4a">01091</a> std::pair&lt;I, I&gt; <a class="code" href="a00160.html">concurrent_hash_map&lt;Key,T,HashCompare,A&gt;::internal_equal_range</a>( <span class="keyword">const</span> Key&amp; key, I end_ )<span class="keyword"> const </span>{
+<a name="l01092"></a>01092     hashcode_t h = my_hash_compare.hash( key );
+<a name="l01093"></a>01093     hashcode_t m = my_mask;
+<a name="l01094"></a>01094     __TBB_ASSERT((m&amp;(m+1))==0, NULL);
+<a name="l01095"></a>01095     h &amp;= m;
+<a name="l01096"></a>01096     bucket *b = get_bucket( h );
+<a name="l01097"></a>01097     <span class="keywordflow">while</span>( b-&gt;node_list == internal::rehash_req ) {
+<a name="l01098"></a>01098         m = ( 1u&lt;&lt;__TBB_Log2( h ) ) - 1; <span class="comment">// get parent mask from the topmost bit</span>
+<a name="l01099"></a>01099         b = get_bucket( h &amp;= m );
+<a name="l01100"></a>01100     }
+<a name="l01101"></a>01101     node *n = search_bucket( key, b );
+<a name="l01102"></a>01102     <span class="keywordflow">if</span>( !n )
+<a name="l01103"></a>01103         <span class="keywordflow">return</span> std::make_pair(end_, end_);
+<a name="l01104"></a>01104     iterator lower(*<span class="keyword">this</span>, h, b, n), upper(lower);
+<a name="l01105"></a>01105     <span class="keywordflow">return</span> std::make_pair(lower, ++upper);
+<a name="l01106"></a>01106 }
+<a name="l01107"></a>01107 
+<a name="l01108"></a>01108 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Key, <span class="keyword">typename</span> T, <span class="keyword">typename</span> HashCompare, <span class="keyword">typename</span> A&gt;
+<a name="l01109"></a><a class="code" href="a00160.html#faad2108bd2be75e52293486af59f11e">01109</a> <span class="keywordtype">bool</span> <a class="code" href="a00160.html">concurrent_hash_map&lt;Key,T,HashCompare,A&gt;::exclude</a>( <a class="code" href="a00163.html">const_accessor</a> &amp;item_accessor, <span class="keywordtype">bool</span> readonly ) {
+<a name="l01110"></a>01110     __TBB_ASSERT( item_accessor.<a class="code" href="a00163.html#74e5fd476c306e90361f3377f0fec6d8">my_node</a>, NULL );
+<a name="l01111"></a>01111     node_base *<span class="keyword">const</span> n = item_accessor.<a class="code" href="a00163.html#74e5fd476c306e90361f3377f0fec6d8">my_node</a>;
+<a name="l01112"></a>01112     item_accessor.<a class="code" href="a00163.html#74e5fd476c306e90361f3377f0fec6d8">my_node</a> = NULL; <span class="comment">// we ought release accessor anyway</span>
+<a name="l01113"></a>01113     hashcode_t <span class="keyword">const</span> h = item_accessor.<a class="code" href="a00163.html#88e677d4e26a67d65d81af524ddd483a">my_hash</a>;
+<a name="l01114"></a>01114 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS</span>
+<a name="l01115"></a>01115 <span class="preprocessor"></span>    hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &amp;my_mask );
+<a name="l01116"></a>01116 <span class="preprocessor">#else</span>
+<a name="l01117"></a>01117 <span class="preprocessor"></span>    hashcode_t m = my_mask;
+<a name="l01118"></a>01118 <span class="preprocessor">#endif</span>
+<a name="l01119"></a>01119 <span class="preprocessor"></span>    <span class="keywordflow">do</span> {
+<a name="l01120"></a>01120         <span class="comment">// get bucket</span>
+<a name="l01121"></a>01121         <a class="code" href="a00162.html">bucket_accessor</a> b( <span class="keyword">this</span>, h &amp; m, <span class="comment">/*writer=*/</span><span class="keyword">true</span> );
+<a name="l01122"></a>01122         node_base **p = &amp;b()-&gt;node_list;
+<a name="l01123"></a>01123         <span class="keywordflow">while</span>( *p &amp;&amp; *p != n )
+<a name="l01124"></a>01124             p = &amp;(*p)-&gt;next;
+<a name="l01125"></a>01125         <span class="keywordflow">if</span>( !*p ) { <span class="comment">// someone else was the first</span>
+<a name="l01126"></a>01126             <span class="keywordflow">if</span>( check_mask_race( h, m ) )
+<a name="l01127"></a>01127                 <span class="keywordflow">continue</span>;
+<a name="l01128"></a>01128             item_accessor.<a class="code" href="a00163.html#b57d7e72014cd2ae5a915606410615ce">my_lock</a>.release();
+<a name="l01129"></a>01129             <span class="keywordflow">return</span> <span class="keyword">false</span>;
+<a name="l01130"></a>01130         }
+<a name="l01131"></a>01131         __TBB_ASSERT( *p == n, NULL );
+<a name="l01132"></a>01132         *p = n-&gt;next; <span class="comment">// remove from container</span>
+<a name="l01133"></a>01133         my_size--;
+<a name="l01134"></a>01134         <span class="keywordflow">break</span>;
+<a name="l01135"></a>01135     } <span class="keywordflow">while</span>(<span class="keyword">true</span>);
+<a name="l01136"></a>01136     <span class="keywordflow">if</span>( readonly ) <span class="comment">// need to get exclusive lock</span>
+<a name="l01137"></a>01137         item_accessor.<a class="code" href="a00163.html#b57d7e72014cd2ae5a915606410615ce">my_lock</a>.upgrade_to_writer(); <span class="comment">// return value means nothing here</span>
+<a name="l01138"></a>01138     item_accessor.<a class="code" href="a00163.html#b57d7e72014cd2ae5a915606410615ce">my_lock</a>.release();
+<a name="l01139"></a>01139     delete_node( n ); <span class="comment">// Only one thread can delete it due to write lock on the chain_mutex</span>
+<a name="l01140"></a>01140     <span class="keywordflow">return</span> <span class="keyword">true</span>;
+<a name="l01141"></a>01141 }
+<a name="l01142"></a>01142 
+<a name="l01143"></a>01143 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Key, <span class="keyword">typename</span> T, <span class="keyword">typename</span> HashCompare, <span class="keyword">typename</span> A&gt;
+<a name="l01144"></a><a class="code" href="a00160.html#034c3b3ee419edee78e0f2f2b1f0d7ca">01144</a> <span class="keywordtype">bool</span> <a class="code" href="a00160.html">concurrent_hash_map&lt;Key,T,HashCompare,A&gt;::erase</a>( <span class="keyword">const</span> Key &amp;key ) {
+<a name="l01145"></a>01145     node_base *n;
+<a name="l01146"></a>01146     hashcode_t <span class="keyword">const</span> h = my_hash_compare.hash( key );
+<a name="l01147"></a>01147 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS</span>
+<a name="l01148"></a>01148 <span class="preprocessor"></span>    hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &amp;my_mask );
+<a name="l01149"></a>01149 <span class="preprocessor">#else</span>
+<a name="l01150"></a>01150 <span class="preprocessor"></span>    hashcode_t m = my_mask;
+<a name="l01151"></a>01151 <span class="preprocessor">#endif</span>
+<a name="l01152"></a>01152 <span class="preprocessor"></span>restart:
+<a name="l01153"></a>01153     {<span class="comment">//lock scope</span>
+<a name="l01154"></a>01154         <span class="comment">// get bucket</span>
+<a name="l01155"></a>01155         <a class="code" href="a00162.html">bucket_accessor</a> b( <span class="keyword">this</span>, h &amp; m );
+<a name="l01156"></a>01156     search:
+<a name="l01157"></a>01157         node_base **p = &amp;b()-&gt;node_list;
+<a name="l01158"></a>01158         n = *p;
+<a name="l01159"></a>01159         <span class="keywordflow">while</span>( is_valid(n) &amp;&amp; !my_hash_compare.equal(key, static_cast&lt;node*&gt;(n)-&gt;item.first ) ) {
+<a name="l01160"></a>01160             p = &amp;n-&gt;next;
+<a name="l01161"></a>01161             n = *p;
+<a name="l01162"></a>01162         }
+<a name="l01163"></a>01163         <span class="keywordflow">if</span>( !n ) { <span class="comment">// not found, but mask could be changed</span>
+<a name="l01164"></a>01164             <span class="keywordflow">if</span>( check_mask_race( h, m ) )
+<a name="l01165"></a>01165                 <span class="keywordflow">goto</span> restart;
+<a name="l01166"></a>01166             <span class="keywordflow">return</span> <span class="keyword">false</span>;
+<a name="l01167"></a>01167         }
+<a name="l01168"></a>01168         <span class="keywordflow">else</span> <span class="keywordflow">if</span>( !b.<a class="code" href="a00162.html#fc194e3a186dc935a5fb513cc9f8e898">is_writer</a>() &amp;&amp; !b.<a class="code" href="a00162.html#8f7f0dc61f528de29d06e6054b4a9835">upgrade_to_writer</a>() ) {
+<a name="l01169"></a>01169             <span class="keywordflow">if</span>( check_mask_race( h, m ) ) <span class="comment">// contended upgrade, check mask</span>
+<a name="l01170"></a>01170                 <span class="keywordflow">goto</span> restart;
+<a name="l01171"></a>01171             <span class="keywordflow">goto</span> search;
+<a name="l01172"></a>01172         }
+<a name="l01173"></a>01173         *p = n-&gt;next;
+<a name="l01174"></a>01174         my_size--;
+<a name="l01175"></a>01175     }
+<a name="l01176"></a>01176     {
+<a name="l01177"></a>01177         <span class="keyword">typename</span> node::scoped_t item_locker( n-&gt;mutex, <span class="comment">/*write=*/</span><span class="keyword">true</span> );
+<a name="l01178"></a>01178     }
+<a name="l01179"></a>01179     <span class="comment">// note: there should be no threads pretending to acquire this mutex again, do not try to upgrade const_accessor!</span>
+<a name="l01180"></a>01180     delete_node( n ); <span class="comment">// Only one thread can delete it due to write lock on the bucket</span>
+<a name="l01181"></a>01181     <span class="keywordflow">return</span> <span class="keyword">true</span>;
+<a name="l01182"></a>01182 }
+<a name="l01183"></a>01183 
+<a name="l01184"></a>01184 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Key, <span class="keyword">typename</span> T, <span class="keyword">typename</span> HashCompare, <span class="keyword">typename</span> A&gt;
+<a name="l01185"></a>01185 <span class="keywordtype">void</span> <a class="code" href="a00160.html">concurrent_hash_map&lt;Key,T,HashCompare,A&gt;::swap</a>(<a class="code" href="a00160.html">concurrent_hash_map&lt;Key,T,HashCompare,A&gt;</a> &amp;table) {
+<a name="l01186"></a>01186     std::swap(this-&gt;my_allocator, table.<a class="code" href="a00160.html#252e91d8029f6308db7179557e3b1436">my_allocator</a>);
+<a name="l01187"></a>01187     std::swap(this-&gt;my_hash_compare, table.<a class="code" href="a00160.html#f738f241c8500ce3dbf0f9028ca8b602">my_hash_compare</a>);
+<a name="l01188"></a>01188     internal_swap(table);
+<a name="l01189"></a>01189 }
+<a name="l01190"></a>01190 
+<a name="l01191"></a>01191 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Key, <span class="keyword">typename</span> T, <span class="keyword">typename</span> HashCompare, <span class="keyword">typename</span> A&gt;
+<a name="l01192"></a><a class="code" href="a00160.html#13f3f2e8de7564be03882c31559493c9">01192</a> <span class="keywordtype">void</span> <a class="code" href="a00160.html">concurrent_hash_map&lt;Key,T,HashCompare,A&gt;::rehash</a>(size_type sz) {
+<a name="l01193"></a>01193     reserve( sz ); <span class="comment">// TODO: add reduction of number of buckets as well</span>
+<a name="l01194"></a>01194     hashcode_t mask = my_mask;
+<a name="l01195"></a>01195     hashcode_t b = (mask+1)&gt;&gt;1; <span class="comment">// size or first index of the last segment</span>
+<a name="l01196"></a>01196     __TBB_ASSERT((b&amp;(b-1))==0, NULL);
+<a name="l01197"></a>01197     bucket *bp = get_bucket( b ); <span class="comment">// only the last segment should be scanned for rehashing</span>
+<a name="l01198"></a>01198     <span class="keywordflow">for</span>(; b &lt;= mask; b++, bp++ ) {
+<a name="l01199"></a>01199         node_base *n = bp-&gt;node_list;
+<a name="l01200"></a>01200         __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, <span class="stringliteral">"Broken internal structure"</span> );
+<a name="l01201"></a>01201         __TBB_ASSERT( *reinterpret_cast&lt;intptr_t*&gt;(&amp;bp-&gt;mutex) == 0, <span class="stringliteral">"concurrent or unexpectedly terminated operation during rehash() execution"</span> );
+<a name="l01202"></a>01202         <span class="keywordflow">if</span>( n == internal::rehash_req ) { <span class="comment">// rehash bucket, conditional because rehashing of a previous bucket may affect this one</span>
+<a name="l01203"></a>01203             hashcode_t h = b; bucket *b_old = bp;
+<a name="l01204"></a>01204             <span class="keywordflow">do</span> {
+<a name="l01205"></a>01205                 __TBB_ASSERT( h &gt; 1, <span class="stringliteral">"The lowermost buckets can't be rehashed"</span> );
+<a name="l01206"></a>01206                 hashcode_t m = ( 1u&lt;&lt;__TBB_Log2( h ) ) - 1; <span class="comment">// get parent mask from the topmost bit</span>
+<a name="l01207"></a>01207                 b_old = get_bucket( h &amp;= m );
+<a name="l01208"></a>01208             } <span class="keywordflow">while</span>( b_old-&gt;node_list == internal::rehash_req );
+<a name="l01209"></a>01209             <span class="comment">// now h - is index of the root rehashed bucket b_old</span>
+<a name="l01210"></a>01210             mark_rehashed_levels( h ); <span class="comment">// mark all non-rehashed children recursively across all segments</span>
+<a name="l01211"></a>01211             <span class="keywordflow">for</span>( node_base **p = &amp;b_old-&gt;node_list, *q = *p; is_valid(q); q = *p ) {
+<a name="l01212"></a>01212                 hashcode_t c = my_hash_compare.hash( static_cast&lt;node*&gt;(q)-&gt;item.first );
+<a name="l01213"></a>01213                 <span class="keywordflow">if</span>( (c &amp; mask) != h ) { <span class="comment">// should be rehashed</span>
+<a name="l01214"></a>01214                     *p = q-&gt;next; <span class="comment">// exclude from b_old</span>
+<a name="l01215"></a>01215                     bucket *b_new = get_bucket( c &amp; mask );
+<a name="l01216"></a>01216                     __TBB_ASSERT( b_new-&gt;node_list != internal::rehash_req, <span class="stringliteral">"hash() function changed for key in table or internal error"</span> );
+<a name="l01217"></a>01217                     add_to_bucket( b_new, q );
+<a name="l01218"></a>01218                 } <span class="keywordflow">else</span> p = &amp;q-&gt;next; <span class="comment">// iterate to next item</span>
+<a name="l01219"></a>01219             }
+<a name="l01220"></a>01220         }
+<a name="l01221"></a>01221     }
+<a name="l01222"></a>01222 <span class="preprocessor">#if TBB_USE_PERFORMANCE_WARNINGS</span>
+<a name="l01223"></a>01223 <span class="preprocessor"></span>    <span class="keywordtype">int</span> current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; <span class="comment">// usage statistics</span>
+<a name="l01224"></a>01224     <span class="keyword">static</span> <span class="keywordtype">bool</span> reported = <span class="keyword">false</span>;
+<a name="l01225"></a>01225 <span class="preprocessor">#endif</span>
+<a name="l01226"></a>01226 <span class="preprocessor"></span><span class="preprocessor">#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS</span>
+<a name="l01227"></a>01227 <span class="preprocessor"></span>    <span class="keywordflow">for</span>( b = 0; b &lt;= mask; b++ ) {<span class="comment">// only last segment should be scanned for rehashing</span>
+<a name="l01228"></a>01228         <span class="keywordflow">if</span>( b &amp; (b-2) ) ++bp; <span class="comment">// not the beginning of a segment</span>
+<a name="l01229"></a>01229         <span class="keywordflow">else</span> bp = get_bucket( b );
+<a name="l01230"></a>01230         node_base *n = bp-&gt;node_list;
+<a name="l01231"></a>01231         __TBB_ASSERT( *reinterpret_cast&lt;intptr_t*&gt;(&amp;bp-&gt;mutex) == 0, <span class="stringliteral">"concurrent or unexpectedly terminated operation during rehash() execution"</span> );
+<a name="l01232"></a>01232         __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, <span class="stringliteral">"Broken internal structure"</span> );
+<a name="l01233"></a>01233 <span class="preprocessor">#if TBB_USE_PERFORMANCE_WARNINGS</span>
+<a name="l01234"></a>01234 <span class="preprocessor"></span>        <span class="keywordflow">if</span>( n == internal::empty_rehashed ) empty_buckets++;
+<a name="l01235"></a>01235         <span class="keywordflow">else</span> <span class="keywordflow">if</span>( n-&gt;next ) overpopulated_buckets++;
+<a name="l01236"></a>01236 <span class="preprocessor">#endif</span>
+<a name="l01237"></a>01237 <span class="preprocessor"></span><span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l01238"></a>01238 <span class="preprocessor"></span>        <span class="keywordflow">for</span>( ; is_valid(n); n = n-&gt;next ) {
+<a name="l01239"></a>01239             hashcode_t h = my_hash_compare.hash( static_cast&lt;node*&gt;(n)-&gt;item.first ) &amp; mask;
+<a name="l01240"></a>01240             __TBB_ASSERT( h == b, <span class="stringliteral">"hash() function changed for key in table or internal error"</span> );
+<a name="l01241"></a>01241         }
+<a name="l01242"></a>01242 <span class="preprocessor">#endif</span>
+<a name="l01243"></a>01243 <span class="preprocessor"></span>    }
+<a name="l01244"></a>01244 <span class="preprocessor">#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS</span>
+<a name="l01245"></a>01245 <span class="preprocessor"></span><span class="preprocessor">#if TBB_USE_PERFORMANCE_WARNINGS</span>
+<a name="l01246"></a>01246 <span class="preprocessor"></span>    <span class="keywordflow">if</span>( buckets &gt; current_size) empty_buckets -= buckets - current_size;
+<a name="l01247"></a>01247     <span class="keywordflow">else</span> overpopulated_buckets -= current_size - buckets; <span class="comment">// TODO: load_factor?</span>
+<a name="l01248"></a>01248     <span class="keywordflow">if</span>( !reported &amp;&amp; buckets &gt;= 512 &amp;&amp; ( 2*empty_buckets &gt; current_size || 2*overpopulated_buckets &gt; current_size ) ) {
+<a name="l01249"></a>01249         tbb::internal::runtime_warning(
+<a name="l01250"></a>01250             <span class="stringliteral">"Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d  Empties: %d  Overlaps: %d"</span>,
+<a name="l01251"></a>01251             <span class="keyword">typeid</span>(*this).name(), current_size, empty_buckets, overpopulated_buckets );
+<a name="l01252"></a>01252         reported = <span class="keyword">true</span>;
+<a name="l01253"></a>01253     }
+<a name="l01254"></a>01254 <span class="preprocessor">#endif</span>
+<a name="l01255"></a>01255 <span class="preprocessor"></span>}
+<a name="l01256"></a>01256 
+<a name="l01257"></a>01257 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Key, <span class="keyword">typename</span> T, <span class="keyword">typename</span> HashCompare, <span class="keyword">typename</span> A&gt;
+<a name="l01258"></a><a class="code" href="a00160.html#a9f89be8fe28835749529d91081a2511">01258</a> <span class="keywordtype">void</span> <a class="code" href="a00160.html">concurrent_hash_map&lt;Key,T,HashCompare,A&gt;::clear</a>() {
+<a name="l01259"></a>01259     hashcode_t m = my_mask;
+<a name="l01260"></a>01260     __TBB_ASSERT((m&amp;(m+1))==0, NULL);
+<a name="l01261"></a>01261 <span class="preprocessor">#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS</span>
+<a name="l01262"></a>01262 <span class="preprocessor"></span><span class="preprocessor">#if TBB_USE_PERFORMANCE_WARNINGS</span>
+<a name="l01263"></a>01263 <span class="preprocessor"></span>    <span class="keywordtype">int</span> current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; <span class="comment">// usage statistics</span>
+<a name="l01264"></a>01264     <span class="keyword">static</span> <span class="keywordtype">bool</span> reported = <span class="keyword">false</span>;
+<a name="l01265"></a>01265 <span class="preprocessor">#endif</span>
+<a name="l01266"></a>01266 <span class="preprocessor"></span>    bucket *bp = 0;
+<a name="l01267"></a>01267     <span class="comment">// check consistency</span>
+<a name="l01268"></a>01268     <span class="keywordflow">for</span>( segment_index_t b = 0; b &lt;= m; b++ ) {
+<a name="l01269"></a>01269         <span class="keywordflow">if</span>( b &amp; (b-2) ) ++bp; <span class="comment">// not the beginning of a segment</span>
+<a name="l01270"></a>01270         <span class="keywordflow">else</span> bp = get_bucket( b );
+<a name="l01271"></a>01271         node_base *n = bp-&gt;node_list;
+<a name="l01272"></a>01272         __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, <span class="stringliteral">"Broken internal structure"</span> );
+<a name="l01273"></a>01273         __TBB_ASSERT( *reinterpret_cast&lt;intptr_t*&gt;(&amp;bp-&gt;mutex) == 0, <span class="stringliteral">"concurrent or unexpectedly terminated operation during clear() execution"</span> );
+<a name="l01274"></a>01274 <span class="preprocessor">#if TBB_USE_PERFORMANCE_WARNINGS</span>
+<a name="l01275"></a>01275 <span class="preprocessor"></span>        <span class="keywordflow">if</span>( n == internal::empty_rehashed ) empty_buckets++;
+<a name="l01276"></a>01276         <span class="keywordflow">else</span> <span class="keywordflow">if</span>( n == internal::rehash_req ) buckets--;
+<a name="l01277"></a>01277         <span class="keywordflow">else</span> <span class="keywordflow">if</span>( n-&gt;next ) overpopulated_buckets++;
+<a name="l01278"></a>01278 <span class="preprocessor">#endif</span>
+<a name="l01279"></a>01279 <span class="preprocessor"></span><span class="preprocessor">#if __TBB_EXTRA_DEBUG</span>
+<a name="l01280"></a>01280 <span class="preprocessor"></span>        <span class="keywordflow">for</span>(; is_valid(n); n = n-&gt;next ) {
+<a name="l01281"></a>01281             hashcode_t h = my_hash_compare.hash( static_cast&lt;node*&gt;(n)-&gt;item.first );
+<a name="l01282"></a>01282             h &amp;= m;
+<a name="l01283"></a>01283             __TBB_ASSERT( h == b || get_bucket(h)-&gt;node_list == internal::rehash_req, <span class="stringliteral">"hash() function changed for key in table or internal error"</span> );
+<a name="l01284"></a>01284         }
+<a name="l01285"></a>01285 <span class="preprocessor">#endif</span>
+<a name="l01286"></a>01286 <span class="preprocessor"></span>    }
+<a name="l01287"></a>01287 <span class="preprocessor">#if TBB_USE_PERFORMANCE_WARNINGS</span>
+<a name="l01288"></a>01288 <span class="preprocessor"></span><span class="preprocessor">#if __TBB_STATISTICS</span>
+<a name="l01289"></a>01289 <span class="preprocessor"></span>    printf( <span class="stringliteral">"items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d"</span>
+<a name="l01290"></a>01290         <span class="stringliteral">" concurrent: resizes=%u rehashes=%u restarts=%u\n"</span>,
+<a name="l01291"></a>01291         current_size, <span class="keywordtype">int</span>(m+1), buckets, empty_buckets, overpopulated_buckets,
+<a name="l01292"></a>01292         <span class="keywordtype">unsigned</span>(my_info_resizes), <span class="keywordtype">unsigned</span>(my_info_rehashes), <span class="keywordtype">unsigned</span>(my_info_restarts) );
+<a name="l01293"></a>01293     my_info_resizes = 0; <span class="comment">// concurrent ones</span>
+<a name="l01294"></a>01294     my_info_restarts = 0; <span class="comment">// race collisions</span>
+<a name="l01295"></a>01295     my_info_rehashes = 0;  <span class="comment">// invocations of rehash_bucket</span>
+<a name="l01296"></a>01296 <span class="preprocessor">#endif</span>
+<a name="l01297"></a>01297 <span class="preprocessor"></span>    <span class="keywordflow">if</span>( buckets &gt; current_size) empty_buckets -= buckets - current_size;
+<a name="l01298"></a>01298     <span class="keywordflow">else</span> overpopulated_buckets -= current_size - buckets; <span class="comment">// TODO: load_factor?</span>
+<a name="l01299"></a>01299     <span class="keywordflow">if</span>( !reported &amp;&amp; buckets &gt;= 512 &amp;&amp; ( 2*empty_buckets &gt; current_size || 2*overpopulated_buckets &gt; current_size ) ) {
+<a name="l01300"></a>01300         tbb::internal::runtime_warning(
+<a name="l01301"></a>01301             <span class="stringliteral">"Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d  Empties: %d  Overlaps: %d"</span>,
+<a name="l01302"></a>01302             <span class="keyword">typeid</span>(*this).name(), current_size, empty_buckets, overpopulated_buckets );
+<a name="l01303"></a>01303         reported = <span class="keyword">true</span>;
+<a name="l01304"></a>01304     }
+<a name="l01305"></a>01305 <span class="preprocessor">#endif</span>
+<a name="l01306"></a>01306 <span class="preprocessor"></span><span class="preprocessor">#endif//TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS</span>
+<a name="l01307"></a>01307 <span class="preprocessor"></span>    my_size = 0;
+<a name="l01308"></a>01308     segment_index_t s = segment_index_of( m );
+<a name="l01309"></a>01309     __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], <span class="stringliteral">"wrong mask or concurrent grow"</span> );
+<a name="l01310"></a>01310     <a class="code" href="a00155.html">cache_aligned_allocator&lt;bucket&gt;</a> alloc;
+<a name="l01311"></a>01311     <span class="keywordflow">do</span> {
+<a name="l01312"></a>01312         __TBB_ASSERT( is_valid( my_table[s] ), <span class="stringliteral">"wrong mask or concurrent grow"</span> );
+<a name="l01313"></a>01313         segment_ptr_t buckets_ptr = my_table[s];
+<a name="l01314"></a>01314         size_type sz = segment_size( s ? s : 1 );
+<a name="l01315"></a>01315         <span class="keywordflow">for</span>( segment_index_t i = 0; i &lt; sz; i++ )
+<a name="l01316"></a>01316             <span class="keywordflow">for</span>( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {
+<a name="l01317"></a>01317                 buckets_ptr[i].node_list = n-&gt;next;
+<a name="l01318"></a>01318                 delete_node( n );
+<a name="l01319"></a>01319             }
+<a name="l01320"></a>01320         <span class="keywordflow">if</span>( s &gt;= first_block) <span class="comment">// the first segment or the next</span>
+<a name="l01321"></a>01321             alloc.<a class="code" href="a00155.html#3d4eadf188f7d85d3805ae534e0b8e1c">deallocate</a>( buckets_ptr, sz );
+<a name="l01322"></a>01322         <span class="keywordflow">else</span> <span class="keywordflow">if</span>( s == embedded_block &amp;&amp; embedded_block != first_block )
+<a name="l01323"></a>01323             alloc.<a class="code" href="a00155.html#3d4eadf188f7d85d3805ae534e0b8e1c">deallocate</a>( buckets_ptr, segment_size(first_block)-embedded_buckets );
+<a name="l01324"></a>01324         <span class="keywordflow">if</span>( s &gt;= embedded_block ) my_table[s] = 0;
+<a name="l01325"></a>01325     } <span class="keywordflow">while</span>(s-- &gt; 0);
+<a name="l01326"></a>01326     my_mask = embedded_buckets - 1;
+<a name="l01327"></a>01327 }
+<a name="l01328"></a>01328 
+<a name="l01329"></a>01329 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Key, <span class="keyword">typename</span> T, <span class="keyword">typename</span> HashCompare, <span class="keyword">typename</span> A&gt;
+<a name="l01330"></a><a class="code" href="a00160.html#3c27779fe66b79505390d084310d997e">01330</a> <span class="keywordtype">void</span> <a class="code" href="a00160.html">concurrent_hash_map&lt;Key,T,HashCompare,A&gt;::internal_copy</a>( <span class="keyword">const</span> <a class="code" href="a00160.html">concurrent_hash_map</a>&amp; source ) {
+<a name="l01331"></a>01331     reserve( source.my_size ); <span class="comment">// TODO: load_factor?</span>
+<a name="l01332"></a>01332     hashcode_t mask = source.my_mask;
+<a name="l01333"></a>01333     <span class="keywordflow">if</span>( my_mask == mask ) { <span class="comment">// optimized version</span>
+<a name="l01334"></a>01334         bucket *dst = 0, *src = 0;
+<a name="l01335"></a>01335         <span class="keywordtype">bool</span> rehash_required = <span class="keyword">false</span>;
+<a name="l01336"></a>01336         <span class="keywordflow">for</span>( hashcode_t k = 0; k &lt;= mask; k++ ) {
+<a name="l01337"></a>01337             <span class="keywordflow">if</span>( k &amp; (k-2) ) ++dst,src++; <span class="comment">// not the beginning of a segment</span>
+<a name="l01338"></a>01338             <span class="keywordflow">else</span> { dst = get_bucket( k ); src = source.get_bucket( k ); }
+<a name="l01339"></a>01339             __TBB_ASSERT( dst-&gt;node_list != internal::rehash_req, <span class="stringliteral">"Invalid bucket in destination table"</span>);
+<a name="l01340"></a>01340             node *n = static_cast&lt;node*&gt;( src-&gt;node_list );
+<a name="l01341"></a>01341             <span class="keywordflow">if</span>( n == internal::rehash_req ) { <span class="comment">// source is not rehashed, items are in previous buckets</span>
+<a name="l01342"></a>01342                 rehash_required = <span class="keyword">true</span>;
+<a name="l01343"></a>01343                 dst-&gt;node_list = internal::rehash_req;
+<a name="l01344"></a>01344             } <span class="keywordflow">else</span> <span class="keywordflow">for</span>(; n; n = static_cast&lt;node*&gt;( n-&gt;next ) ) {
+<a name="l01345"></a>01345                 add_to_bucket( dst, <span class="keyword">new</span>( my_allocator ) node(n-&gt;item.first, n-&gt;item.second) );
+<a name="l01346"></a>01346                 ++my_size; <span class="comment">// TODO: replace by non-atomic op</span>
+<a name="l01347"></a>01347             }
+<a name="l01348"></a>01348         }
+<a name="l01349"></a>01349         <span class="keywordflow">if</span>( rehash_required ) <a class="code" href="a00160.html#13f3f2e8de7564be03882c31559493c9">rehash</a>();
+<a name="l01350"></a>01350     } <span class="keywordflow">else</span> <a class="code" href="a00160.html#3c27779fe66b79505390d084310d997e">internal_copy</a>( source.<a class="code" href="a00160.html#4a3c58cf1234b74ca796dcf555d32f53">begin</a>(), source.<a class="code" href="a00160.html#28c690486d8db5783475f5b1a59d21bc">end</a>() );
+<a name="l01351"></a>01351 }
+<a name="l01352"></a>01352 
+<a name="l01353"></a>01353 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Key, <span class="keyword">typename</span> T, <span class="keyword">typename</span> HashCompare, <span class="keyword">typename</span> A&gt;
+<a name="l01354"></a>01354 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> I&gt;
+<a name="l01355"></a>01355 <span class="keywordtype">void</span> <a class="code" href="a00160.html">concurrent_hash_map&lt;Key,T,HashCompare,A&gt;::internal_copy</a>(I first, I last) {
+<a name="l01356"></a>01356     hashcode_t m = my_mask;
+<a name="l01357"></a>01357     <span class="keywordflow">for</span>(; first != last; ++first) {
+<a name="l01358"></a>01358         hashcode_t h = my_hash_compare.hash( first-&gt;first );
+<a name="l01359"></a>01359         bucket *b = get_bucket( h &amp; m );
+<a name="l01360"></a>01360         __TBB_ASSERT( b-&gt;node_list != internal::rehash_req, <span class="stringliteral">"Invalid bucket in destination table"</span>);
+<a name="l01361"></a>01361         node *n = <span class="keyword">new</span>( my_allocator ) node(first-&gt;first, first-&gt;second);
+<a name="l01362"></a>01362         add_to_bucket( b, n );
+<a name="l01363"></a>01363         ++my_size; <span class="comment">// TODO: replace by non-atomic op</span>
+<a name="l01364"></a>01364     }
+<a name="l01365"></a>01365 }
+<a name="l01366"></a>01366 
+<a name="l01367"></a>01367 } <span class="comment">// namespace interface4</span>
+<a name="l01368"></a>01368 
+<a name="l01369"></a>01369 <span class="keyword">using</span> interface4::concurrent_hash_map;
+<a name="l01370"></a>01370 
+<a name="l01371"></a>01371 
+<a name="l01372"></a>01372 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Key, <span class="keyword">typename</span> T, <span class="keyword">typename</span> HashCompare, <span class="keyword">typename</span> A1, <span class="keyword">typename</span> A2&gt;
+<a name="l01373"></a>01373 <span class="keyword">inline</span> <span class="keywordtype">bool</span> operator==(<span class="keyword">const</span> concurrent_hash_map&lt;Key, T, HashCompare, A1&gt; &amp;a, <span class="keyword">const</span> concurrent_hash_map&lt;Key, T, HashCompare, A2&gt; &amp;b) {
+<a name="l01374"></a>01374     <span class="keywordflow">if</span>(a.size() != b.size()) <span class="keywordflow">return</span> <span class="keyword">false</span>;
+<a name="l01375"></a>01375     <span class="keyword">typename</span> concurrent_hash_map&lt;Key, T, HashCompare, A1&gt;::const_iterator i(a.begin()), i_end(a.end());
+<a name="l01376"></a>01376     <span class="keyword">typename</span> concurrent_hash_map&lt;Key, T, HashCompare, A2&gt;::const_iterator j, j_end(b.end());
+<a name="l01377"></a>01377     <span class="keywordflow">for</span>(; i != i_end; ++i) {
+<a name="l01378"></a>01378         j = b.equal_range(i-&gt;first).first;
+<a name="l01379"></a>01379         <span class="keywordflow">if</span>( j == j_end || !(i-&gt;second == j-&gt;second) ) <span class="keywordflow">return</span> <span class="keyword">false</span>;
+<a name="l01380"></a>01380     }
+<a name="l01381"></a>01381     <span class="keywordflow">return</span> <span class="keyword">true</span>;
+<a name="l01382"></a>01382 }
+<a name="l01383"></a>01383 
+<a name="l01384"></a>01384 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Key, <span class="keyword">typename</span> T, <span class="keyword">typename</span> HashCompare, <span class="keyword">typename</span> A1, <span class="keyword">typename</span> A2&gt;
+<a name="l01385"></a>01385 <span class="keyword">inline</span> <span class="keywordtype">bool</span> operator!=(<span class="keyword">const</span> concurrent_hash_map&lt;Key, T, HashCompare, A1&gt; &amp;a, <span class="keyword">const</span> concurrent_hash_map&lt;Key, T, HashCompare, A2&gt; &amp;b)
+<a name="l01386"></a>01386 {    <span class="keywordflow">return</span> !(a == b); }
+<a name="l01387"></a>01387 
+<a name="l01388"></a>01388 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Key, <span class="keyword">typename</span> T, <span class="keyword">typename</span> HashCompare, <span class="keyword">typename</span> A&gt;
+<a name="l01389"></a>01389 <span class="keyword">inline</span> <span class="keywordtype">void</span> swap(concurrent_hash_map&lt;Key, T, HashCompare, A&gt; &amp;a, concurrent_hash_map&lt;Key, T, HashCompare, A&gt; &amp;b)
+<a name="l01390"></a>01390 {    a.swap( b ); }
+<a name="l01391"></a>01391 
+<a name="l01392"></a>01392 <span class="preprocessor">#if _MSC_VER &amp;&amp; !defined(__INTEL_COMPILER)</span>
+<a name="l01393"></a>01393 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning( pop )</span>
+<a name="l01394"></a>01394 <span class="preprocessor"></span><span class="preprocessor">#endif // warning 4127 is back</span>
+<a name="l01395"></a>01395 <span class="preprocessor"></span>
+<a name="l01396"></a>01396 } <span class="comment">// namespace tbb</span>
+<a name="l01397"></a>01397 
+<a name="l01398"></a>01398 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_concurrent_hash_map_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
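
For orientation, the listing above implements the concurrent_hash_map member functions erase(), internal_equal_range(), rehash(), clear(), swap() and internal_copy(). The following is a minimal, illustrative usage sketch only; it is not part of the committed TBB sources. It assumes the default HashCompare supplied by TBB for std::string keys, and the names word_count_t and main() are invented for the example.

    #include <cstdio>
    #include <string>
    #include "tbb/concurrent_hash_map.h"

    // Illustrative sketch: a word-count style map keyed by std::string,
    // relying on the HashCompare default assumed above.
    typedef tbb::concurrent_hash_map<std::string, int> word_count_t;

    int main() {
        word_count_t table;
        {
            // accessor holds a write lock on the element until it leaves scope
            word_count_t::accessor a;
            table.insert( a, std::string("tbb") );
            a->second += 1;
        }
        {
            // const_accessor holds only a read lock
            word_count_t::const_accessor ca;
            if( table.find( ca, std::string("tbb") ) )
                std::printf( "tbb -> %d\n", ca->second );
        }
        table.erase( std::string("tbb") ); // exclusive removal, as in erase() above
        return 0;
    }
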
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00341.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00341.html
new file mode 100644 (file)
index 0000000..4fe50f8
--- /dev/null
@@ -0,0 +1,365 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>concurrent_queue.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>concurrent_queue.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_concurrent_queue_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_concurrent_queue_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "_concurrent_queue_internal.h"</span>
+<a name="l00025"></a>00025 
+<a name="l00026"></a>00026 <span class="keyword">namespace </span>tbb {
+<a name="l00027"></a>00027 
+<a name="l00028"></a>00028 <span class="keyword">namespace </span>strict_ppl {
+<a name="l00029"></a>00029 
+<a name="l00031"></a>00031 
+<a name="l00034"></a>00034 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> A = cache_aligned_allocator&lt;T&gt; &gt; 
+<a name="l00035"></a><a class="code" href="a00164.html">00035</a> <span class="keyword">class </span><a class="code" href="a00164.html">concurrent_queue</a>: <span class="keyword">public</span> internal::concurrent_queue_base_v3&lt;T&gt; {
+<a name="l00036"></a>00036     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> Value&gt; <span class="keyword">friend</span> <span class="keyword">class </span>internal::concurrent_queue_iterator;
+<a name="l00037"></a>00037 
+<a name="l00039"></a>00039     <span class="keyword">typedef</span> <span class="keyword">typename</span> A::template rebind&lt;char&gt;::other page_allocator_type;
+<a name="l00040"></a>00040     page_allocator_type my_allocator;
+<a name="l00041"></a>00041 
+<a name="l00043"></a>00043     <span class="comment">/*overide*/</span> <span class="keyword">virtual</span> <span class="keywordtype">void</span> *allocate_block( size_t n ) {
+<a name="l00044"></a>00044         <span class="keywordtype">void</span> *b = reinterpret_cast&lt;void*&gt;(my_allocator.allocate( n ));
+<a name="l00045"></a>00045         <span class="keywordflow">if</span>( !b )
+<a name="l00046"></a>00046             internal::throw_exception(internal::eid_bad_alloc); 
+<a name="l00047"></a>00047         <span class="keywordflow">return</span> b;
+<a name="l00048"></a>00048     }
+<a name="l00049"></a>00049 
+<a name="l00051"></a>00051     <span class="comment">/*override*/</span> <span class="keyword">virtual</span> <span class="keywordtype">void</span> deallocate_block( <span class="keywordtype">void</span> *b, size_t n ) {
+<a name="l00052"></a>00052         my_allocator.deallocate( reinterpret_cast&lt;char*&gt;(b), n );
+<a name="l00053"></a>00053     }
+<a name="l00054"></a>00054 
+<a name="l00055"></a>00055 <span class="keyword">public</span>:
+<a name="l00057"></a><a class="code" href="a00164.html#682c3978d5cb0620000994f11c44a476">00057</a>     <span class="keyword">typedef</span> T <a class="code" href="a00164.html#682c3978d5cb0620000994f11c44a476">value_type</a>;
+<a name="l00058"></a>00058 
+<a name="l00060"></a><a class="code" href="a00164.html#a8d725c50a9834bb7af5b67c0aff92b8">00060</a>     <span class="keyword">typedef</span> T&amp; <a class="code" href="a00164.html#a8d725c50a9834bb7af5b67c0aff92b8">reference</a>;
+<a name="l00061"></a>00061 
+<a name="l00063"></a><a class="code" href="a00164.html#4d48e7ff93f81636bca2c74f7da34750">00063</a>     <span class="keyword">typedef</span> <span class="keyword">const</span> T&amp; <a class="code" href="a00164.html#4d48e7ff93f81636bca2c74f7da34750">const_reference</a>;
+<a name="l00064"></a>00064 
+<a name="l00066"></a><a class="code" href="a00164.html#8fc30e93f8342a1960357f71e4fe8a2b">00066</a>     <span class="keyword">typedef</span> size_t <a class="code" href="a00164.html#8fc30e93f8342a1960357f71e4fe8a2b">size_type</a>;
+<a name="l00067"></a>00067 
+<a name="l00069"></a><a class="code" href="a00164.html#068576d16c7e4e05d52f9db7a45b5b65">00069</a>     <span class="keyword">typedef</span> ptrdiff_t <a class="code" href="a00164.html#068576d16c7e4e05d52f9db7a45b5b65">difference_type</a>;
+<a name="l00070"></a>00070 
+<a name="l00072"></a><a class="code" href="a00164.html#5a3956341728eaa558d8827063718cac">00072</a>     <span class="keyword">typedef</span> A <a class="code" href="a00164.html#5a3956341728eaa558d8827063718cac">allocator_type</a>;
+<a name="l00073"></a>00073 
+<a name="l00075"></a><a class="code" href="a00164.html#7c48a05a94a1f4f98fdfadfbef98ecf6">00075</a>     <span class="keyword">explicit</span> <a class="code" href="a00164.html#7c48a05a94a1f4f98fdfadfbef98ecf6">concurrent_queue</a>(<span class="keyword">const</span> <a class="code" href="a00164.html#5a3956341728eaa558d8827063718cac">allocator_type</a>&amp; a = <a class="code" href="a00164.html#5a3956341728eaa558d8827063718cac">allocator_type</a>()) : 
+<a name="l00076"></a>00076         my_allocator( a )
+<a name="l00077"></a>00077     {
+<a name="l00078"></a>00078     }
+<a name="l00079"></a>00079 
+<a name="l00081"></a>00081     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> InputIterator&gt;
+<a name="l00082"></a><a class="code" href="a00164.html#25209656c84f2f9b030e2f9162713341">00082</a>     <a class="code" href="a00164.html#7c48a05a94a1f4f98fdfadfbef98ecf6">concurrent_queue</a>( InputIterator begin, InputIterator end, <span class="keyword">const</span> <a class="code" href="a00164.html#5a3956341728eaa558d8827063718cac">allocator_type</a>&amp; a = <a class="code" href="a00164.html#5a3956341728eaa558d8827063718cac">allocator_type</a>()) :
+<a name="l00083"></a>00083         my_allocator( a )
+<a name="l00084"></a>00084     {
+<a name="l00085"></a>00085         <span class="keywordflow">for</span>( ; begin != end; ++begin )
+<a name="l00086"></a>00086             this-&gt;internal_push(&amp;*begin);
+<a name="l00087"></a>00087     }
+<a name="l00088"></a>00088     
+<a name="l00090"></a><a class="code" href="a00164.html#8a6b98ea11a867db8ac868f0113ca429">00090</a>     <a class="code" href="a00164.html#7c48a05a94a1f4f98fdfadfbef98ecf6">concurrent_queue</a>( <span class="keyword">const</span> <a class="code" href="a00164.html">concurrent_queue</a>&amp; src, <span class="keyword">const</span> <a class="code" href="a00164.html#5a3956341728eaa558d8827063718cac">allocator_type</a>&amp; a = <a class="code" href="a00164.html#5a3956341728eaa558d8827063718cac">allocator_type</a>()) : 
+<a name="l00091"></a>00091         internal::concurrent_queue_base_v3&lt;T&gt;(), my_allocator( a )
+<a name="l00092"></a>00092     {
+<a name="l00093"></a>00093         this-&gt;assign( src );
+<a name="l00094"></a>00094     }
+<a name="l00095"></a>00095     
+<a name="l00097"></a>00097     <a class="code" href="a00164.html#830b33753d6b149c366344e29b2edd8c">~concurrent_queue</a>();
+<a name="l00098"></a>00098 
+<a name="l00100"></a><a class="code" href="a00164.html#73c47563ffcc4c2f6452f25a04ebe2e2">00100</a>     <span class="keywordtype">void</span> <a class="code" href="a00164.html#73c47563ffcc4c2f6452f25a04ebe2e2">push</a>( <span class="keyword">const</span> T&amp; source ) {
+<a name="l00101"></a>00101         this-&gt;internal_push( &amp;source );
+<a name="l00102"></a>00102     }
+<a name="l00103"></a>00103 
+<a name="l00105"></a>00105 
+<a name="l00107"></a><a class="code" href="a00164.html#ae31ca0db34ef96ef1e74aa0d28c95f8">00107</a>     <span class="keywordtype">bool</span> <a class="code" href="a00164.html#ae31ca0db34ef96ef1e74aa0d28c95f8">try_pop</a>( T&amp; result ) {
+<a name="l00108"></a>00108         <span class="keywordflow">return</span> this-&gt;internal_try_pop( &amp;result );
+<a name="l00109"></a>00109     }
+<a name="l00110"></a>00110 
+<a name="l00112"></a><a class="code" href="a00164.html#eaa35a5274606779802e9a669a706260">00112</a>     <a class="code" href="a00164.html#8fc30e93f8342a1960357f71e4fe8a2b">size_type</a> <a class="code" href="a00164.html#eaa35a5274606779802e9a669a706260">unsafe_size</a>()<span class="keyword"> const </span>{<span class="keywordflow">return</span> this-&gt;internal_size();}
+<a name="l00113"></a>00113 
+<a name="l00115"></a><a class="code" href="a00164.html#f3f6fce0cfa2d581d6f3b47e0613ad64">00115</a>     <span class="keywordtype">bool</span> <a class="code" href="a00164.html#f3f6fce0cfa2d581d6f3b47e0613ad64">empty</a>()<span class="keyword"> const </span>{<span class="keywordflow">return</span> this-&gt;internal_empty();}
+<a name="l00116"></a>00116 
+<a name="l00118"></a>00118     <span class="keywordtype">void</span> <a class="code" href="a00164.html#c32e8e84c0524155133b4aae32d2a827">clear</a>() ;
+<a name="l00119"></a>00119 
+<a name="l00121"></a><a class="code" href="a00164.html#f034f70caef445fe8abc9113ec926a8d">00121</a>     <a class="code" href="a00164.html#5a3956341728eaa558d8827063718cac">allocator_type</a> <a class="code" href="a00164.html#f034f70caef445fe8abc9113ec926a8d">get_allocator</a>()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> this-&gt;my_allocator; }
+<a name="l00122"></a>00122 
+<a name="l00123"></a>00123     <span class="keyword">typedef</span> internal::concurrent_queue_iterator&lt;concurrent_queue,T&gt; iterator;
+<a name="l00124"></a>00124     <span class="keyword">typedef</span> internal::concurrent_queue_iterator&lt;concurrent_queue,const T&gt; const_iterator;
+<a name="l00125"></a>00125 
+<a name="l00126"></a>00126     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00127"></a>00127     <span class="comment">// The iterators are intended only for debugging.  They are slow and not thread safe.</span>
+<a name="l00128"></a>00128     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00129"></a>00129     iterator unsafe_begin() {<span class="keywordflow">return</span> iterator(*<span class="keyword">this</span>);}
+<a name="l00130"></a>00130     iterator unsafe_end() {<span class="keywordflow">return</span> iterator();}
+<a name="l00131"></a>00131     const_iterator unsafe_begin()<span class="keyword"> const </span>{<span class="keywordflow">return</span> const_iterator(*<span class="keyword">this</span>);}
+<a name="l00132"></a>00132     const_iterator unsafe_end()<span class="keyword"> const </span>{<span class="keywordflow">return</span> const_iterator();}
+<a name="l00133"></a>00133 } ;
+<a name="l00134"></a>00134 
+<a name="l00135"></a>00135 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A&gt;
+<a name="l00136"></a><a class="code" href="a00164.html#830b33753d6b149c366344e29b2edd8c">00136</a> <a class="code" href="a00164.html">concurrent_queue&lt;T,A&gt;::~concurrent_queue</a>() {
+<a name="l00137"></a>00137     <a class="code" href="a00164.html#c32e8e84c0524155133b4aae32d2a827">clear</a>();
+<a name="l00138"></a>00138     this-&gt;internal_finish_clear();
+<a name="l00139"></a>00139 }
+<a name="l00140"></a>00140 
+<a name="l00141"></a>00141 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A&gt;
+<a name="l00142"></a><a class="code" href="a00164.html#c32e8e84c0524155133b4aae32d2a827">00142</a> <span class="keywordtype">void</span> <a class="code" href="a00164.html">concurrent_queue&lt;T,A&gt;::clear</a>() {
+<a name="l00143"></a>00143     <span class="keywordflow">while</span>( !<a class="code" href="a00164.html#f3f6fce0cfa2d581d6f3b47e0613ad64">empty</a>() ) {
+<a name="l00144"></a>00144         T value;
+<a name="l00145"></a>00145         this-&gt;internal_try_pop(&amp;value);
+<a name="l00146"></a>00146     }
+<a name="l00147"></a>00147 }
+<a name="l00148"></a>00148 
+<a name="l00149"></a>00149 } <span class="comment">// namespace strict_ppl</span>
+<a name="l00150"></a>00150     
+<a name="l00152"></a>00152 
+<a name="l00157"></a>00157 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A = cache_aligned_allocator&lt;T&gt; &gt;
+<a name="l00158"></a><a class="code" href="a00159.html">00158</a> <span class="keyword">class </span><a class="code" href="a00159.html">concurrent_bounded_queue</a>: <span class="keyword">public</span> internal::concurrent_queue_base_v3 {
+<a name="l00159"></a>00159     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> Value&gt; <span class="keyword">friend</span> <span class="keyword">class </span>internal::concurrent_queue_iterator;
+<a name="l00160"></a>00160 
+<a name="l00162"></a>00162     <span class="keyword">typedef</span> <span class="keyword">typename</span> A::template rebind&lt;char&gt;::other page_allocator_type;
+<a name="l00163"></a>00163     page_allocator_type my_allocator;
+<a name="l00164"></a>00164 
+<a name="l00165"></a>00165     <span class="keyword">typedef</span> <span class="keyword">typename</span> concurrent_queue_base_v3::padded_page&lt;T&gt; padded_page;
+<a name="l00166"></a>00166  
+<a name="l00168"></a>00168     <span class="keyword">class </span>destroyer: internal::no_copy {
+<a name="l00169"></a>00169         T&amp; my_value;
+<a name="l00170"></a>00170     <span class="keyword">public</span>:
+<a name="l00171"></a>00171         destroyer( T&amp; value ) : my_value(value) {}
+<a name="l00172"></a>00172         ~destroyer() {my_value.~T();}          
+<a name="l00173"></a>00173     };
+<a name="l00174"></a>00174 
+<a name="l00175"></a>00175     T&amp; get_ref( page&amp; p, size_t index ) {
+<a name="l00176"></a>00176         __TBB_ASSERT( index&lt;items_per_page, NULL );
+<a name="l00177"></a>00177         <span class="keywordflow">return</span> (&amp;static_cast&lt;padded_page*&gt;(static_cast&lt;void*&gt;(&amp;p))-&gt;last)[index];
+<a name="l00178"></a>00178     }
+<a name="l00179"></a>00179 
+<a name="l00180"></a>00180     <span class="comment">/*override*/</span> <span class="keyword">virtual</span> <span class="keywordtype">void</span> copy_item( page&amp; dst, size_t index, <span class="keyword">const</span> <span class="keywordtype">void</span>* src ) {
+<a name="l00181"></a>00181         <span class="keyword">new</span>( &amp;get_ref(dst,index) ) T(*static_cast&lt;const T*&gt;(src)); 
+<a name="l00182"></a>00182     }
+<a name="l00183"></a>00183 
+<a name="l00184"></a>00184     <span class="comment">/*override*/</span> <span class="keyword">virtual</span> <span class="keywordtype">void</span> copy_page_item( page&amp; dst, size_t dindex, <span class="keyword">const</span> page&amp; src, size_t sindex ) {
+<a name="l00185"></a>00185         <span class="keyword">new</span>( &amp;get_ref(dst,dindex) ) T( get_ref( const_cast&lt;page&amp;&gt;(src), sindex ) );
+<a name="l00186"></a>00186     }
+<a name="l00187"></a>00187 
+<a name="l00188"></a>00188     <span class="comment">/*override*/</span> <span class="keyword">virtual</span> <span class="keywordtype">void</span> assign_and_destroy_item( <span class="keywordtype">void</span>* dst, page&amp; src, size_t index ) {
+<a name="l00189"></a>00189         T&amp; from = get_ref(src,index);
+<a name="l00190"></a>00190         destroyer d(from);
+<a name="l00191"></a>00191         *static_cast&lt;T*&gt;(dst) = from;
+<a name="l00192"></a>00192     }
+<a name="l00193"></a>00193 
+<a name="l00194"></a>00194     <span class="comment">/*overide*/</span> <span class="keyword">virtual</span> page *allocate_page() {
+<a name="l00195"></a>00195         size_t n = <span class="keyword">sizeof</span>(padded_page) + (items_per_page-1)*<span class="keyword">sizeof</span>(T);
+<a name="l00196"></a>00196         page *p = reinterpret_cast&lt;page*&gt;(my_allocator.allocate( n ));
+<a name="l00197"></a>00197         <span class="keywordflow">if</span>( !p )
+<a name="l00198"></a>00198             internal::throw_exception(internal::eid_bad_alloc); 
+<a name="l00199"></a>00199         <span class="keywordflow">return</span> p;
+<a name="l00200"></a>00200     }
+<a name="l00201"></a>00201 
+<a name="l00202"></a>00202     <span class="comment">/*override*/</span> <span class="keyword">virtual</span> <span class="keywordtype">void</span> deallocate_page( page *p ) {
+<a name="l00203"></a>00203         size_t n = <span class="keyword">sizeof</span>(padded_page) + items_per_page*<span class="keyword">sizeof</span>(T);
+<a name="l00204"></a>00204         my_allocator.deallocate( reinterpret_cast&lt;char*&gt;(p), n );
+<a name="l00205"></a>00205     }
+<a name="l00206"></a>00206 
+<a name="l00207"></a>00207 <span class="keyword">public</span>:
+<a name="l00209"></a><a class="code" href="a00159.html#98245517a931e5893f6601e66c51fc75">00209</a>     <span class="keyword">typedef</span> T <a class="code" href="a00159.html#98245517a931e5893f6601e66c51fc75">value_type</a>;
+<a name="l00210"></a>00210 
+<a name="l00212"></a><a class="code" href="a00159.html#2e2726fccf6d975dc1071608cc0bbf90">00212</a>     <span class="keyword">typedef</span> A <a class="code" href="a00159.html#2e2726fccf6d975dc1071608cc0bbf90">allocator_type</a>;
+<a name="l00213"></a>00213 
+<a name="l00215"></a><a class="code" href="a00159.html#dcd44ca6a88c0dc7a847a47a10811f0c">00215</a>     <span class="keyword">typedef</span> T&amp; <a class="code" href="a00159.html#dcd44ca6a88c0dc7a847a47a10811f0c">reference</a>;
+<a name="l00216"></a>00216 
+<a name="l00218"></a><a class="code" href="a00159.html#796713d0b9ba93a4721cbe13e4474068">00218</a>     <span class="keyword">typedef</span> <span class="keyword">const</span> T&amp; <a class="code" href="a00159.html#796713d0b9ba93a4721cbe13e4474068">const_reference</a>;
+<a name="l00219"></a>00219 
+<a name="l00221"></a>00221 
+<a name="l00223"></a><a class="code" href="a00159.html#a80e4c11dbb324e4b92a24a77bbcde68">00223</a>     <span class="keyword">typedef</span> std::ptrdiff_t <a class="code" href="a00159.html#a80e4c11dbb324e4b92a24a77bbcde68">size_type</a>;
+<a name="l00224"></a>00224 
+<a name="l00226"></a><a class="code" href="a00159.html#4b45c91297e69515d83d5eef85ae1f49">00226</a>     <span class="keyword">typedef</span> std::ptrdiff_t <a class="code" href="a00159.html#4b45c91297e69515d83d5eef85ae1f49">difference_type</a>;
+<a name="l00227"></a>00227 
+<a name="l00229"></a><a class="code" href="a00159.html#e3525226732963ff0f13e89d8e6dab5b">00229</a>     <span class="keyword">explicit</span> <a class="code" href="a00159.html#e3525226732963ff0f13e89d8e6dab5b">concurrent_bounded_queue</a>(<span class="keyword">const</span> <a class="code" href="a00159.html#2e2726fccf6d975dc1071608cc0bbf90">allocator_type</a>&amp; a = <a class="code" href="a00159.html#2e2726fccf6d975dc1071608cc0bbf90">allocator_type</a>()) : 
+<a name="l00230"></a>00230         concurrent_queue_base_v3( sizeof(T) ), my_allocator( a )
+<a name="l00231"></a>00231     {
+<a name="l00232"></a>00232     }
+<a name="l00233"></a>00233 
+<a name="l00235"></a><a class="code" href="a00159.html#7b7fb414d2eaa8a7f5d68fc4cd63ac50">00235</a>     <a class="code" href="a00159.html#e3525226732963ff0f13e89d8e6dab5b">concurrent_bounded_queue</a>( <span class="keyword">const</span> <a class="code" href="a00159.html">concurrent_bounded_queue</a>&amp; src, <span class="keyword">const</span> <a class="code" href="a00159.html#2e2726fccf6d975dc1071608cc0bbf90">allocator_type</a>&amp; a = <a class="code" href="a00159.html#2e2726fccf6d975dc1071608cc0bbf90">allocator_type</a>()) : 
+<a name="l00236"></a>00236         concurrent_queue_base_v3( sizeof(T) ), my_allocator( a )
+<a name="l00237"></a>00237     {
+<a name="l00238"></a>00238         assign( src );
+<a name="l00239"></a>00239     }
+<a name="l00240"></a>00240 
+<a name="l00242"></a>00242     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> InputIterator&gt;
+<a name="l00243"></a><a class="code" href="a00159.html#a5e04dcd7db9fd9b583b4e7df832246a">00243</a>     <a class="code" href="a00159.html#e3525226732963ff0f13e89d8e6dab5b">concurrent_bounded_queue</a>( InputIterator begin, InputIterator end, <span class="keyword">const</span> <a class="code" href="a00159.html#2e2726fccf6d975dc1071608cc0bbf90">allocator_type</a>&amp; a = <a class="code" href="a00159.html#2e2726fccf6d975dc1071608cc0bbf90">allocator_type</a>()) :
+<a name="l00244"></a>00244         concurrent_queue_base_v3( sizeof(T) ), my_allocator( a )
+<a name="l00245"></a>00245     {
+<a name="l00246"></a>00246         <span class="keywordflow">for</span>( ; begin != end; ++begin )
+<a name="l00247"></a>00247             internal_push_if_not_full(&amp;*begin);
+<a name="l00248"></a>00248     }
+<a name="l00249"></a>00249 
+<a name="l00251"></a>00251     <a class="code" href="a00159.html#acaf5b510dc0dfc7780b8c956cf773cf">~concurrent_bounded_queue</a>();
+<a name="l00252"></a>00252 
+<a name="l00254"></a><a class="code" href="a00159.html#ceb08c743b11ba88c878e73fff8af20b">00254</a>     <span class="keywordtype">void</span> <a class="code" href="a00159.html#ceb08c743b11ba88c878e73fff8af20b">push</a>( <span class="keyword">const</span> T&amp; source ) {
+<a name="l00255"></a>00255         internal_push( &amp;source );
+<a name="l00256"></a>00256     }
+<a name="l00257"></a>00257 
+<a name="l00259"></a>00259 
+<a name="l00260"></a><a class="code" href="a00159.html#41f4c6bd7a82ab070e840bbf81b0b123">00260</a>     <span class="keywordtype">void</span> <a class="code" href="a00159.html#41f4c6bd7a82ab070e840bbf81b0b123">pop</a>( T&amp; destination ) {
+<a name="l00261"></a>00261         internal_pop( &amp;destination );
+<a name="l00262"></a>00262     }
+<a name="l00263"></a>00263 
+<a name="l00265"></a>00265 
+<a name="l00267"></a><a class="code" href="a00159.html#2bd6232531279fb3ccbd296bea23066b">00267</a>     <span class="keywordtype">bool</span> <a class="code" href="a00159.html#2bd6232531279fb3ccbd296bea23066b">try_push</a>( <span class="keyword">const</span> T&amp; source ) {
+<a name="l00268"></a>00268         <span class="keywordflow">return</span> internal_push_if_not_full( &amp;source );
+<a name="l00269"></a>00269     }
+<a name="l00270"></a>00270 
+<a name="l00272"></a>00272 
+<a name="l00274"></a><a class="code" href="a00159.html#0ca487019bbb00a196442aff78a1e4f7">00274</a>     <span class="keywordtype">bool</span> <a class="code" href="a00159.html#0ca487019bbb00a196442aff78a1e4f7">try_pop</a>( T&amp; destination ) {
+<a name="l00275"></a>00275         <span class="keywordflow">return</span> internal_pop_if_present( &amp;destination );
+<a name="l00276"></a>00276     }
+<a name="l00277"></a>00277 
+<a name="l00279"></a>00279 
+<a name="l00282"></a><a class="code" href="a00159.html#7dc14d1a579a4cccda9f857585e1768d">00282</a>     <a class="code" href="a00159.html#a80e4c11dbb324e4b92a24a77bbcde68">size_type</a> <a class="code" href="a00159.html#7dc14d1a579a4cccda9f857585e1768d">size</a>()<span class="keyword"> const </span>{<span class="keywordflow">return</span> internal_size();}
+<a name="l00283"></a>00283 
+<a name="l00285"></a><a class="code" href="a00159.html#f64924f2ee9225c368a270fc3c394db9">00285</a>     <span class="keywordtype">bool</span> <a class="code" href="a00159.html#f64924f2ee9225c368a270fc3c394db9">empty</a>()<span class="keyword"> const </span>{<span class="keywordflow">return</span> internal_empty();}
+<a name="l00286"></a>00286 
+<a name="l00288"></a><a class="code" href="a00159.html#b2888b3e4e837d7e03f2c731963a402b">00288</a>     <a class="code" href="a00159.html#a80e4c11dbb324e4b92a24a77bbcde68">size_type</a> <a class="code" href="a00159.html#b2888b3e4e837d7e03f2c731963a402b">capacity</a>()<span class="keyword"> const </span>{
+<a name="l00289"></a>00289         <span class="keywordflow">return</span> my_capacity;
+<a name="l00290"></a>00290     }
+<a name="l00291"></a>00291 
+<a name="l00293"></a>00293 
+<a name="l00295"></a><a class="code" href="a00159.html#f3c6c934f85fd02aedbc83a16943193b">00295</a>     <span class="keywordtype">void</span> <a class="code" href="a00159.html#f3c6c934f85fd02aedbc83a16943193b">set_capacity</a>( <a class="code" href="a00159.html#a80e4c11dbb324e4b92a24a77bbcde68">size_type</a> new_capacity ) {
+<a name="l00296"></a>00296         internal_set_capacity( new_capacity, <span class="keyword">sizeof</span>(T) );
+<a name="l00297"></a>00297     }
+<a name="l00298"></a>00298 
+<a name="l00300"></a><a class="code" href="a00159.html#415eb87e53b1c6a266de06ecbc490d16">00300</a>     <a class="code" href="a00159.html#2e2726fccf6d975dc1071608cc0bbf90">allocator_type</a> <a class="code" href="a00159.html#415eb87e53b1c6a266de06ecbc490d16">get_allocator</a>()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> this-&gt;my_allocator; }
+<a name="l00301"></a>00301 
+<a name="l00303"></a>00303     <span class="keywordtype">void</span> <a class="code" href="a00159.html#90b31e2954c6e4596c7900435a5f4bc1">clear</a>() ;
+<a name="l00304"></a>00304 
+<a name="l00305"></a>00305     <span class="keyword">typedef</span> internal::concurrent_queue_iterator&lt;concurrent_bounded_queue,T&gt; iterator;
+<a name="l00306"></a>00306     <span class="keyword">typedef</span> internal::concurrent_queue_iterator&lt;concurrent_bounded_queue,const T&gt; const_iterator;
+<a name="l00307"></a>00307 
+<a name="l00308"></a>00308     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00309"></a>00309     <span class="comment">// The iterators are intended only for debugging.  They are slow and not thread safe.</span>
+<a name="l00310"></a>00310     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00311"></a>00311     iterator unsafe_begin() {<span class="keywordflow">return</span> iterator(*<span class="keyword">this</span>);}
+<a name="l00312"></a>00312     iterator unsafe_end() {<span class="keywordflow">return</span> iterator();}
+<a name="l00313"></a>00313     const_iterator unsafe_begin()<span class="keyword"> const </span>{<span class="keywordflow">return</span> const_iterator(*<span class="keyword">this</span>);}
+<a name="l00314"></a>00314     const_iterator unsafe_end()<span class="keyword"> const </span>{<span class="keywordflow">return</span> const_iterator();}
+<a name="l00315"></a>00315 
+<a name="l00316"></a>00316 }; 
+<a name="l00317"></a>00317 
+<a name="l00318"></a>00318 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A&gt;
+<a name="l00319"></a><a class="code" href="a00159.html#acaf5b510dc0dfc7780b8c956cf773cf">00319</a> <a class="code" href="a00159.html">concurrent_bounded_queue&lt;T,A&gt;::~concurrent_bounded_queue</a>() {
+<a name="l00320"></a>00320     <a class="code" href="a00159.html#90b31e2954c6e4596c7900435a5f4bc1">clear</a>();
+<a name="l00321"></a>00321     internal_finish_clear();
+<a name="l00322"></a>00322 }
+<a name="l00323"></a>00323 
+<a name="l00324"></a>00324 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A&gt;
+<a name="l00325"></a><a class="code" href="a00159.html#90b31e2954c6e4596c7900435a5f4bc1">00325</a> <span class="keywordtype">void</span> <a class="code" href="a00159.html">concurrent_bounded_queue&lt;T,A&gt;::clear</a>() {
+<a name="l00326"></a>00326     <span class="keywordflow">while</span>( !<a class="code" href="a00159.html#f64924f2ee9225c368a270fc3c394db9">empty</a>() ) {
+<a name="l00327"></a>00327         T value;
+<a name="l00328"></a>00328         internal_pop_if_present(&amp;value);
+<a name="l00329"></a>00329     }
+<a name="l00330"></a>00330 }
+<a name="l00331"></a>00331 
+<a name="l00332"></a>00332 <span class="keyword">namespace </span>deprecated {
+<a name="l00333"></a>00333 
+<a name="l00335"></a>00335 
+<a name="l00340"></a>00340 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A = cache_aligned_allocator&lt;T&gt; &gt; 
+<a name="l00341"></a><a class="code" href="a00165.html">00341</a> <span class="keyword">class </span><a class="code" href="a00165.html">concurrent_queue</a>: <span class="keyword">public</span> <a class="code" href="a00159.html">concurrent_bounded_queue</a>&lt;T,A&gt; {
+<a name="l00342"></a>00342 <span class="preprocessor">#if !__TBB_TEMPLATE_FRIENDS_BROKEN</span>
+<a name="l00343"></a>00343 <span class="preprocessor"></span>    <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> Value&gt; <span class="keyword">friend</span> <span class="keyword">class </span>internal::concurrent_queue_iterator;
+<a name="l00344"></a>00344 <span class="preprocessor">#endif </span>
+<a name="l00345"></a>00345 <span class="preprocessor"></span>
+<a name="l00346"></a>00346 <span class="keyword">public</span>:
+<a name="l00348"></a><a class="code" href="a00165.html#aaf19bd7337b72f3131ece60f7315ef7">00348</a>     <span class="keyword">explicit</span> <a class="code" href="a00165.html#aaf19bd7337b72f3131ece60f7315ef7">concurrent_queue</a>(<span class="keyword">const</span> A&amp; a = A()) : 
+<a name="l00349"></a>00349         <a class="code" href="a00159.html">concurrent_bounded_queue</a>&lt;T,A&gt;( a )
+<a name="l00350"></a>00350     {
+<a name="l00351"></a>00351     }
+<a name="l00352"></a>00352 
+<a name="l00354"></a><a class="code" href="a00165.html#fc092b9082f233482f3513fc3bb670f7">00354</a>     <a class="code" href="a00165.html#aaf19bd7337b72f3131ece60f7315ef7">concurrent_queue</a>( <span class="keyword">const</span> <a class="code" href="a00165.html">concurrent_queue</a>&amp; src, <span class="keyword">const</span> A&amp; a = A()) : 
+<a name="l00355"></a>00355         <a class="code" href="a00159.html">concurrent_bounded_queue</a>&lt;T,A&gt;( src, a )
+<a name="l00356"></a>00356     {
+<a name="l00357"></a>00357     }
+<a name="l00358"></a>00358 
+<a name="l00360"></a>00360     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> InputIterator&gt;
+<a name="l00361"></a><a class="code" href="a00165.html#9102b897776bd2d9e908e6604ff16b5f">00361</a>     <a class="code" href="a00165.html#aaf19bd7337b72f3131ece60f7315ef7">concurrent_queue</a>( InputIterator b <span class="comment">/*begin*/</span>, InputIterator e <span class="comment">/*end*/</span>, <span class="keyword">const</span> A&amp; a = A()) :
+<a name="l00362"></a>00362         <a class="code" href="a00159.html">concurrent_bounded_queue</a>&lt;T,A&gt;( b, e, a )
+<a name="l00363"></a>00363     {
+<a name="l00364"></a>00364     }
+<a name="l00365"></a>00365 
+<a name="l00367"></a>00367 
+<a name="l00369"></a><a class="code" href="a00165.html#7c45561bafe71107d09b2bc1b8f4e681">00369</a>     <span class="keywordtype">bool</span> <a class="code" href="a00165.html#7c45561bafe71107d09b2bc1b8f4e681">push_if_not_full</a>( <span class="keyword">const</span> T&amp; source ) {
+<a name="l00370"></a>00370         <span class="keywordflow">return</span> this-&gt;<a class="code" href="a00159.html#2bd6232531279fb3ccbd296bea23066b">try_push</a>( source );
+<a name="l00371"></a>00371     }
+<a name="l00372"></a>00372 
+<a name="l00374"></a>00374 
+<a name="l00378"></a><a class="code" href="a00165.html#48da3536245318af6cb5fd58bac78039">00378</a>     <span class="keywordtype">bool</span> <a class="code" href="a00165.html#48da3536245318af6cb5fd58bac78039">pop_if_present</a>( T&amp; destination ) {
+<a name="l00379"></a>00379         <span class="keywordflow">return</span> this-&gt;<a class="code" href="a00159.html#0ca487019bbb00a196442aff78a1e4f7">try_pop</a>( destination );
+<a name="l00380"></a>00380     }
+<a name="l00381"></a>00381 
+<a name="l00382"></a>00382     <span class="keyword">typedef</span> <span class="keyword">typename</span> <a class="code" href="a00159.html">concurrent_bounded_queue&lt;T,A&gt;::iterator</a> iterator;
+<a name="l00383"></a>00383     <span class="keyword">typedef</span> <span class="keyword">typename</span> <a class="code" href="a00159.html">concurrent_bounded_queue&lt;T,A&gt;::const_iterator</a> const_iterator;
+<a name="l00384"></a>00384     <span class="comment">//</span>
+<a name="l00385"></a>00385     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00386"></a>00386     <span class="comment">// The iterators are intended only for debugging.  They are slow and not thread safe.</span>
+<a name="l00387"></a>00387     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00388"></a>00388     iterator begin() {<span class="keywordflow">return</span> this-&gt;unsafe_begin();}
+<a name="l00389"></a>00389     iterator end() {<span class="keywordflow">return</span> this-&gt;unsafe_end();}
+<a name="l00390"></a>00390     const_iterator begin()<span class="keyword"> const </span>{<span class="keywordflow">return</span> this-&gt;unsafe_begin();}
+<a name="l00391"></a>00391     const_iterator end()<span class="keyword"> const </span>{<span class="keywordflow">return</span> this-&gt;unsafe_end();}
+<a name="l00392"></a>00392 }; 
+<a name="l00393"></a>00393 
+<a name="l00394"></a>00394 }
+<a name="l00395"></a>00395     
+<a name="l00396"></a>00396 
+<a name="l00397"></a>00397 <span class="preprocessor">#if TBB_DEPRECATED</span>
+<a name="l00398"></a>00398 <span class="preprocessor"></span><span class="keyword">using</span> deprecated::concurrent_queue;
+<a name="l00399"></a>00399 <span class="preprocessor">#else</span>
+<a name="l00400"></a>00400 <span class="preprocessor"></span><span class="keyword">using</span> strict_ppl::concurrent_queue;    
+<a name="l00401"></a>00401 <span class="preprocessor">#endif</span>
+<a name="l00402"></a>00402 <span class="preprocessor"></span>
+<a name="l00403"></a>00403 } <span class="comment">// namespace tbb</span>
+<a name="l00404"></a>00404 
+<a name="l00405"></a>00405 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_concurrent_queue_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
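
For reference, here is a minimal usage sketch of the concurrent_bounded_queue interface listed above. It is an illustration only, not part of the imported sources, and it assumes the TBB headers and library from this tree are on the include and link paths:

    #include <iostream>
    #include "tbb/concurrent_queue.h"

    int main() {
        tbb::concurrent_bounded_queue<int> q;
        q.set_capacity(4);            // push() blocks once four items are queued

        for (int i = 0; i < 4; ++i)
            q.push(i);                // blocking enqueue

        int x;
        while (q.try_pop(x))          // non-blocking dequeue; returns false when empty
            std::cout << x << '\n';
        return 0;
    }

The deprecated tbb::deprecated::concurrent_queue shown above simply forwards push_if_not_full/pop_if_present to try_push/try_pop, so new code should use concurrent_bounded_queue (or strict_ppl::concurrent_queue) directly.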
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00342.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00342.html
new file mode 100644 (file)
index 0000000..0c97317
--- /dev/null
@@ -0,0 +1,263 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>concurrent_unordered_map.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>concurrent_unordered_map.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="comment">/* Container implementations in this header are based on PPL implementations</span>
+<a name="l00022"></a>00022 <span class="comment">   provided by Microsoft. */</span>
+<a name="l00023"></a>00023 
+<a name="l00024"></a>00024 <span class="preprocessor">#ifndef __TBB_concurrent_unordered_map_H</span>
+<a name="l00025"></a>00025 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_concurrent_unordered_map_H</span>
+<a name="l00026"></a>00026 <span class="preprocessor"></span>
+<a name="l00027"></a>00027 <span class="preprocessor">#include "_concurrent_unordered_internal.h"</span>
+<a name="l00028"></a>00028 
+<a name="l00029"></a>00029 <span class="keyword">namespace </span>tbb
+<a name="l00030"></a>00030 {
+<a name="l00031"></a>00031 
+<a name="l00032"></a>00032 <span class="comment">// Template class for hash compare</span>
+<a name="l00033"></a>00033 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Key&gt;
+<a name="l00034"></a>00034 <span class="keyword">class </span>tbb_hash
+<a name="l00035"></a>00035 {
+<a name="l00036"></a>00036 <span class="keyword">public</span>:
+<a name="l00037"></a>00037     tbb_hash() {}
+<a name="l00038"></a>00038 
+<a name="l00039"></a>00039     size_t operator()(<span class="keyword">const</span> Key&amp; key)<span class="keyword"> const</span>
+<a name="l00040"></a>00040 <span class="keyword">    </span>{
+<a name="l00041"></a>00041         <span class="keywordflow">return</span> tbb_hasher(key);
+<a name="l00042"></a>00042     }
+<a name="l00043"></a>00043 };
+<a name="l00044"></a>00044 
+<a name="l00045"></a>00045 <span class="keyword">namespace </span>interface5 {
+<a name="l00046"></a>00046 
+<a name="l00047"></a>00047 <span class="comment">// Template class for hash map traits</span>
+<a name="l00048"></a>00048 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Key, <span class="keyword">typename</span> T, <span class="keyword">typename</span> Hash_compare, <span class="keyword">typename</span> Allocator, <span class="keywordtype">bool</span> Allow_multimapping&gt;
+<a name="l00049"></a>00049 <span class="keyword">class </span>concurrent_unordered_map_traits
+<a name="l00050"></a>00050 {
+<a name="l00051"></a>00051 <span class="keyword">protected</span>:
+<a name="l00052"></a>00052     <span class="keyword">typedef</span> std::pair&lt;const Key, T&gt; value_type;
+<a name="l00053"></a>00053     <span class="keyword">typedef</span> Key key_type;
+<a name="l00054"></a>00054     <span class="keyword">typedef</span> Hash_compare hash_compare;
+<a name="l00055"></a>00055     <span class="keyword">typedef</span> <span class="keyword">typename</span> Allocator::template rebind&lt;value_type&gt;::other allocator_type;
+<a name="l00056"></a>00056     <span class="keyword">enum</span> { allow_multimapping = Allow_multimapping };
+<a name="l00057"></a>00057 
+<a name="l00058"></a>00058     concurrent_unordered_map_traits() : my_hash_compare() {}
+<a name="l00059"></a>00059     concurrent_unordered_map_traits(<span class="keyword">const</span> hash_compare&amp; hc) : my_hash_compare(hc) {}
+<a name="l00060"></a>00060 
+<a name="l00061"></a>00061     <span class="keyword">class </span>value_compare : <span class="keyword">public</span> std::binary_function&lt;value_type, value_type, bool&gt;
+<a name="l00062"></a>00062     {
+<a name="l00063"></a>00063         <span class="keyword">friend</span> <span class="keyword">class </span>concurrent_unordered_map_traits&lt;Key, T, Hash_compare, Allocator, Allow_multimapping&gt;;
+<a name="l00064"></a>00064 
+<a name="l00065"></a>00065     <span class="keyword">public</span>:
+<a name="l00066"></a>00066         <span class="keywordtype">bool</span> operator()(<span class="keyword">const</span> value_type&amp; left, <span class="keyword">const</span> value_type&amp; right)<span class="keyword"> const</span>
+<a name="l00067"></a>00067 <span class="keyword">        </span>{
+<a name="l00068"></a>00068             <span class="keywordflow">return</span> (my_hash_compare(left.first, right.first));
+<a name="l00069"></a>00069         }
+<a name="l00070"></a>00070 
+<a name="l00071"></a>00071         value_compare(<span class="keyword">const</span> hash_compare&amp; comparator) : my_hash_compare(comparator) {}
+<a name="l00072"></a>00072 
+<a name="l00073"></a>00073     <span class="keyword">protected</span>:
+<a name="l00074"></a>00074         hash_compare my_hash_compare;    <span class="comment">// the comparator predicate for keys</span>
+<a name="l00075"></a>00075     };
+<a name="l00076"></a>00076 
+<a name="l00077"></a>00077     <span class="keyword">template</span>&lt;<span class="keyword">class</span> Type1, <span class="keyword">class</span> Type2&gt;
+<a name="l00078"></a>00078     <span class="keyword">static</span> <span class="keyword">const</span> Key&amp; get_key(<span class="keyword">const</span> std::pair&lt;Type1, Type2&gt;&amp; value) {
+<a name="l00079"></a>00079         <span class="keywordflow">return</span> (value.first);
+<a name="l00080"></a>00080     }
+<a name="l00081"></a>00081 
+<a name="l00082"></a>00082     hash_compare my_hash_compare; <span class="comment">// the comparator predicate for keys</span>
+<a name="l00083"></a>00083 };
+<a name="l00084"></a>00084 
+<a name="l00085"></a>00085 <span class="keyword">template</span> &lt;<span class="keyword">typename</span> Key, <span class="keyword">typename</span> T, <span class="keyword">typename</span> Hasher = tbb_hash&lt;Key&gt;, <span class="keyword">typename</span> Key_equality = std::equal_to&lt;Key&gt;, <span class="keyword">typename</span> Allocator = tbb::tbb_allocator&lt;std::pair&lt;const Key, T&gt; &gt; &gt;
+<a name="l00086"></a>00086 <span class="keyword">class </span>concurrent_unordered_map : <span class="keyword">public</span> internal::concurrent_unordered_base&lt; concurrent_unordered_map_traits&lt;Key, T, internal::hash_compare&lt;Key, Hasher, Key_equality&gt;, Allocator, false&gt; &gt;
+<a name="l00087"></a>00087 {
+<a name="l00088"></a>00088     <span class="comment">// Base type definitions</span>
+<a name="l00089"></a>00089     <span class="keyword">typedef</span> internal::hash_compare&lt;Key, Hasher, Key_equality&gt; hash_compare;
+<a name="l00090"></a>00090     <span class="keyword">typedef</span> internal::concurrent_unordered_base&lt; concurrent_unordered_map_traits&lt;Key, T, hash_compare, Allocator, false&gt; &gt; base_type;
+<a name="l00091"></a>00091     <span class="keyword">typedef</span> concurrent_unordered_map_traits&lt;Key, T, internal::hash_compare&lt;Key, Hasher, Key_equality&gt;, Allocator, <span class="keyword">false</span>&gt; traits_type;
+<a name="l00092"></a>00092     <span class="keyword">using</span> traits_type::my_hash_compare;
+<a name="l00093"></a>00093 <span class="preprocessor">#if __TBB_EXTRA_DEBUG</span>
+<a name="l00094"></a>00094 <span class="preprocessor"></span><span class="keyword">public</span>:
+<a name="l00095"></a>00095 <span class="preprocessor">#endif</span>
+<a name="l00096"></a>00096 <span class="preprocessor"></span>    <span class="keyword">using</span> traits_type::allow_multimapping;
+<a name="l00097"></a>00097 <span class="keyword">public</span>:
+<a name="l00098"></a>00098     <span class="keyword">using</span> base_type::end;
+<a name="l00099"></a>00099     <span class="keyword">using</span> base_type::find;
+<a name="l00100"></a>00100     <span class="keyword">using</span> base_type::insert;
+<a name="l00101"></a>00101 
+<a name="l00102"></a>00102     <span class="comment">// Type definitions</span>
+<a name="l00103"></a>00103     <span class="keyword">typedef</span> Key key_type;
+<a name="l00104"></a>00104     <span class="keyword">typedef</span> <span class="keyword">typename</span> base_type::value_type value_type;
+<a name="l00105"></a>00105     <span class="keyword">typedef</span> T mapped_type;
+<a name="l00106"></a>00106     <span class="keyword">typedef</span> Hasher hasher;
+<a name="l00107"></a>00107     <span class="keyword">typedef</span> Key_equality key_equal;
+<a name="l00108"></a>00108     <span class="keyword">typedef</span> hash_compare key_compare;
+<a name="l00109"></a>00109 
+<a name="l00110"></a>00110     <span class="keyword">typedef</span> <span class="keyword">typename</span> base_type::allocator_type allocator_type;
+<a name="l00111"></a>00111     <span class="keyword">typedef</span> <span class="keyword">typename</span> base_type::pointer pointer;
+<a name="l00112"></a>00112     <span class="keyword">typedef</span> <span class="keyword">typename</span> base_type::const_pointer const_pointer;
+<a name="l00113"></a>00113     <span class="keyword">typedef</span> <span class="keyword">typename</span> base_type::reference reference;
+<a name="l00114"></a>00114     <span class="keyword">typedef</span> <span class="keyword">typename</span> base_type::const_reference const_reference;
+<a name="l00115"></a>00115 
+<a name="l00116"></a>00116     <span class="keyword">typedef</span> <span class="keyword">typename</span> base_type::size_type size_type;
+<a name="l00117"></a>00117     <span class="keyword">typedef</span> <span class="keyword">typename</span> base_type::difference_type difference_type;
+<a name="l00118"></a>00118 
+<a name="l00119"></a>00119     <span class="keyword">typedef</span> <span class="keyword">typename</span> base_type::iterator iterator;
+<a name="l00120"></a>00120     <span class="keyword">typedef</span> <span class="keyword">typename</span> base_type::const_iterator const_iterator;
+<a name="l00121"></a>00121     <span class="keyword">typedef</span> <span class="keyword">typename</span> base_type::iterator local_iterator;
+<a name="l00122"></a>00122     <span class="keyword">typedef</span> <span class="keyword">typename</span> base_type::const_iterator const_local_iterator;
+<a name="l00123"></a>00123 
+<a name="l00124"></a>00124     <span class="comment">// Construction/destruction/copying</span>
+<a name="l00125"></a>00125     <span class="keyword">explicit</span> concurrent_unordered_map(size_type n_of_buckets = 8, <span class="keyword">const</span> hasher&amp; a_hasher = hasher(),
+<a name="l00126"></a>00126         <span class="keyword">const</span> key_equal&amp; a_keyeq = key_equal(), <span class="keyword">const</span> allocator_type&amp; a = allocator_type())
+<a name="l00127"></a>00127         : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a)
+<a name="l00128"></a>00128     {
+<a name="l00129"></a>00129     }
+<a name="l00130"></a>00130 
+<a name="l00131"></a>00131     concurrent_unordered_map(<span class="keyword">const</span> Allocator&amp; a) : base_type(8, key_compare(), a)
+<a name="l00132"></a>00132     {
+<a name="l00133"></a>00133     }
+<a name="l00134"></a>00134 
+<a name="l00135"></a>00135     <span class="keyword">template</span> &lt;<span class="keyword">typename</span> Iterator&gt;
+<a name="l00136"></a>00136     concurrent_unordered_map(Iterator first, Iterator last, size_type n_of_buckets = 8, <span class="keyword">const</span> hasher&amp; a_hasher = hasher(),
+<a name="l00137"></a>00137         <span class="keyword">const</span> key_equal&amp; a_keyeq = key_equal(), <span class="keyword">const</span> allocator_type&amp; a = allocator_type())
+<a name="l00138"></a>00138         : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a)
+<a name="l00139"></a>00139     {
+<a name="l00140"></a>00140         <span class="keywordflow">for</span> (; first != last; ++first)
+<a name="l00141"></a>00141             base_type::insert(*first);
+<a name="l00142"></a>00142     }
+<a name="l00143"></a>00143 
+<a name="l00144"></a>00144     concurrent_unordered_map(<span class="keyword">const</span> concurrent_unordered_map&amp; table) : base_type(table)
+<a name="l00145"></a>00145     {
+<a name="l00146"></a>00146     }
+<a name="l00147"></a>00147 
+<a name="l00148"></a>00148     concurrent_unordered_map(<span class="keyword">const</span> concurrent_unordered_map&amp; table, <span class="keyword">const</span> Allocator&amp; a)
+<a name="l00149"></a>00149         : base_type(table, a)
+<a name="l00150"></a>00150     {
+<a name="l00151"></a>00151     }
+<a name="l00152"></a>00152 
+<a name="l00153"></a>00153     concurrent_unordered_map&amp; operator=(<span class="keyword">const</span> concurrent_unordered_map&amp; table)
+<a name="l00154"></a>00154     {
+<a name="l00155"></a>00155         base_type::operator=(table);
+<a name="l00156"></a>00156         <span class="keywordflow">return</span> (*<span class="keyword">this</span>);
+<a name="l00157"></a>00157     }
+<a name="l00158"></a>00158 
+<a name="l00159"></a>00159     iterator unsafe_erase(const_iterator where)
+<a name="l00160"></a>00160     {
+<a name="l00161"></a>00161         <span class="keywordflow">return</span> base_type::unsafe_erase(where);
+<a name="l00162"></a>00162     }
+<a name="l00163"></a>00163 
+<a name="l00164"></a>00164     size_type unsafe_erase(<span class="keyword">const</span> key_type&amp; key)
+<a name="l00165"></a>00165     {
+<a name="l00166"></a>00166         <span class="keywordflow">return</span> base_type::unsafe_erase(key);
+<a name="l00167"></a>00167     }
+<a name="l00168"></a>00168 
+<a name="l00169"></a>00169     iterator unsafe_erase(const_iterator first, const_iterator last)
+<a name="l00170"></a>00170     {
+<a name="l00171"></a>00171         <span class="keywordflow">return</span> base_type::unsafe_erase(first, last);
+<a name="l00172"></a>00172     }
+<a name="l00173"></a>00173 
+<a name="l00174"></a>00174     <span class="keywordtype">void</span> swap(concurrent_unordered_map&amp; table)
+<a name="l00175"></a>00175     {
+<a name="l00176"></a>00176         base_type::swap(table);
+<a name="l00177"></a>00177     }
+<a name="l00178"></a>00178 
+<a name="l00179"></a>00179     <span class="comment">// Observers</span>
+<a name="l00180"></a>00180     hasher hash_function()<span class="keyword"> const</span>
+<a name="l00181"></a>00181 <span class="keyword">    </span>{
+<a name="l00182"></a>00182         <span class="keywordflow">return</span> my_hash_compare.my_hash_object;
+<a name="l00183"></a>00183     }
+<a name="l00184"></a>00184 
+<a name="l00185"></a>00185     key_equal key_eq()<span class="keyword"> const</span>
+<a name="l00186"></a>00186 <span class="keyword">    </span>{
+<a name="l00187"></a>00187         <span class="keywordflow">return</span> my_hash_compare.my_key_compare_object;
+<a name="l00188"></a>00188     }
+<a name="l00189"></a>00189 
+<a name="l00190"></a>00190     mapped_type&amp; operator[](<span class="keyword">const</span> key_type&amp; key)
+<a name="l00191"></a>00191     {
+<a name="l00192"></a>00192         iterator where = find(key);
+<a name="l00193"></a>00193 
+<a name="l00194"></a>00194         <span class="keywordflow">if</span> (where == end())
+<a name="l00195"></a>00195         {
+<a name="l00196"></a>00196             where = insert(std::pair&lt;key_type, mapped_type&gt;(key, mapped_type())).first;
+<a name="l00197"></a>00197         }
+<a name="l00198"></a>00198 
+<a name="l00199"></a>00199         <span class="keywordflow">return</span> ((*where).second);
+<a name="l00200"></a>00200     }
+<a name="l00201"></a>00201 
+<a name="l00202"></a>00202     mapped_type&amp; at(<span class="keyword">const</span> key_type&amp; key)
+<a name="l00203"></a>00203     {
+<a name="l00204"></a>00204         iterator where = find(key);
+<a name="l00205"></a>00205 
+<a name="l00206"></a>00206         <span class="keywordflow">if</span> (where == end())
+<a name="l00207"></a>00207         {
+<a name="l00208"></a>00208             tbb::internal::throw_exception(tbb::internal::eid_invalid_key);
+<a name="l00209"></a>00209         }
+<a name="l00210"></a>00210 
+<a name="l00211"></a>00211         <span class="keywordflow">return</span> ((*where).second);
+<a name="l00212"></a>00212     }
+<a name="l00213"></a>00213 
+<a name="l00214"></a>00214     <span class="keyword">const</span> mapped_type&amp; at(<span class="keyword">const</span> key_type&amp; key)<span class="keyword"> const</span>
+<a name="l00215"></a>00215 <span class="keyword">    </span>{
+<a name="l00216"></a>00216         const_iterator where = find(key);
+<a name="l00217"></a>00217 
+<a name="l00218"></a>00218         <span class="keywordflow">if</span> (where == end())
+<a name="l00219"></a>00219         {
+<a name="l00220"></a>00220             tbb::internal::throw_exception(tbb::internal::eid_invalid_key);
+<a name="l00221"></a>00221         }
+<a name="l00222"></a>00222 
+<a name="l00223"></a>00223         <span class="keywordflow">return</span> ((*where).second);
+<a name="l00224"></a>00224     }
+<a name="l00225"></a>00225 };
+<a name="l00226"></a>00226 
+<a name="l00227"></a>00227 } <span class="comment">// namespace interface5</span>
+<a name="l00228"></a>00228 
+<a name="l00229"></a>00229 <span class="keyword">using</span> interface5::concurrent_unordered_map;
+<a name="l00230"></a>00230 
+<a name="l00231"></a>00231 } <span class="comment">// namespace tbb</span>
+<a name="l00232"></a>00232 
+<a name="l00233"></a>00233 <span class="preprocessor">#endif// __TBB_concurrent_unordered_map_H</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
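
Similarly, a minimal usage sketch for the concurrent_unordered_map interface listed above (again an illustration only, assuming the headers added by this commit; kept in pre-C++0x style to match the library):

    #include <iostream>
    #include <string>
    #include <utility>
    #include "tbb/concurrent_unordered_map.h"

    int main() {
        typedef tbb::concurrent_unordered_map<std::string, int> map_type;
        map_type counts;

        counts["apples"] = 3;        // operator[] inserts a default-constructed value if the key is absent
        counts.insert(std::make_pair(std::string("pears"), 5));

        map_type::iterator it = counts.find("apples");
        if (it != counts.end())
            std::cout << it->first << " -> " << it->second << '\n';
        return 0;
    }

Note that, unlike std::unordered_map, removal is spelled unsafe_erase here because it is not safe to call concurrently with other operations on the same container.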
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00347.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00347.html
new file mode 100644 (file)
index 0000000..b3d2eff
--- /dev/null
@@ -0,0 +1,924 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>concurrent_vector.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>concurrent_vector.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_concurrent_vector_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_concurrent_vector_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "tbb_stddef.h"</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#include "tbb_exception.h"</span>
+<a name="l00026"></a>00026 <span class="preprocessor">#include "atomic.h"</span>
+<a name="l00027"></a>00027 <span class="preprocessor">#include "cache_aligned_allocator.h"</span>
+<a name="l00028"></a>00028 <span class="preprocessor">#include "blocked_range.h"</span>
+<a name="l00029"></a>00029 <span class="preprocessor">#include "tbb_machine.h"</span>
+<a name="l00030"></a>00030 <span class="preprocessor">#include &lt;new&gt;</span>
+<a name="l00031"></a>00031 
+<a name="l00032"></a>00032 <span class="preprocessor">#if !TBB_USE_EXCEPTIONS &amp;&amp; _MSC_VER</span>
+<a name="l00033"></a>00033 <span class="preprocessor"></span>    <span class="comment">// Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers</span>
+<a name="l00034"></a>00034 <span class="preprocessor">    #pragma warning (push)</span>
+<a name="l00035"></a>00035 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (disable: 4530)</span>
+<a name="l00036"></a>00036 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00037"></a>00037 <span class="preprocessor"></span>
+<a name="l00038"></a>00038 <span class="preprocessor">#include &lt;algorithm&gt;</span>
+<a name="l00039"></a>00039 <span class="preprocessor">#include &lt;iterator&gt;</span>
+<a name="l00040"></a>00040 
+<a name="l00041"></a>00041 <span class="preprocessor">#if !TBB_USE_EXCEPTIONS &amp;&amp; _MSC_VER</span>
+<a name="l00042"></a>00042 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (pop)</span>
+<a name="l00043"></a>00043 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00044"></a>00044 <span class="preprocessor"></span>
+<a name="l00045"></a>00045 <span class="preprocessor">#if _MSC_VER==1500 &amp;&amp; !__INTEL_COMPILER</span>
+<a name="l00046"></a>00046 <span class="preprocessor"></span>    <span class="comment">// VS2008/VC9 seems to have an issue; limits pull in math.h</span>
+<a name="l00047"></a>00047 <span class="preprocessor">    #pragma warning( push )</span>
+<a name="l00048"></a>00048 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning( disable: 4985 )</span>
+<a name="l00049"></a>00049 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00050"></a>00050 <span class="preprocessor"></span><span class="preprocessor">#include &lt;limits&gt;</span> <span class="comment">/* std::numeric_limits */</span>
+<a name="l00051"></a>00051 <span class="preprocessor">#if _MSC_VER==1500 &amp;&amp; !__INTEL_COMPILER</span>
+<a name="l00052"></a>00052 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning( pop )</span>
+<a name="l00053"></a>00053 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00054"></a>00054 <span class="preprocessor"></span>
+<a name="l00055"></a>00055 <span class="preprocessor">#if defined(_MSC_VER) &amp;&amp; !defined(__INTEL_COMPILER) &amp;&amp; defined(_Wp64)</span>
+<a name="l00056"></a>00056 <span class="preprocessor"></span>    <span class="comment">// Workaround for overzealous compiler warnings in /Wp64 mode</span>
+<a name="l00057"></a>00057 <span class="preprocessor">    #pragma warning (push)</span>
+<a name="l00058"></a>00058 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (disable: 4267)</span>
+<a name="l00059"></a>00059 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00060"></a>00060 <span class="preprocessor"></span>
+<a name="l00061"></a>00061 <span class="keyword">namespace </span>tbb {
+<a name="l00062"></a>00062 
+<a name="l00063"></a>00063 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A = cache_aligned_allocator&lt;T&gt; &gt;
+<a name="l00064"></a>00064 <span class="keyword">class </span>concurrent_vector;
+<a name="l00065"></a>00065 
+<a name="l00067"></a>00067 <span class="keyword">namespace </span>internal {
+<a name="l00068"></a>00068 
+<a name="l00070"></a>00070     <span class="keyword">static</span> <span class="keywordtype">void</span> *<span class="keyword">const</span> vector_allocation_error_flag = reinterpret_cast&lt;void*&gt;(size_t(63));
+<a name="l00071"></a>00071 
+<a name="l00073"></a>00073     <span class="keywordtype">void</span>* __TBB_EXPORTED_FUNC itt_load_pointer_v3( <span class="keyword">const</span> <span class="keywordtype">void</span>* src );
+<a name="l00074"></a>00074 
+<a name="l00076"></a>00076 
+<a name="l00077"></a>00077     <span class="keyword">class </span>concurrent_vector_base_v3 {
+<a name="l00078"></a>00078     <span class="keyword">protected</span>:
+<a name="l00079"></a>00079 
+<a name="l00080"></a>00080         <span class="comment">// Basic types declarations</span>
+<a name="l00081"></a>00081         <span class="keyword">typedef</span> size_t segment_index_t;
+<a name="l00082"></a>00082         <span class="keyword">typedef</span> size_t size_type;
+<a name="l00083"></a>00083 
+<a name="l00084"></a>00084         <span class="comment">// Using enumerations due to Mac linking problems of static const variables</span>
+<a name="l00085"></a>00085         <span class="keyword">enum</span> {
+<a name="l00086"></a>00086             <span class="comment">// Size constants</span>
+<a name="l00087"></a>00087             default_initial_segments = 1, <span class="comment">// 2 initial items</span>
+<a name="l00089"></a>00089 <span class="comment"></span>            pointers_per_short_table = 3, <span class="comment">// to fit into 8 words of entire structure</span>
+<a name="l00090"></a>00090             pointers_per_long_table = <span class="keyword">sizeof</span>(segment_index_t) * 8 <span class="comment">// one segment per bit</span>
+<a name="l00091"></a>00091         };
+<a name="l00092"></a>00092 
+<a name="l00093"></a>00093         <span class="comment">// Segment pointer. Can be zero-initialized</span>
+<a name="l00094"></a>00094         <span class="keyword">struct </span>segment_t {
+<a name="l00095"></a>00095             <span class="keywordtype">void</span>* array;
+<a name="l00096"></a>00096 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00097"></a>00097 <span class="preprocessor"></span>            ~segment_t() {
+<a name="l00098"></a>00098                 __TBB_ASSERT( array &lt;= internal::vector_allocation_error_flag, <span class="stringliteral">"should have been freed by clear"</span> );
+<a name="l00099"></a>00099             }
+<a name="l00100"></a>00100 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00101"></a>00101         };
+<a name="l00102"></a>00102  
+<a name="l00103"></a>00103         <span class="comment">// Data fields</span>
+<a name="l00104"></a>00104 
+<a name="l00106"></a>00106         <span class="keywordtype">void</span>* (*vector_allocator_ptr)(concurrent_vector_base_v3 &amp;, size_t);
+<a name="l00107"></a>00107 
+<a name="l00109"></a>00109         atomic&lt;size_type&gt; my_first_block;
+<a name="l00110"></a>00110 
+<a name="l00112"></a>00112         atomic&lt;size_type&gt; my_early_size;
+<a name="l00113"></a>00113 
+<a name="l00115"></a>00115         atomic&lt;segment_t*&gt; my_segment;
+<a name="l00116"></a>00116 
+<a name="l00118"></a>00118         segment_t my_storage[pointers_per_short_table];
+<a name="l00119"></a>00119 
+<a name="l00120"></a>00120         <span class="comment">// Methods</span>
+<a name="l00121"></a>00121 
+<a name="l00122"></a>00122         concurrent_vector_base_v3() {
+<a name="l00123"></a>00123             my_early_size = 0;
+<a name="l00124"></a>00124             my_first_block = 0; <span class="comment">// here is not default_initial_segments</span>
+<a name="l00125"></a>00125             <span class="keywordflow">for</span>( segment_index_t i = 0; i &lt; pointers_per_short_table; i++)
+<a name="l00126"></a>00126                 my_storage[i].array = NULL;
+<a name="l00127"></a>00127             my_segment = my_storage;
+<a name="l00128"></a>00128         }
+<a name="l00129"></a>00129         __TBB_EXPORTED_METHOD ~concurrent_vector_base_v3();
+<a name="l00130"></a>00130 
+<a name="l00131"></a>00131         <span class="keyword">static</span> segment_index_t segment_index_of( size_type index ) {
+<a name="l00132"></a>00132             <span class="keywordflow">return</span> segment_index_t( __TBB_Log2( index|1 ) );
+<a name="l00133"></a>00133         }
+<a name="l00134"></a>00134 
+<a name="l00135"></a>00135         <span class="keyword">static</span> segment_index_t segment_base( segment_index_t k ) {
+<a name="l00136"></a>00136             <span class="keywordflow">return</span> (segment_index_t(1)&lt;&lt;k &amp; ~segment_index_t(1));
+<a name="l00137"></a>00137         }
+<a name="l00138"></a>00138 
+<a name="l00139"></a>00139         <span class="keyword">static</span> <span class="keyword">inline</span> segment_index_t segment_base_index_of( segment_index_t &amp;index ) {
+<a name="l00140"></a>00140             segment_index_t k = segment_index_of( index );
+<a name="l00141"></a>00141             index -= segment_base(k);
+<a name="l00142"></a>00142             <span class="keywordflow">return</span> k;
+<a name="l00143"></a>00143         }
+<a name="l00144"></a>00144 
+<a name="l00145"></a>00145         <span class="keyword">static</span> size_type segment_size( segment_index_t k ) {
+<a name="l00146"></a>00146             <span class="keywordflow">return</span> segment_index_t(1)&lt;&lt;k; <span class="comment">// fake value for k==0</span>
+<a name="l00147"></a>00147         }
+<a name="l00148"></a>00148 
+<a name="l00150"></a>00150         <span class="keyword">typedef</span> void (__TBB_EXPORTED_FUNC *internal_array_op1)(<span class="keywordtype">void</span>* begin, size_type n );
+<a name="l00151"></a>00151 
+<a name="l00153"></a>00153         <span class="keyword">typedef</span> void (__TBB_EXPORTED_FUNC *internal_array_op2)(<span class="keywordtype">void</span>* dst, <span class="keyword">const</span> <span class="keywordtype">void</span>* src, size_type n );
+<a name="l00154"></a>00154 
+<a name="l00156"></a>00156         <span class="keyword">struct </span>internal_segments_table {
+<a name="l00157"></a>00157             segment_index_t first_block;
+<a name="l00158"></a>00158             <span class="keywordtype">void</span>* table[pointers_per_long_table];
+<a name="l00159"></a>00159         };
+<a name="l00160"></a>00160 
+<a name="l00161"></a>00161         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_reserve( size_type n, size_type element_size, size_type max_size );
+<a name="l00162"></a>00162         size_type __TBB_EXPORTED_METHOD internal_capacity() <span class="keyword">const</span>;
+<a name="l00163"></a>00163         <span class="keywordtype">void</span> internal_grow( size_type start, size_type finish, size_type element_size, internal_array_op2 init, <span class="keyword">const</span> <span class="keywordtype">void</span> *src );
+<a name="l00164"></a>00164         size_type __TBB_EXPORTED_METHOD internal_grow_by( size_type delta, size_type element_size, internal_array_op2 init, <span class="keyword">const</span> <span class="keywordtype">void</span> *src );
+<a name="l00165"></a>00165         <span class="keywordtype">void</span>* __TBB_EXPORTED_METHOD internal_push_back( size_type element_size, size_type&amp; index );
+<a name="l00166"></a>00166         segment_index_t __TBB_EXPORTED_METHOD internal_clear( internal_array_op1 destroy );
+<a name="l00167"></a>00167         <span class="keywordtype">void</span>* __TBB_EXPORTED_METHOD internal_compact( size_type element_size, <span class="keywordtype">void</span> *table, internal_array_op1 destroy, internal_array_op2 copy );
+<a name="l00168"></a>00168         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_copy( <span class="keyword">const</span> concurrent_vector_base_v3&amp; src, size_type element_size, internal_array_op2 copy );
+<a name="l00169"></a>00169         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_assign( <span class="keyword">const</span> concurrent_vector_base_v3&amp; src, size_type element_size,
+<a name="l00170"></a>00170                               internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy );
+<a name="l00172"></a>00172         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_throw_exception(size_type) <span class="keyword">const</span>;
+<a name="l00173"></a>00173         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_swap(concurrent_vector_base_v3&amp; v);
+<a name="l00174"></a>00174 
+<a name="l00175"></a>00175         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_resize( size_type n, size_type element_size, size_type max_size, <span class="keyword">const</span> <span class="keywordtype">void</span> *src,
+<a name="l00176"></a>00176                                                     internal_array_op1 destroy, internal_array_op2 init );
+<a name="l00177"></a>00177         size_type __TBB_EXPORTED_METHOD internal_grow_to_at_least_with_result( size_type new_size, size_type element_size, internal_array_op2 init, <span class="keyword">const</span> <span class="keywordtype">void</span> *src );
+<a name="l00178"></a>00178 
+<a name="l00180"></a>00180         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_grow_to_at_least( size_type new_size, size_type element_size, internal_array_op2 init, <span class="keyword">const</span> <span class="keywordtype">void</span> *src );
+<a name="l00181"></a>00181 <span class="keyword">private</span>:
+<a name="l00183"></a>00183         <span class="keyword">class </span>helper;
+<a name="l00184"></a>00184         <span class="keyword">friend</span> <span class="keyword">class </span>helper;
+<a name="l00185"></a>00185     };
+<a name="l00186"></a>00186     
+<a name="l00187"></a>00187     <span class="keyword">typedef</span> concurrent_vector_base_v3 concurrent_vector_base;
+<a name="l00188"></a>00188 
+<a name="l00190"></a>00190 
+<a name="l00192"></a>00192     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> Value&gt;
+<a name="l00193"></a>00193     <span class="keyword">class </span>vector_iterator 
+<a name="l00194"></a>00194     {
+<a name="l00196"></a>00196         Container* my_vector;
+<a name="l00197"></a>00197 
+<a name="l00199"></a>00199         size_t my_index;
+<a name="l00200"></a>00200 
+<a name="l00202"></a>00202 
+<a name="l00203"></a>00203         <span class="keyword">mutable</span> Value* my_item;
+<a name="l00204"></a>00204 
+<a name="l00205"></a>00205         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> T&gt;
+<a name="l00206"></a>00206         <span class="keyword">friend</span> vector_iterator&lt;C,T&gt; operator+( ptrdiff_t offset, <span class="keyword">const</span> vector_iterator&lt;C,T&gt;&amp; v );
+<a name="l00207"></a>00207 
+<a name="l00208"></a>00208         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00209"></a>00209         <span class="keyword">friend</span> <span class="keywordtype">bool</span> operator==( <span class="keyword">const</span> vector_iterator&lt;C,T&gt;&amp; i, <span class="keyword">const</span> vector_iterator&lt;C,U&gt;&amp; j );
+<a name="l00210"></a>00210 
+<a name="l00211"></a>00211         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00212"></a>00212         <span class="keyword">friend</span> <span class="keywordtype">bool</span> operator&lt;( const vector_iterator&lt;C,T&gt;&amp; i, <span class="keyword">const</span> vector_iterator&lt;C,U&gt;&amp; j );
+<a name="l00213"></a>00213 
+<a name="l00214"></a>00214         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00215"></a>00215         <span class="keyword">friend</span> ptrdiff_t operator-( <span class="keyword">const</span> vector_iterator&lt;C,T&gt;&amp; i, <span class="keyword">const</span> vector_iterator&lt;C,U&gt;&amp; j );
+<a name="l00216"></a>00216     
+<a name="l00217"></a>00217         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> U&gt;
+<a name="l00218"></a>00218         <span class="keyword">friend</span> <span class="keyword">class </span>internal::vector_iterator;
+<a name="l00219"></a>00219 
+<a name="l00220"></a>00220 <span class="preprocessor">#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)</span>
+<a name="l00221"></a>00221 <span class="preprocessor"></span>        <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A&gt;
+<a name="l00222"></a>00222         <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00166.html">tbb::concurrent_vector</a>;
+<a name="l00223"></a>00223 <span class="preprocessor">#else</span>
+<a name="l00224"></a>00224 <span class="preprocessor"></span><span class="keyword">public</span>: <span class="comment">// workaround for MSVC</span>
+<a name="l00225"></a>00225 <span class="preprocessor">#endif </span>
+<a name="l00226"></a>00226 <span class="preprocessor"></span>
+<a name="l00227"></a>00227         vector_iterator( <span class="keyword">const</span> Container&amp; vector, size_t index, <span class="keywordtype">void</span> *ptr = 0 ) : 
+<a name="l00228"></a>00228             my_vector(const_cast&lt;Container*&gt;(&amp;vector)), 
+<a name="l00229"></a>00229             my_index(index), 
+<a name="l00230"></a>00230             my_item(static_cast&lt;Value*&gt;(ptr))
+<a name="l00231"></a>00231         {}
+<a name="l00232"></a>00232 
+<a name="l00233"></a>00233     <span class="keyword">public</span>:
+<a name="l00235"></a>00235         vector_iterator() : my_vector(NULL), my_index(~size_t(0)), my_item(NULL) {}
+<a name="l00236"></a>00236 
+<a name="l00237"></a>00237         vector_iterator( <span class="keyword">const</span> vector_iterator&lt;Container,typename Container::value_type&gt;&amp; other ) :
+<a name="l00238"></a>00238             my_vector(other.my_vector),
+<a name="l00239"></a>00239             my_index(other.my_index),
+<a name="l00240"></a>00240             my_item(other.my_item)
+<a name="l00241"></a>00241         {}
+<a name="l00242"></a>00242 
+<a name="l00243"></a>00243         vector_iterator operator+( ptrdiff_t offset )<span class="keyword"> const </span>{
+<a name="l00244"></a>00244             <span class="keywordflow">return</span> vector_iterator( *my_vector, my_index+offset );
+<a name="l00245"></a>00245         }
+<a name="l00246"></a>00246         vector_iterator &amp;operator+=( ptrdiff_t offset ) {
+<a name="l00247"></a>00247             my_index+=offset;
+<a name="l00248"></a>00248             my_item = NULL;
+<a name="l00249"></a>00249             <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00250"></a>00250         }
+<a name="l00251"></a>00251         vector_iterator operator-( ptrdiff_t offset )<span class="keyword"> const </span>{
+<a name="l00252"></a>00252             <span class="keywordflow">return</span> vector_iterator( *my_vector, my_index-offset );
+<a name="l00253"></a>00253         }
+<a name="l00254"></a>00254         vector_iterator &amp;operator-=( ptrdiff_t offset ) {
+<a name="l00255"></a>00255             my_index-=offset;
+<a name="l00256"></a>00256             my_item = NULL;
+<a name="l00257"></a>00257             <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00258"></a>00258         }
+<a name="l00259"></a>00259         Value&amp; operator*()<span class="keyword"> const </span>{
+<a name="l00260"></a>00260             Value* item = my_item;
+<a name="l00261"></a>00261             <span class="keywordflow">if</span>( !item ) {
+<a name="l00262"></a>00262                 item = my_item = &amp;my_vector-&gt;internal_subscript(my_index);
+<a name="l00263"></a>00263             }
+<a name="l00264"></a>00264             __TBB_ASSERT( item==&amp;my_vector-&gt;internal_subscript(my_index), <span class="stringliteral">"corrupt cache"</span> );
+<a name="l00265"></a>00265             <span class="keywordflow">return</span> *item;
+<a name="l00266"></a>00266         }
+<a name="l00267"></a>00267         Value&amp; <a class="code" href="a00166.html#4c52f2950bb1832886bd4458eb09d7eb">operator[]</a>( ptrdiff_t k )<span class="keyword"> const </span>{
+<a name="l00268"></a>00268             <span class="keywordflow">return</span> my_vector-&gt;internal_subscript(my_index+k);
+<a name="l00269"></a>00269         }
+<a name="l00270"></a>00270         Value* operator-&gt;()<span class="keyword"> const </span>{<span class="keywordflow">return</span> &amp;operator*();}
+<a name="l00271"></a>00271 
+<a name="l00273"></a>00273         vector_iterator&amp; operator++() {
+<a name="l00274"></a>00274             size_t k = ++my_index;
+<a name="l00275"></a>00275             <span class="keywordflow">if</span>( my_item ) {
+<a name="l00276"></a>00276                 <span class="comment">// Following test uses 2's-complement wizardry</span>
+<a name="l00277"></a>00277                 <span class="keywordflow">if</span>( (k&amp; (k-2))==0 ) {
+<a name="l00278"></a>00278                     <span class="comment">// k is a power of two that is at least k-2</span>
+<a name="l00279"></a>00279                     my_item= NULL;
+<a name="l00280"></a>00280                 } <span class="keywordflow">else</span> {
+<a name="l00281"></a>00281                     ++my_item;
+<a name="l00282"></a>00282                 }
+<a name="l00283"></a>00283             }
+<a name="l00284"></a>00284             <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00285"></a>00285         }
+<a name="l00286"></a>00286 
+<a name="l00288"></a>00288         vector_iterator&amp; operator--() {
+<a name="l00289"></a>00289             __TBB_ASSERT( my_index&gt;0, <span class="stringliteral">"operator--() applied to iterator already at beginning of concurrent_vector"</span> ); 
+<a name="l00290"></a>00290             size_t k = my_index--;
+<a name="l00291"></a>00291             <span class="keywordflow">if</span>( my_item ) {
+<a name="l00292"></a>00292                 <span class="comment">// Following test uses 2's-complement wizardry</span>
+<a name="l00293"></a>00293                 <span class="keywordflow">if</span>( (k&amp; (k-2))==0 ) {
+<a name="l00294"></a>00294                     <span class="comment">// k is a power of two that is at least k-2  </span>
+<a name="l00295"></a>00295                     my_item= NULL;
+<a name="l00296"></a>00296                 } <span class="keywordflow">else</span> {
+<a name="l00297"></a>00297                     --my_item;
+<a name="l00298"></a>00298                 }
+<a name="l00299"></a>00299             }
+<a name="l00300"></a>00300             <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00301"></a>00301         }
+<a name="l00302"></a>00302 
+<a name="l00304"></a>00304         vector_iterator operator++(<span class="keywordtype">int</span>) {
+<a name="l00305"></a>00305             vector_iterator result = *<span class="keyword">this</span>;
+<a name="l00306"></a>00306             operator++();
+<a name="l00307"></a>00307             <span class="keywordflow">return</span> result;
+<a name="l00308"></a>00308         }
+<a name="l00309"></a>00309 
+<a name="l00311"></a>00311         vector_iterator operator--(<span class="keywordtype">int</span>) {
+<a name="l00312"></a>00312             vector_iterator result = *<span class="keyword">this</span>;
+<a name="l00313"></a>00313             operator--();
+<a name="l00314"></a>00314             <span class="keywordflow">return</span> result;
+<a name="l00315"></a>00315         }
+<a name="l00316"></a>00316 
+<a name="l00317"></a>00317         <span class="comment">// STL support</span>
+<a name="l00318"></a>00318 
+<a name="l00319"></a>00319         <span class="keyword">typedef</span> ptrdiff_t difference_type;
+<a name="l00320"></a>00320         <span class="keyword">typedef</span> Value value_type;
+<a name="l00321"></a>00321         <span class="keyword">typedef</span> Value* pointer;
+<a name="l00322"></a>00322         <span class="keyword">typedef</span> Value&amp; reference;
+<a name="l00323"></a>00323         <span class="keyword">typedef</span> std::random_access_iterator_tag iterator_category;
+<a name="l00324"></a>00324     };
+<a name="l00325"></a>00325 
+<a name="l00326"></a>00326     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> T&gt;
+<a name="l00327"></a>00327     vector_iterator&lt;Container,T&gt; operator+( ptrdiff_t offset, <span class="keyword">const</span> vector_iterator&lt;Container,T&gt;&amp; v ) {
+<a name="l00328"></a>00328         <span class="keywordflow">return</span> vector_iterator&lt;Container,T&gt;( *v.my_vector, v.my_index+offset );
+<a name="l00329"></a>00329     }
+<a name="l00330"></a>00330 
+<a name="l00331"></a>00331     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00332"></a>00332     <span class="keywordtype">bool</span> operator==( <span class="keyword">const</span> vector_iterator&lt;Container,T&gt;&amp; i, <span class="keyword">const</span> vector_iterator&lt;Container,U&gt;&amp; j ) {
+<a name="l00333"></a>00333         <span class="keywordflow">return</span> i.my_index==j.my_index &amp;&amp; i.my_vector == j.my_vector;
+<a name="l00334"></a>00334     }
+<a name="l00335"></a>00335 
+<a name="l00336"></a>00336     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00337"></a>00337     <span class="keywordtype">bool</span> operator!=( <span class="keyword">const</span> vector_iterator&lt;Container,T&gt;&amp; i, <span class="keyword">const</span> vector_iterator&lt;Container,U&gt;&amp; j ) {
+<a name="l00338"></a>00338         <span class="keywordflow">return</span> !(i==j);
+<a name="l00339"></a>00339     }
+<a name="l00340"></a>00340 
+<a name="l00341"></a>00341     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00342"></a>00342     <span class="keywordtype">bool</span> operator&lt;( const vector_iterator&lt;Container,T&gt;&amp; i, <span class="keyword">const</span> vector_iterator&lt;Container,U&gt;&amp; j ) {
+<a name="l00343"></a>00343         <span class="keywordflow">return</span> i.my_index&lt;j.my_index;
+<a name="l00344"></a>00344     }
+<a name="l00345"></a>00345 
+<a name="l00346"></a>00346     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00347"></a>00347     <span class="keywordtype">bool</span> operator&gt;( <span class="keyword">const</span> vector_iterator&lt;Container,T&gt;&amp; i, <span class="keyword">const</span> vector_iterator&lt;Container,U&gt;&amp; j ) {
+<a name="l00348"></a>00348         <span class="keywordflow">return</span> j&lt;i;
+<a name="l00349"></a>00349     }
+<a name="l00350"></a>00350 
+<a name="l00351"></a>00351     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00352"></a>00352     <span class="keywordtype">bool</span> operator&gt;=( <span class="keyword">const</span> vector_iterator&lt;Container,T&gt;&amp; i, <span class="keyword">const</span> vector_iterator&lt;Container,U&gt;&amp; j ) {
+<a name="l00353"></a>00353         <span class="keywordflow">return</span> !(i&lt;j);
+<a name="l00354"></a>00354     }
+<a name="l00355"></a>00355 
+<a name="l00356"></a>00356     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00357"></a>00357     <span class="keywordtype">bool</span> operator&lt;=( const vector_iterator&lt;Container,T&gt;&amp; i, <span class="keyword">const</span> vector_iterator&lt;Container,U&gt;&amp; j ) {
+<a name="l00358"></a>00358         <span class="keywordflow">return</span> !(j&lt;i);
+<a name="l00359"></a>00359     }
+<a name="l00360"></a>00360 
+<a name="l00361"></a>00361     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00362"></a>00362     ptrdiff_t operator-( <span class="keyword">const</span> vector_iterator&lt;Container,T&gt;&amp; i, <span class="keyword">const</span> vector_iterator&lt;Container,U&gt;&amp; j ) {
+<a name="l00363"></a>00363         <span class="keywordflow">return</span> ptrdiff_t(i.my_index)-ptrdiff_t(j.my_index);
+<a name="l00364"></a>00364     }
+<a name="l00365"></a>00365 
+<a name="l00366"></a>00366     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A&gt;
+<a name="l00367"></a>00367     <span class="keyword">class </span>allocator_base {
+<a name="l00368"></a>00368     <span class="keyword">public</span>:
+<a name="l00369"></a>00369         <span class="keyword">typedef</span> <span class="keyword">typename</span> A::template
+<a name="l00370"></a>00370             rebind&lt;T&gt;::other allocator_type;
+<a name="l00371"></a>00371         allocator_type my_allocator;
+<a name="l00372"></a>00372 
+<a name="l00373"></a>00373         allocator_base(<span class="keyword">const</span> allocator_type &amp;a = allocator_type() ) : my_allocator(a) {}
+<a name="l00374"></a>00374     };
+<a name="l00375"></a>00375 
+<a name="l00376"></a>00376 } <span class="comment">// namespace internal</span>
+<a name="l00378"></a>00378 <span class="comment"></span>
+<a name="l00380"></a>00380 
+<a name="l00441"></a>00441 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A&gt;
+<a name="l00442"></a><a class="code" href="a00166.html">00442</a> <span class="keyword">class </span><a class="code" href="a00166.html">concurrent_vector</a>: <span class="keyword">protected</span> internal::allocator_base&lt;T, A&gt;,
+<a name="l00443"></a>00443                          <span class="keyword">private</span> internal::concurrent_vector_base {
+<a name="l00444"></a>00444 <span class="keyword">private</span>:
+<a name="l00445"></a>00445     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> I&gt;
+<a name="l00446"></a>00446     <span class="keyword">class </span>generic_range_type: <span class="keyword">public</span> <a class="code" href="a00152.html">blocked_range</a>&lt;I&gt; {
+<a name="l00447"></a>00447     <span class="keyword">public</span>:
+<a name="l00448"></a>00448         <span class="keyword">typedef</span> T value_type;
+<a name="l00449"></a>00449         <span class="keyword">typedef</span> T&amp; reference;
+<a name="l00450"></a>00450         <span class="keyword">typedef</span> <span class="keyword">const</span> T&amp; const_reference;
+<a name="l00451"></a>00451         <span class="keyword">typedef</span> I iterator;
+<a name="l00452"></a>00452         <span class="keyword">typedef</span> ptrdiff_t difference_type;
+<a name="l00453"></a>00453         generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : <a class="code" href="a00152.html">blocked_range&lt;I&gt;</a>(begin_,end_,grainsize_) {} 
+<a name="l00454"></a>00454         template&lt;typename U&gt;
+<a name="l00455"></a>00455         generic_range_type( <span class="keyword">const</span> generic_range_type&lt;U&gt;&amp; r) : <a class="code" href="a00152.html">blocked_range&lt;I&gt;</a>(r.begin(),r.end(),r.grainsize()) {} 
+<a name="l00456"></a>00456         generic_range_type( generic_range_type&amp; r, <a class="code" href="a00203.html">split</a> ) : <a class="code" href="a00152.html">blocked_range&lt;I&gt;</a>(r,<a class="code" href="a00203.html">split</a>()) {}
+<a name="l00457"></a>00457     };
+<a name="l00458"></a>00458 
+<a name="l00459"></a>00459     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> U&gt;
+<a name="l00460"></a>00460     <span class="keyword">friend</span> <span class="keyword">class </span>internal::vector_iterator;
+<a name="l00461"></a>00461 <span class="keyword">public</span>:
+<a name="l00462"></a>00462     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00463"></a>00463     <span class="comment">// STL compatible types</span>
+<a name="l00464"></a>00464     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00465"></a>00465     <span class="keyword">typedef</span> internal::concurrent_vector_base_v3::size_type size_type;
+<a name="l00466"></a>00466     <span class="keyword">typedef</span> <span class="keyword">typename</span> internal::allocator_base&lt;T, A&gt;::allocator_type allocator_type;
+<a name="l00467"></a>00467 
+<a name="l00468"></a>00468     <span class="keyword">typedef</span> T value_type;
+<a name="l00469"></a>00469     <span class="keyword">typedef</span> ptrdiff_t difference_type;
+<a name="l00470"></a>00470     <span class="keyword">typedef</span> T&amp; reference;
+<a name="l00471"></a>00471     <span class="keyword">typedef</span> <span class="keyword">const</span> T&amp; const_reference;
+<a name="l00472"></a>00472     <span class="keyword">typedef</span> T *pointer;
+<a name="l00473"></a>00473     <span class="keyword">typedef</span> <span class="keyword">const</span> T *const_pointer;
+<a name="l00474"></a>00474 
+<a name="l00475"></a>00475     <span class="keyword">typedef</span> internal::vector_iterator&lt;concurrent_vector,T&gt; iterator;
+<a name="l00476"></a>00476     <span class="keyword">typedef</span> internal::vector_iterator&lt;concurrent_vector,const T&gt; const_iterator;
+<a name="l00477"></a>00477 
+<a name="l00478"></a>00478 <span class="preprocessor">#if !defined(_MSC_VER) || _CPPLIB_VER&gt;=300 </span>
+<a name="l00479"></a>00479 <span class="preprocessor"></span>    <span class="comment">// Assume ISO standard definition of std::reverse_iterator</span>
+<a name="l00480"></a>00480     <span class="keyword">typedef</span> std::reverse_iterator&lt;iterator&gt; reverse_iterator;
+<a name="l00481"></a>00481     <span class="keyword">typedef</span> std::reverse_iterator&lt;const_iterator&gt; const_reverse_iterator;
+<a name="l00482"></a>00482 <span class="preprocessor">#else</span>
+<a name="l00483"></a>00483 <span class="preprocessor"></span>    <span class="comment">// Use non-standard std::reverse_iterator</span>
+<a name="l00484"></a>00484     <span class="keyword">typedef</span> std::reverse_iterator&lt;iterator,T,T&amp;,T*&gt; reverse_iterator;
+<a name="l00485"></a>00485     <span class="keyword">typedef</span> std::reverse_iterator&lt;const_iterator,T,const T&amp;,const T*&gt; const_reverse_iterator;
+<a name="l00486"></a>00486 <span class="preprocessor">#endif </span><span class="comment">/* defined(_MSC_VER) &amp;&amp; (_MSC_VER&lt;1300) */</span>
+<a name="l00487"></a>00487 
+<a name="l00488"></a>00488     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00489"></a>00489     <span class="comment">// Parallel algorithm support</span>
+<a name="l00490"></a>00490     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00491"></a>00491     <span class="keyword">typedef</span> generic_range_type&lt;iterator&gt; range_type;
+<a name="l00492"></a>00492     <span class="keyword">typedef</span> generic_range_type&lt;const_iterator&gt; const_range_type;
+<a name="l00493"></a>00493 
+<a name="l00494"></a>00494     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00495"></a>00495     <span class="comment">// STL compatible constructors &amp; destructors</span>
+<a name="l00496"></a>00496     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00497"></a>00497 
+<a name="l00499"></a><a class="code" href="a00166.html#2c8ca9cabfcd30ad5943324c853664b5">00499</a>     <span class="keyword">explicit</span> <a class="code" href="a00166.html">concurrent_vector</a>(<span class="keyword">const</span> allocator_type &amp;a = allocator_type())
+<a name="l00500"></a>00500         : internal::allocator_base&lt;T, A&gt;(a), internal::concurrent_vector_base()
+<a name="l00501"></a>00501     {
+<a name="l00502"></a>00502         vector_allocator_ptr = &amp;internal_allocator;
+<a name="l00503"></a>00503     }
+<a name="l00504"></a>00504 
+<a name="l00506"></a><a class="code" href="a00166.html#dd8a200b99a8088435a37934b58fe335">00506</a>     <a class="code" href="a00166.html">concurrent_vector</a>( <span class="keyword">const</span> <a class="code" href="a00166.html">concurrent_vector</a>&amp; vector, <span class="keyword">const</span> allocator_type&amp; a = allocator_type() )
+<a name="l00507"></a>00507         : internal::allocator_base&lt;T, A&gt;(a), internal::concurrent_vector_base()
+<a name="l00508"></a>00508     {
+<a name="l00509"></a>00509         vector_allocator_ptr = &amp;internal_allocator;
+<a name="l00510"></a>00510         __TBB_TRY {
+<a name="l00511"></a>00511             internal_copy(vector, <span class="keyword">sizeof</span>(T), &amp;copy_array);
+<a name="l00512"></a>00512         } __TBB_CATCH(...) {
+<a name="l00513"></a>00513             segment_t *table = my_segment;
+<a name="l00514"></a>00514             internal_free_segments( reinterpret_cast&lt;void**&gt;(table), internal_clear(&amp;destroy_array), my_first_block );
+<a name="l00515"></a>00515             __TBB_RETHROW();
+<a name="l00516"></a>00516         }
+<a name="l00517"></a>00517     }
+<a name="l00518"></a>00518 
+<a name="l00520"></a>00520     <span class="keyword">template</span>&lt;<span class="keyword">class</span> M&gt;
+<a name="l00521"></a><a class="code" href="a00166.html#64432f13f7b29bfe4acfb5568f34f3a8">00521</a>     <a class="code" href="a00166.html">concurrent_vector</a>( <span class="keyword">const</span> <a class="code" href="a00166.html">concurrent_vector&lt;T, M&gt;</a>&amp; vector, <span class="keyword">const</span> allocator_type&amp; a = allocator_type() )
+<a name="l00522"></a>00522         : internal::allocator_base&lt;T, A&gt;(a), internal::concurrent_vector_base()
+<a name="l00523"></a>00523     {
+<a name="l00524"></a>00524         vector_allocator_ptr = &amp;internal_allocator;
+<a name="l00525"></a>00525         __TBB_TRY {
+<a name="l00526"></a>00526             internal_copy(vector.<a class="code" href="a00166.html#bb5ae659871478f1f5c68039e1273e12">internal_vector_base</a>(), <span class="keyword">sizeof</span>(T), &amp;copy_array);
+<a name="l00527"></a>00527         } __TBB_CATCH(...) {
+<a name="l00528"></a>00528             segment_t *table = my_segment;
+<a name="l00529"></a>00529             internal_free_segments( reinterpret_cast&lt;void**&gt;(table), internal_clear(&amp;destroy_array), my_first_block );
+<a name="l00530"></a>00530             __TBB_RETHROW();
+<a name="l00531"></a>00531         }
+<a name="l00532"></a>00532     }
+<a name="l00533"></a>00533 
+<a name="l00535"></a><a class="code" href="a00166.html#2a2e261dfe1cab3f73f7b1a94137cfca">00535</a>     <span class="keyword">explicit</span> <a class="code" href="a00166.html">concurrent_vector</a>(size_type n)
+<a name="l00536"></a>00536     {
+<a name="l00537"></a>00537         vector_allocator_ptr = &amp;internal_allocator;
+<a name="l00538"></a>00538         __TBB_TRY {
+<a name="l00539"></a>00539             internal_resize( n, <span class="keyword">sizeof</span>(T), max_size(), NULL, &amp;destroy_array, &amp;initialize_array );
+<a name="l00540"></a>00540         } __TBB_CATCH(...) {
+<a name="l00541"></a>00541             segment_t *table = my_segment;
+<a name="l00542"></a>00542             internal_free_segments( reinterpret_cast&lt;void**&gt;(table), internal_clear(&amp;destroy_array), my_first_block );
+<a name="l00543"></a>00543             __TBB_RETHROW();
+<a name="l00544"></a>00544         }
+<a name="l00545"></a>00545     }
+<a name="l00546"></a>00546 
+<a name="l00548"></a><a class="code" href="a00166.html#3883a8a908b44e249a57f454de3f55d8">00548</a>     <a class="code" href="a00166.html">concurrent_vector</a>(size_type n, const_reference t, <span class="keyword">const</span> allocator_type&amp; a = allocator_type())
+<a name="l00549"></a>00549         : internal::allocator_base&lt;T, A&gt;(a)
+<a name="l00550"></a>00550     {
+<a name="l00551"></a>00551         vector_allocator_ptr = &amp;internal_allocator;
+<a name="l00552"></a>00552         __TBB_TRY {
+<a name="l00553"></a>00553             internal_resize( n, <span class="keyword">sizeof</span>(T), max_size(), static_cast&lt;const void*&gt;(&amp;t), &amp;destroy_array, &amp;initialize_array_by );
+<a name="l00554"></a>00554         } __TBB_CATCH(...) {
+<a name="l00555"></a>00555             segment_t *table = my_segment;
+<a name="l00556"></a>00556             internal_free_segments( reinterpret_cast&lt;void**&gt;(table), internal_clear(&amp;destroy_array), my_first_block );
+<a name="l00557"></a>00557             __TBB_RETHROW();
+<a name="l00558"></a>00558         }
+<a name="l00559"></a>00559     }
+<a name="l00560"></a>00560 
+<a name="l00562"></a>00562     <span class="keyword">template</span>&lt;<span class="keyword">class</span> I&gt;
+<a name="l00563"></a><a class="code" href="a00166.html#4450de83c5862ea4bcd9443fd7e67419">00563</a>     <a class="code" href="a00166.html">concurrent_vector</a>(I first, I last, <span class="keyword">const</span> allocator_type &amp;a = allocator_type())
+<a name="l00564"></a>00564         : internal::allocator_base&lt;T, A&gt;(a)
+<a name="l00565"></a>00565     {
+<a name="l00566"></a>00566         vector_allocator_ptr = &amp;internal_allocator;
+<a name="l00567"></a>00567         __TBB_TRY {
+<a name="l00568"></a>00568             internal_assign_range(first, last, <span class="keyword">static_cast</span>&lt;is_integer_tag&lt;std::numeric_limits&lt;I&gt;::is_integer&gt; *&gt;(0) );
+<a name="l00569"></a>00569         } __TBB_CATCH(...) {
+<a name="l00570"></a>00570             segment_t *table = my_segment;
+<a name="l00571"></a>00571             internal_free_segments( reinterpret_cast&lt;void**&gt;(table), internal_clear(&amp;destroy_array), my_first_block );
+<a name="l00572"></a>00572             __TBB_RETHROW();
+<a name="l00573"></a>00573         }
+<a name="l00574"></a>00574     }
+<a name="l00575"></a>00575 
+<a name="l00577"></a><a class="code" href="a00166.html#691f0f3cda3e489c37a657016e375eaf">00577</a>     <a class="code" href="a00166.html">concurrent_vector</a>&amp; operator=( <span class="keyword">const</span> <a class="code" href="a00166.html">concurrent_vector</a>&amp; vector ) {
+<a name="l00578"></a>00578         <span class="keywordflow">if</span>( <span class="keyword">this</span> != &amp;vector )
+<a name="l00579"></a>00579             internal_assign(vector, <span class="keyword">sizeof</span>(T), &amp;destroy_array, &amp;assign_array, &amp;copy_array);
+<a name="l00580"></a>00580         <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00581"></a>00581     }
+<a name="l00582"></a>00582 
+<a name="l00584"></a>00584     <span class="keyword">template</span>&lt;<span class="keyword">class</span> M&gt;
+<a name="l00585"></a><a class="code" href="a00166.html#19f4ab88a01b0fd056af3bba463e7bd6">00585</a>     <a class="code" href="a00166.html">concurrent_vector</a>&amp; operator=( <span class="keyword">const</span> <a class="code" href="a00166.html">concurrent_vector&lt;T, M&gt;</a>&amp; vector ) {
+<a name="l00586"></a>00586         <span class="keywordflow">if</span>( static_cast&lt;void*&gt;( <span class="keyword">this</span> ) != static_cast&lt;const void*&gt;( &amp;vector ) )
+<a name="l00587"></a>00587             internal_assign(vector.internal_vector_base(),
+<a name="l00588"></a>00588                 <span class="keyword">sizeof</span>(T), &amp;destroy_array, &amp;assign_array, &amp;copy_array);
+<a name="l00589"></a>00589         <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00590"></a>00590     }
+<a name="l00591"></a>00591 
+<a name="l00592"></a>00592     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00593"></a>00593     <span class="comment">// Concurrent operations</span>
+<a name="l00594"></a>00594     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00596"></a>00596 <span class="comment"></span><span class="preprocessor">#if TBB_DEPRECATED</span>
+<a name="l00597"></a>00597 <span class="preprocessor"></span>
+<a name="l00598"></a><a class="code" href="a00166.html#30484e3959892fd5392fa93c873c31f0">00598</a>     size_type grow_by( size_type delta ) {
+<a name="l00599"></a>00599         <span class="keywordflow">return</span> delta ? internal_grow_by( delta, <span class="keyword">sizeof</span>(T), &amp;initialize_array, NULL ) : my_early_size;
+<a name="l00600"></a>00600     }
+<a name="l00601"></a>00601 <span class="preprocessor">#else</span>
+<a name="l00602"></a>00602 <span class="preprocessor"></span>
+<a name="l00603"></a><a class="code" href="a00166.html#c8177b1865270ea68aa1ab9148e5e35e">00603</a>     iterator grow_by( size_type delta ) {
+<a name="l00604"></a>00604         <span class="keywordflow">return</span> iterator(*<span class="keyword">this</span>, delta ? internal_grow_by( delta, <span class="keyword">sizeof</span>(T), &amp;initialize_array, NULL ) : my_early_size);
+<a name="l00605"></a>00605     }
+<a name="l00606"></a>00606 <span class="preprocessor">#endif</span>
+<a name="l00607"></a>00607 <span class="preprocessor"></span>
+<a name="l00609"></a>00609 <span class="preprocessor">#if TBB_DEPRECATED</span>
+<a name="l00610"></a>00610 <span class="preprocessor"></span>
+<a name="l00611"></a><a class="code" href="a00166.html#38274ab3f772ecba600c7daca7690102">00611</a>     size_type grow_by( size_type delta, const_reference t ) {
+<a name="l00612"></a>00612         <span class="keywordflow">return</span> delta ? internal_grow_by( delta, <span class="keyword">sizeof</span>(T), &amp;initialize_array_by, static_cast&lt;const void*&gt;(&amp;t) ) : my_early_size;
+<a name="l00613"></a>00613     }
+<a name="l00614"></a>00614 <span class="preprocessor">#else</span>
+<a name="l00615"></a>00615 <span class="preprocessor"></span>
+<a name="l00616"></a><a class="code" href="a00166.html#473a59a4c9308b93411b898b3110d26c">00616</a>     iterator grow_by( size_type delta, const_reference t ) {
+<a name="l00617"></a>00617         <span class="keywordflow">return</span> iterator(*<span class="keyword">this</span>, delta ? internal_grow_by( delta, <span class="keyword">sizeof</span>(T), &amp;initialize_array_by, static_cast&lt;const void*&gt;(&amp;t) ) : my_early_size);
+<a name="l00618"></a>00618     }
+<a name="l00619"></a>00619 <span class="preprocessor">#endif</span>
+<a name="l00620"></a>00620 <span class="preprocessor"></span>
+<a name="l00622"></a>00622 <span class="preprocessor">#if TBB_DEPRECATED</span>
+<a name="l00623"></a>00623 <span class="preprocessor"></span>
+<a name="l00625"></a><a class="code" href="a00166.html#47fe588214dd5fa06ab6e8ab78d83874">00625</a>     <span class="keywordtype">void</span> grow_to_at_least( size_type n ) {
+<a name="l00626"></a>00626         <span class="keywordflow">if</span>( n ) internal_grow_to_at_least_with_result( n, <span class="keyword">sizeof</span>(T), &amp;initialize_array, NULL );
+<a name="l00627"></a>00627     };
+<a name="l00628"></a>00628 <span class="preprocessor">#else</span>
+<a name="l00629"></a>00629 <span class="preprocessor"></span>
+<a name="l00633"></a><a class="code" href="a00166.html#a7e3b67c8ccab16d0aecc80899ae799d">00633</a>     iterator grow_to_at_least( size_type n ) {
+<a name="l00634"></a>00634         size_type m=0;
+<a name="l00635"></a>00635         <span class="keywordflow">if</span>( n ) {
+<a name="l00636"></a>00636             m = internal_grow_to_at_least_with_result( n, <span class="keyword">sizeof</span>(T), &amp;initialize_array, NULL );
+<a name="l00637"></a>00637             <span class="keywordflow">if</span>( m&gt;n ) m=n;
+<a name="l00638"></a>00638         }
+<a name="l00639"></a>00639         <span class="keywordflow">return</span> iterator(*<span class="keyword">this</span>, m);
+<a name="l00640"></a>00640     };
+<a name="l00641"></a>00641 <span class="preprocessor">#endif</span>
+<a name="l00642"></a>00642 <span class="preprocessor"></span>
+<a name="l00644"></a>00644 <span class="preprocessor">#if TBB_DEPRECATED</span>
+<a name="l00645"></a><a class="code" href="a00166.html#e94e038f915c0268fdf2d3d7f87d81b8">00645</a> <span class="preprocessor"></span>    size_type push_back( const_reference item )
+<a name="l00646"></a>00646 <span class="preprocessor">#else</span>
+<a name="l00647"></a>00647 <span class="preprocessor"></span>
+<a name="l00648"></a>00648     iterator push_back( const_reference item )
+<a name="l00649"></a>00649 <span class="preprocessor">#endif</span>
+<a name="l00650"></a>00650 <span class="preprocessor"></span>    {
+<a name="l00651"></a>00651         size_type k;
+<a name="l00652"></a>00652         <span class="keywordtype">void</span> *ptr = internal_push_back(<span class="keyword">sizeof</span>(T),k);
+<a name="l00653"></a>00653         internal_loop_guide loop(1, ptr);
+<a name="l00654"></a>00654         loop.init(&amp;item);
+<a name="l00655"></a>00655 <span class="preprocessor">#if TBB_DEPRECATED</span>
+<a name="l00656"></a>00656 <span class="preprocessor"></span>        <span class="keywordflow">return</span> k;
+<a name="l00657"></a>00657 <span class="preprocessor">#else</span>
+<a name="l00658"></a>00658 <span class="preprocessor"></span>        <span class="keywordflow">return</span> iterator(*<span class="keyword">this</span>, k, ptr);
+<a name="l00659"></a>00659 <span class="preprocessor">#endif</span>
+<a name="l00660"></a>00660 <span class="preprocessor"></span>    }
+<a name="l00661"></a>00661 
+<a name="l00663"></a>00663 
+<a name="l00665"></a><a class="code" href="a00166.html#4c52f2950bb1832886bd4458eb09d7eb">00665</a>     reference operator[]( size_type index ) {
+<a name="l00666"></a>00666         <span class="keywordflow">return</span> internal_subscript(index);
+<a name="l00667"></a>00667     }
+<a name="l00668"></a>00668 
+<a name="l00670"></a><a class="code" href="a00166.html#c6fade5c732cc95274d1d8277ea619d1">00670</a>     const_reference operator[]( size_type index )<span class="keyword"> const </span>{
+<a name="l00671"></a>00671         <span class="keywordflow">return</span> internal_subscript(index);
+<a name="l00672"></a>00672     }
+<a name="l00673"></a>00673 
+<a name="l00675"></a><a class="code" href="a00166.html#0c073ca43e787c7cbf7b0e26d2221748">00675</a>     reference at( size_type index ) {
+<a name="l00676"></a>00676         <span class="keywordflow">return</span> internal_subscript_with_exceptions(index);
+<a name="l00677"></a>00677     }
+<a name="l00678"></a>00678 
+<a name="l00680"></a><a class="code" href="a00166.html#23e14a38af748edff96a7adc3a0f1c58">00680</a>     const_reference at( size_type index )<span class="keyword"> const </span>{
+<a name="l00681"></a>00681         <span class="keywordflow">return</span> internal_subscript_with_exceptions(index);
+<a name="l00682"></a>00682     }
+<a name="l00683"></a>00683 
+<a name="l00685"></a><a class="code" href="a00166.html#a4c6ffff3bf08b92939aa2fc516edfba">00685</a>     range_type range( size_t grainsize = 1) {
+<a name="l00686"></a>00686         <span class="keywordflow">return</span> range_type( begin(), end(), grainsize );
+<a name="l00687"></a>00687     }
+<a name="l00688"></a>00688 
+<a name="l00690"></a><a class="code" href="a00166.html#3d09ccfb581b879ae64203741035e193">00690</a>     const_range_type range( size_t grainsize = 1 )<span class="keyword"> const </span>{
+<a name="l00691"></a>00691         <span class="keywordflow">return</span> const_range_type( begin(), end(), grainsize );
+<a name="l00692"></a>00692     }
+<a name="l00693"></a>00693     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00694"></a>00694     <span class="comment">// Capacity</span>
+<a name="l00695"></a>00695     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00697"></a><a class="code" href="a00166.html#715fe313c4a9c22731cc404dd80c9ec9">00697</a> <span class="comment"></span>    size_type size()<span class="keyword"> const </span>{
+<a name="l00698"></a>00698         size_type sz = my_early_size, cp = internal_capacity();
+<a name="l00699"></a>00699         <span class="keywordflow">return</span> cp &lt; sz ? cp : sz;
+<a name="l00700"></a>00700     }
+<a name="l00701"></a>00701 
+<a name="l00703"></a><a class="code" href="a00166.html#c6426cb93cf20d3af40f3c90f1f0481a">00703</a>     <span class="keywordtype">bool</span> empty()<span class="keyword"> const </span>{<span class="keywordflow">return</span> !my_early_size;}
+<a name="l00704"></a>00704 
+<a name="l00706"></a><a class="code" href="a00166.html#3ed6b9ae7217af5103d974045b6f5cd5">00706</a>     size_type capacity()<span class="keyword"> const </span>{<span class="keywordflow">return</span> internal_capacity();}
+<a name="l00707"></a>00707 
+<a name="l00709"></a>00709 
+<a name="l00711"></a><a class="code" href="a00166.html#5a0ce05026994b010018f72cfdeb72c1">00711</a>     <span class="keywordtype">void</span> reserve( size_type n ) {
+<a name="l00712"></a>00712         <span class="keywordflow">if</span>( n )
+<a name="l00713"></a>00713             internal_reserve(n, <span class="keyword">sizeof</span>(T), max_size());
+<a name="l00714"></a>00714     }
+<a name="l00715"></a>00715 
+<a name="l00717"></a><a class="code" href="a00166.html#8dfb0cb0eef96d440b4dcf801807a718">00717</a>     <span class="keywordtype">void</span> resize( size_type n ) {
+<a name="l00718"></a>00718         internal_resize( n, <span class="keyword">sizeof</span>(T), max_size(), NULL, &amp;destroy_array, &amp;initialize_array );
+<a name="l00719"></a>00719     }
+<a name="l00720"></a>00720     
+<a name="l00722"></a><a class="code" href="a00166.html#98ce6b2c6d2622f0c030b46dfac3880c">00722</a>     <span class="keywordtype">void</span> resize( size_type n, const_reference t ) {
+<a name="l00723"></a>00723         internal_resize( n, <span class="keyword">sizeof</span>(T), max_size(), static_cast&lt;const void*&gt;(&amp;t), &amp;destroy_array, &amp;initialize_array_by );
+<a name="l00724"></a>00724     }
+<a name="l00725"></a>00725    
+<a name="l00726"></a>00726 <span class="preprocessor">#if TBB_DEPRECATED </span>
+<a name="l00728"></a><a class="code" href="a00166.html#1693d1da41b1a8235871be9c6633be35">00728</a> <span class="preprocessor">    void compact() {shrink_to_fit();}</span>
+<a name="l00729"></a>00729 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* TBB_DEPRECATED */</span>
+<a name="l00730"></a>00730 
+<a name="l00732"></a>00732     <span class="keywordtype">void</span> shrink_to_fit();
+<a name="l00733"></a>00733 
+<a name="l00735"></a><a class="code" href="a00166.html#2c248a017f0576df3e7cd99627836fd6">00735</a>     size_type max_size()<span class="keyword"> const </span>{<span class="keywordflow">return</span> (~size_type(0))/<span class="keyword">sizeof</span>(T);}
+<a name="l00736"></a>00736 
+<a name="l00737"></a>00737     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00738"></a>00738     <span class="comment">// STL support</span>
+<a name="l00739"></a>00739     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00740"></a>00740 
+<a name="l00742"></a><a class="code" href="a00166.html#730b23a251ecb6d37f692fb22f38e029">00742</a>     iterator begin() {<span class="keywordflow">return</span> iterator(*<span class="keyword">this</span>,0);}
+<a name="l00744"></a><a class="code" href="a00166.html#c0b51160e5a764982ec97a455f94f2c6">00744</a>     iterator end() {<span class="keywordflow">return</span> iterator(*<span class="keyword">this</span>,size());}
+<a name="l00746"></a><a class="code" href="a00166.html#78a06182276ff758788d4c0623ae0d71">00746</a>     const_iterator begin()<span class="keyword"> const </span>{<span class="keywordflow">return</span> const_iterator(*<span class="keyword">this</span>,0);}
+<a name="l00748"></a><a class="code" href="a00166.html#1e6aa764ce5a1cbd24526f68bc0a2f6b">00748</a>     const_iterator end()<span class="keyword"> const </span>{<span class="keywordflow">return</span> const_iterator(*<span class="keyword">this</span>,size());}
+<a name="l00750"></a><a class="code" href="a00166.html#f88fcf1c920693c39bd9709db33c199f">00750</a>     const_iterator cbegin()<span class="keyword"> const </span>{<span class="keywordflow">return</span> const_iterator(*<span class="keyword">this</span>,0);}
+<a name="l00752"></a><a class="code" href="a00166.html#0c15a5d0f1cf75d687dabba07da1d46b">00752</a>     const_iterator cend()<span class="keyword"> const </span>{<span class="keywordflow">return</span> const_iterator(*<span class="keyword">this</span>,size());}
+<a name="l00754"></a><a class="code" href="a00166.html#5e220926d09236d98f04fe0721e5f9a1">00754</a>     reverse_iterator rbegin() {<span class="keywordflow">return</span> reverse_iterator(end());}
+<a name="l00756"></a><a class="code" href="a00166.html#290119a4eb43cd6a9e98fa17016ba3c2">00756</a>     reverse_iterator rend() {<span class="keywordflow">return</span> reverse_iterator(begin());}
+<a name="l00758"></a><a class="code" href="a00166.html#9f9c103e18d5f212703805354074ad44">00758</a>     const_reverse_iterator rbegin()<span class="keyword"> const </span>{<span class="keywordflow">return</span> const_reverse_iterator(end());}
+<a name="l00760"></a><a class="code" href="a00166.html#d438b9b32ea3a8ffb703015b6dce055b">00760</a>     const_reverse_iterator rend()<span class="keyword"> const </span>{<span class="keywordflow">return</span> const_reverse_iterator(begin());}
+<a name="l00762"></a><a class="code" href="a00166.html#db78a1d28c9c966050e8a2926d834a33">00762</a>     const_reverse_iterator crbegin()<span class="keyword"> const </span>{<span class="keywordflow">return</span> const_reverse_iterator(end());}
+<a name="l00764"></a><a class="code" href="a00166.html#fff9cece89438587997ebedf93c5e962">00764</a>     const_reverse_iterator crend()<span class="keyword"> const </span>{<span class="keywordflow">return</span> const_reverse_iterator(begin());}
+<a name="l00766"></a><a class="code" href="a00166.html#15181759c0bfa2ddce5d10c7550e0002">00766</a>     reference front() {
+<a name="l00767"></a>00767         __TBB_ASSERT( size()&gt;0, NULL);
+<a name="l00768"></a>00768         <span class="keywordflow">return</span> static_cast&lt;T*&gt;(my_segment[0].array)[0];
+<a name="l00769"></a>00769     }
+<a name="l00771"></a><a class="code" href="a00166.html#502615a858eb9fa0390ee59169065e90">00771</a>     const_reference front()<span class="keyword"> const </span>{
+<a name="l00772"></a>00772         __TBB_ASSERT( size()&gt;0, NULL);
+<a name="l00773"></a>00773         <span class="keywordflow">return</span> static_cast&lt;const T*&gt;(my_segment[0].array)[0];
+<a name="l00774"></a>00774     }
+<a name="l00776"></a><a class="code" href="a00166.html#41ce48d6015a1a2812d41cf620ec3476">00776</a>     reference back() {
+<a name="l00777"></a>00777         __TBB_ASSERT( size()&gt;0, NULL);
+<a name="l00778"></a>00778         <span class="keywordflow">return</span> internal_subscript( size()-1 );
+<a name="l00779"></a>00779     }
+<a name="l00781"></a><a class="code" href="a00166.html#bd518e204107d07fd08d0ec5bdfd383d">00781</a>     const_reference back()<span class="keyword"> const </span>{
+<a name="l00782"></a>00782         __TBB_ASSERT( size()&gt;0, NULL);
+<a name="l00783"></a>00783         <span class="keywordflow">return</span> internal_subscript( size()-1 );
+<a name="l00784"></a>00784     }
+<a name="l00786"></a><a class="code" href="a00166.html#2fdba8e90de6a4d2300222236d46758e">00786</a>     allocator_type get_allocator()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> this-&gt;my_allocator; }
+<a name="l00787"></a>00787 
+<a name="l00789"></a><a class="code" href="a00166.html#423e5aa15e0e3309ad86d026fd85f6f6">00789</a>     <span class="keywordtype">void</span> assign(size_type n, const_reference t) {
+<a name="l00790"></a>00790         clear();
+<a name="l00791"></a>00791         internal_resize( n, <span class="keyword">sizeof</span>(T), max_size(), static_cast&lt;const void*&gt;(&amp;t), &amp;destroy_array, &amp;initialize_array_by );
+<a name="l00792"></a>00792     }
+<a name="l00793"></a>00793 
+<a name="l00795"></a>00795     <span class="keyword">template</span>&lt;<span class="keyword">class</span> I&gt;
+<a name="l00796"></a><a class="code" href="a00166.html#93a06b3112cb804f42f40efb5e7387b4">00796</a>     <span class="keywordtype">void</span> assign(I first, I last) {
+<a name="l00797"></a>00797         clear(); internal_assign_range( first, last, <span class="keyword">static_cast</span>&lt;is_integer_tag&lt;std::numeric_limits&lt;I&gt;::is_integer&gt; *&gt;(0) );
+<a name="l00798"></a>00798     }
+<a name="l00799"></a>00799 
+<a name="l00801"></a><a class="code" href="a00166.html#96c9c4bd968ed3edb8dd276854d2dae0">00801</a>     <span class="keywordtype">void</span> swap(<a class="code" href="a00166.html">concurrent_vector</a> &amp;vector) {
+<a name="l00802"></a>00802         <span class="keywordflow">if</span>( <span class="keyword">this</span> != &amp;vector ) {
+<a name="l00803"></a>00803             concurrent_vector_base_v3::internal_swap(static_cast&lt;concurrent_vector_base_v3&amp;&gt;(vector));
+<a name="l00804"></a>00804             std::swap(this-&gt;my_allocator, vector.my_allocator);
+<a name="l00805"></a>00805         }
+<a name="l00806"></a>00806     }
+<a name="l00807"></a>00807 
+<a name="l00809"></a>00809 
+<a name="l00810"></a><a class="code" href="a00166.html#26f937a359a66b6aae904c3cd9a3c444">00810</a>     <span class="keywordtype">void</span> clear() {
+<a name="l00811"></a>00811         internal_clear(&amp;destroy_array);
+<a name="l00812"></a>00812     }
+<a name="l00813"></a>00813 
+<a name="l00815"></a><a class="code" href="a00166.html#da2444b28bb840d38f60d0030333a5fc">00815</a>     ~<a class="code" href="a00166.html">concurrent_vector</a>() {
+<a name="l00816"></a>00816         segment_t *table = my_segment;
+<a name="l00817"></a>00817         internal_free_segments( reinterpret_cast&lt;void**&gt;(table), internal_clear(&amp;destroy_array), my_first_block );
+<a name="l00818"></a>00818         <span class="comment">// base class destructor call should be then</span>
+<a name="l00819"></a>00819     }
+<a name="l00820"></a>00820 
+<a name="l00821"></a>00821     <span class="keyword">const</span> internal::concurrent_vector_base_v3 &amp;internal_vector_base()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> *<span class="keyword">this</span>; }
+<a name="l00822"></a>00822 <span class="keyword">private</span>:
+<a name="l00824"></a>00824     <span class="keyword">static</span> <span class="keywordtype">void</span> *internal_allocator(internal::concurrent_vector_base_v3 &amp;vb, size_t k) {
+<a name="l00825"></a>00825         <span class="keywordflow">return</span> static_cast&lt;concurrent_vector&lt;T, A&gt;&amp;&gt;(vb).my_allocator.allocate(k);
+<a name="l00826"></a>00826     }
+<a name="l00828"></a>00828     <span class="keywordtype">void</span> internal_free_segments(<span class="keywordtype">void</span> *table[], segment_index_t k, segment_index_t first_block);
+<a name="l00829"></a>00829 
+<a name="l00831"></a>00831     T&amp; internal_subscript( size_type index ) <span class="keyword">const</span>;
+<a name="l00832"></a>00832 
+<a name="l00834"></a>00834     T&amp; internal_subscript_with_exceptions( size_type index ) <span class="keyword">const</span>;
+<a name="l00835"></a>00835 
+<a name="l00837"></a>00837     <span class="keywordtype">void</span> internal_assign_n(size_type n, const_pointer p) {
+<a name="l00838"></a>00838         internal_resize( n, <span class="keyword">sizeof</span>(T), max_size(), static_cast&lt;const void*&gt;(p), &amp;destroy_array, p? &amp;initialize_array_by : &amp;initialize_array );
+<a name="l00839"></a>00839     }
+<a name="l00840"></a>00840 
+<a name="l00842"></a>00842     <span class="keyword">template</span>&lt;<span class="keywordtype">bool</span> B&gt; <span class="keyword">class </span>is_integer_tag;
+<a name="l00843"></a>00843 
+<a name="l00845"></a>00845     <span class="keyword">template</span>&lt;<span class="keyword">class</span> I&gt;
+<a name="l00846"></a>00846     <span class="keywordtype">void</span> internal_assign_range(I first, I last, is_integer_tag&lt;true&gt; *) {
+<a name="l00847"></a>00847         internal_assign_n(static_cast&lt;size_type&gt;(first), &amp;static_cast&lt;T&amp;&gt;(last));
+<a name="l00848"></a>00848     }
+<a name="l00850"></a>00850     <span class="keyword">template</span>&lt;<span class="keyword">class</span> I&gt;
+<a name="l00851"></a>00851     <span class="keywordtype">void</span> internal_assign_range(I first, I last, is_integer_tag&lt;false&gt; *) {
+<a name="l00852"></a>00852         internal_assign_iterators(first, last);
+<a name="l00853"></a>00853     }
+<a name="l00855"></a>00855     <span class="keyword">template</span>&lt;<span class="keyword">class</span> I&gt;
+<a name="l00856"></a>00856     <span class="keywordtype">void</span> internal_assign_iterators(I first, I last);
+<a name="l00857"></a>00857 
+<a name="l00859"></a>00859     <span class="keyword">static</span> <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC initialize_array( <span class="keywordtype">void</span>* begin, <span class="keyword">const</span> <span class="keywordtype">void</span>*, size_type n );
+<a name="l00860"></a>00860 
+<a name="l00862"></a>00862     <span class="keyword">static</span> <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC initialize_array_by( <span class="keywordtype">void</span>* begin, <span class="keyword">const</span> <span class="keywordtype">void</span>* src, size_type n );
+<a name="l00863"></a>00863 
+<a name="l00865"></a>00865     <span class="keyword">static</span> <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC copy_array( <span class="keywordtype">void</span>* dst, <span class="keyword">const</span> <span class="keywordtype">void</span>* src, size_type n );
+<a name="l00866"></a>00866 
+<a name="l00868"></a>00868     <span class="keyword">static</span> <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC assign_array( <span class="keywordtype">void</span>* dst, <span class="keyword">const</span> <span class="keywordtype">void</span>* src, size_type n );
+<a name="l00869"></a>00869 
+<a name="l00871"></a>00871     <span class="keyword">static</span> <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC destroy_array( <span class="keywordtype">void</span>* begin, size_type n );
+<a name="l00872"></a>00872 
+<a name="l00874"></a>00874     <span class="keyword">class </span>internal_loop_guide : internal::no_copy {
+<a name="l00875"></a>00875     <span class="keyword">public</span>:
+<a name="l00876"></a>00876         <span class="keyword">const</span> pointer array;
+<a name="l00877"></a>00877         <span class="keyword">const</span> size_type n;
+<a name="l00878"></a>00878         size_type i;
+<a name="l00879"></a>00879         internal_loop_guide(size_type ntrials, <span class="keywordtype">void</span> *ptr)
+<a name="l00880"></a>00880             : array(static_cast&lt;pointer&gt;(ptr)), n(ntrials), i(0) {}
+<a name="l00881"></a>00881         <span class="keywordtype">void</span> init() {   <span class="keywordflow">for</span>(; i &lt; n; ++i) <span class="keyword">new</span>( &amp;array[i] ) T(); }
+<a name="l00882"></a>00882         <span class="keywordtype">void</span> init(<span class="keyword">const</span> <span class="keywordtype">void</span> *src) { <span class="keywordflow">for</span>(; i &lt; n; ++i) new( &amp;array[i] ) T(*static_cast&lt;const T*&gt;(src)); }
+<a name="l00883"></a>00883         <span class="keywordtype">void</span> copy(<span class="keyword">const</span> <span class="keywordtype">void</span> *src) { <span class="keywordflow">for</span>(; i &lt; n; ++i) new( &amp;array[i] ) T(static_cast&lt;const T*&gt;(src)[i]); }
+<a name="l00884"></a>00884         <span class="keywordtype">void</span> assign(<span class="keyword">const</span> <span class="keywordtype">void</span> *src) { <span class="keywordflow">for</span>(; i &lt; n; ++i) array[i] = static_cast&lt;const T*&gt;(src)[i]; }
+<a name="l00885"></a>00885         <span class="keyword">template</span>&lt;<span class="keyword">class</span> I&gt; <span class="keywordtype">void</span> iterate(I &amp;src) { <span class="keywordflow">for</span>(; i &lt; n; ++i, ++src) <span class="keyword">new</span>( &amp;array[i] ) T( *src ); }
+<a name="l00886"></a>00886         ~internal_loop_guide() {
+<a name="l00887"></a>00887             <span class="keywordflow">if</span>(i &lt; n) <span class="comment">// if exception raised, do zerroing on the rest of items</span>
+<a name="l00888"></a>00888                 std::memset(array+i, 0, (n-i)*<span class="keyword">sizeof</span>(value_type));
+<a name="l00889"></a>00889         }
+<a name="l00890"></a>00890     };
+<a name="l00891"></a>00891 };
+<a name="l00892"></a>00892 
+<a name="l00893"></a>00893 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A&gt;
+<a name="l00894"></a><a class="code" href="a00166.html#03c6f4cf66532bf4cc907ee738a9a186">00894</a> <span class="keywordtype">void</span> <a class="code" href="a00166.html">concurrent_vector&lt;T, A&gt;::shrink_to_fit</a>() {
+<a name="l00895"></a>00895     internal_segments_table old;
+<a name="l00896"></a>00896     __TBB_TRY {
+<a name="l00897"></a>00897         <span class="keywordflow">if</span>( internal_compact( <span class="keyword">sizeof</span>(T), &amp;old, &amp;destroy_array, &amp;copy_array ) )
+<a name="l00898"></a>00898             internal_free_segments( old.table, pointers_per_long_table, old.first_block ); <span class="comment">// free joined and unnecessary segments</span>
+<a name="l00899"></a>00899     } __TBB_CATCH(...) {
+<a name="l00900"></a>00900         <span class="keywordflow">if</span>( old.first_block ) <span class="comment">// free segment allocated for compacting. Only for support of exceptions in ctor of user T[ype]</span>
+<a name="l00901"></a>00901             internal_free_segments( old.table, 1, old.first_block );
+<a name="l00902"></a>00902         __TBB_RETHROW();
+<a name="l00903"></a>00903     }
+<a name="l00904"></a>00904 }
+<a name="l00905"></a>00905 
+<a name="l00906"></a>00906 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A&gt;
+<a name="l00907"></a>00907 <span class="keywordtype">void</span> <a class="code" href="a00166.html">concurrent_vector&lt;T, A&gt;::internal_free_segments</a>(<span class="keywordtype">void</span> *table[], segment_index_t k, segment_index_t first_block) {
+<a name="l00908"></a>00908     <span class="comment">// Free the arrays</span>
+<a name="l00909"></a>00909     <span class="keywordflow">while</span>( k &gt; first_block ) {
+<a name="l00910"></a>00910         --k;
+<a name="l00911"></a>00911         T* array = static_cast&lt;T*&gt;(table[k]);
+<a name="l00912"></a>00912         table[k] = NULL;
+<a name="l00913"></a>00913         <span class="keywordflow">if</span>( array &gt; internal::vector_allocation_error_flag ) <span class="comment">// check for correct segment pointer</span>
+<a name="l00914"></a>00914             this-&gt;my_allocator.deallocate( array, segment_size(k) );
+<a name="l00915"></a>00915     }
+<a name="l00916"></a>00916     T* array = static_cast&lt;T*&gt;(table[0]);
+<a name="l00917"></a>00917     <span class="keywordflow">if</span>( array &gt; internal::vector_allocation_error_flag ) {
+<a name="l00918"></a>00918         __TBB_ASSERT( first_block &gt; 0, NULL );
+<a name="l00919"></a>00919         <span class="keywordflow">while</span>(k &gt; 0) table[--k] = NULL;
+<a name="l00920"></a>00920         this-&gt;my_allocator.deallocate( array, segment_size(first_block) );
+<a name="l00921"></a>00921     }
+<a name="l00922"></a>00922 }
+<a name="l00923"></a>00923 
+<a name="l00924"></a>00924 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A&gt;
+<a name="l00925"></a>00925 T&amp; concurrent_vector&lt;T, A&gt;::internal_subscript( size_type index )<span class="keyword"> const </span>{
+<a name="l00926"></a>00926     __TBB_ASSERT( index &lt; my_early_size, <span class="stringliteral">"index out of bounds"</span> );
+<a name="l00927"></a>00927     size_type j = index;
+<a name="l00928"></a>00928     segment_index_t k = segment_base_index_of( j );
+<a name="l00929"></a>00929     __TBB_ASSERT( (segment_t*)my_segment != my_storage || k &lt; pointers_per_short_table, <span class="stringliteral">"index is being allocated"</span> );
+<a name="l00930"></a>00930     <span class="comment">// no need in __TBB_load_with_acquire since thread works in own space or gets </span>
+<a name="l00931"></a>00931 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS</span>
+<a name="l00932"></a>00932 <span class="preprocessor"></span>    T* array = static_cast&lt;T*&gt;( tbb::internal::itt_load_pointer_v3(&amp;my_segment[k].array));
+<a name="l00933"></a>00933 <span class="preprocessor">#else</span>
+<a name="l00934"></a>00934 <span class="preprocessor"></span>    T* array = static_cast&lt;T*&gt;(my_segment[k].array);
+<a name="l00935"></a>00935 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_THREADING_TOOLS */</span>
+<a name="l00936"></a>00936     __TBB_ASSERT( array != internal::vector_allocation_error_flag, <span class="stringliteral">"the instance is broken by bad allocation. Use at() instead"</span> );
+<a name="l00937"></a>00937     __TBB_ASSERT( array, <span class="stringliteral">"index is being allocated"</span> );
+<a name="l00938"></a>00938     <span class="keywordflow">return</span> array[j];
+<a name="l00939"></a>00939 }
+<a name="l00940"></a>00940 
+<a name="l00941"></a>00941 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A&gt;
+<a name="l00942"></a>00942 T&amp; concurrent_vector&lt;T, A&gt;::internal_subscript_with_exceptions( size_type index )<span class="keyword"> const </span>{
+<a name="l00943"></a>00943     <span class="keywordflow">if</span>( index &gt;= my_early_size )
+<a name="l00944"></a>00944         internal::throw_exception(internal::eid_out_of_range); <span class="comment">// throw std::out_of_range</span>
+<a name="l00945"></a>00945     size_type j = index;
+<a name="l00946"></a>00946     segment_index_t k = segment_base_index_of( j );
+<a name="l00947"></a>00947     <span class="keywordflow">if</span>( (segment_t*)my_segment == my_storage &amp;&amp; k &gt;= pointers_per_short_table )
+<a name="l00948"></a>00948         internal::throw_exception(internal::eid_segment_range_error); <span class="comment">// throw std::range_error</span>
+<a name="l00949"></a>00949     <span class="keywordtype">void</span> *array = my_segment[k].array; <span class="comment">// no need in __TBB_load_with_acquire</span>
+<a name="l00950"></a>00950     <span class="keywordflow">if</span>( array &lt;= internal::vector_allocation_error_flag ) <span class="comment">// check for correct segment pointer</span>
+<a name="l00951"></a>00951         internal::throw_exception(internal::eid_index_range_error); <span class="comment">// throw std::range_error</span>
+<a name="l00952"></a>00952     <span class="keywordflow">return</span> static_cast&lt;T*&gt;(array)[j];
+<a name="l00953"></a>00953 }
+<a name="l00954"></a>00954 
+<a name="l00955"></a>00955 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A&gt; <span class="keyword">template</span>&lt;<span class="keyword">class</span> I&gt;
+<a name="l00956"></a>00956 <span class="keywordtype">void</span> concurrent_vector&lt;T, A&gt;::internal_assign_iterators(I first, I last) {
+<a name="l00957"></a>00957     __TBB_ASSERT(my_early_size == 0, NULL);
+<a name="l00958"></a>00958     size_type n = std::distance(first, last);
+<a name="l00959"></a>00959     <span class="keywordflow">if</span>( !n ) <span class="keywordflow">return</span>;
+<a name="l00960"></a>00960     internal_reserve(n, <span class="keyword">sizeof</span>(T), <a class="code" href="a00166.html#2c248a017f0576df3e7cd99627836fd6">max_size</a>());
+<a name="l00961"></a>00961     my_early_size = n;
+<a name="l00962"></a>00962     segment_index_t k = 0;
+<a name="l00963"></a>00963     size_type sz = segment_size( my_first_block );
+<a name="l00964"></a>00964     <span class="keywordflow">while</span>( sz &lt; n ) {
+<a name="l00965"></a>00965         internal_loop_guide loop(sz, my_segment[k].array);
+<a name="l00966"></a>00966         loop.iterate(first);
+<a name="l00967"></a>00967         n -= sz;
+<a name="l00968"></a>00968         <span class="keywordflow">if</span>( !k ) k = my_first_block;
+<a name="l00969"></a>00969         <span class="keywordflow">else</span> { ++k; sz &lt;&lt;= 1; }
+<a name="l00970"></a>00970     }
+<a name="l00971"></a>00971     internal_loop_guide loop(n, my_segment[k].array);
+<a name="l00972"></a>00972     loop.iterate(first);
+<a name="l00973"></a>00973 }
+<a name="l00974"></a>00974 
+<a name="l00975"></a>00975 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A&gt;
+<a name="l00976"></a>00976 <span class="keywordtype">void</span> concurrent_vector&lt;T, A&gt;::initialize_array( <span class="keywordtype">void</span>* begin, <span class="keyword">const</span> <span class="keywordtype">void</span> *, size_type n ) {
+<a name="l00977"></a>00977     internal_loop_guide loop(n, begin); loop.init();
+<a name="l00978"></a>00978 }
+<a name="l00979"></a>00979 
+<a name="l00980"></a>00980 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A&gt;
+<a name="l00981"></a>00981 <span class="keywordtype">void</span> concurrent_vector&lt;T, A&gt;::initialize_array_by( <span class="keywordtype">void</span>* begin, <span class="keyword">const</span> <span class="keywordtype">void</span> *src, size_type n ) {
+<a name="l00982"></a>00982     internal_loop_guide loop(n, begin); loop.init(src);
+<a name="l00983"></a>00983 }
+<a name="l00984"></a>00984 
+<a name="l00985"></a>00985 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A&gt;
+<a name="l00986"></a>00986 <span class="keywordtype">void</span> concurrent_vector&lt;T, A&gt;::copy_array( <span class="keywordtype">void</span>* dst, <span class="keyword">const</span> <span class="keywordtype">void</span>* src, size_type n ) {
+<a name="l00987"></a>00987     internal_loop_guide loop(n, dst); loop.copy(src);
+<a name="l00988"></a>00988 }
+<a name="l00989"></a>00989 
+<a name="l00990"></a>00990 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A&gt;
+<a name="l00991"></a>00991 <span class="keywordtype">void</span> concurrent_vector&lt;T, A&gt;::assign_array( <span class="keywordtype">void</span>* dst, <span class="keyword">const</span> <span class="keywordtype">void</span>* src, size_type n ) {
+<a name="l00992"></a>00992     internal_loop_guide loop(n, dst); loop.assign(src);
+<a name="l00993"></a>00993 }
+<a name="l00994"></a>00994 
+<a name="l00995"></a>00995 <span class="preprocessor">#if defined(_MSC_VER) &amp;&amp; !defined(__INTEL_COMPILER) </span>
+<a name="l00996"></a>00996 <span class="preprocessor"></span>    <span class="comment">// Workaround for overzealous compiler warning</span>
+<a name="l00997"></a>00997 <span class="preprocessor">    #pragma warning (push)</span>
+<a name="l00998"></a>00998 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (disable: 4189)</span>
+<a name="l00999"></a>00999 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l01000"></a>01000 <span class="preprocessor"></span><span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A&gt;
+<a name="l01001"></a>01001 <span class="keywordtype">void</span> concurrent_vector&lt;T, A&gt;::destroy_array( <span class="keywordtype">void</span>* begin, size_type n ) {
+<a name="l01002"></a>01002     T* array = static_cast&lt;T*&gt;(begin);
+<a name="l01003"></a>01003     <span class="keywordflow">for</span>( size_type j=n; j&gt;0; --j )
+<a name="l01004"></a>01004         array[j-1].~T(); <span class="comment">// destructors are supposed to not throw any exceptions</span>
+<a name="l01005"></a>01005 }
+<a name="l01006"></a>01006 <span class="preprocessor">#if defined(_MSC_VER) &amp;&amp; !defined(__INTEL_COMPILER) </span>
+<a name="l01007"></a>01007 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (pop)</span>
+<a name="l01008"></a>01008 <span class="preprocessor"></span><span class="preprocessor">#endif // warning 4189 is back </span>
+<a name="l01009"></a>01009 <span class="preprocessor"></span>
+<a name="l01010"></a>01010 <span class="comment">// concurrent_vector's template functions</span>
+<a name="l01011"></a>01011 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A1, <span class="keyword">class</span> A2&gt;
+<a name="l01012"></a>01012 <span class="keyword">inline</span> <span class="keywordtype">bool</span> operator==(<span class="keyword">const</span> concurrent_vector&lt;T, A1&gt; &amp;a, <span class="keyword">const</span> concurrent_vector&lt;T, A2&gt; &amp;b) {
+<a name="l01013"></a>01013     <span class="comment">// Simply:    return a.size() == b.size() &amp;&amp; std::equal(a.begin(), a.end(), b.begin());</span>
+<a name="l01014"></a>01014     <span class="keywordflow">if</span>(a.size() != b.size()) <span class="keywordflow">return</span> <span class="keyword">false</span>;
+<a name="l01015"></a>01015     <span class="keyword">typename</span> concurrent_vector&lt;T, A1&gt;::const_iterator i(a.begin());
+<a name="l01016"></a>01016     <span class="keyword">typename</span> concurrent_vector&lt;T, A2&gt;::const_iterator j(b.begin());
+<a name="l01017"></a>01017     <span class="keywordflow">for</span>(; i != a.end(); ++i, ++j)
+<a name="l01018"></a>01018         <span class="keywordflow">if</span>( !(*i == *j) ) <span class="keywordflow">return</span> <span class="keyword">false</span>;
+<a name="l01019"></a>01019     <span class="keywordflow">return</span> <span class="keyword">true</span>;
+<a name="l01020"></a>01020 }
+<a name="l01021"></a>01021 
+<a name="l01022"></a>01022 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A1, <span class="keyword">class</span> A2&gt;
+<a name="l01023"></a>01023 <span class="keyword">inline</span> <span class="keywordtype">bool</span> operator!=(<span class="keyword">const</span> concurrent_vector&lt;T, A1&gt; &amp;a, <span class="keyword">const</span> concurrent_vector&lt;T, A2&gt; &amp;b)
+<a name="l01024"></a>01024 {    <span class="keywordflow">return</span> !(a == b); }
+<a name="l01025"></a>01025 
+<a name="l01026"></a>01026 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A1, <span class="keyword">class</span> A2&gt;
+<a name="l01027"></a>01027 <span class="keyword">inline</span> <span class="keywordtype">bool</span> operator&lt;(const concurrent_vector&lt;T, A1&gt; &amp;a, <span class="keyword">const</span> concurrent_vector&lt;T, A2&gt; &amp;b)
+<a name="l01028"></a>01028 {    <span class="keywordflow">return</span> (std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end())); }
+<a name="l01029"></a>01029 
+<a name="l01030"></a>01030 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A1, <span class="keyword">class</span> A2&gt;
+<a name="l01031"></a>01031 <span class="keyword">inline</span> <span class="keywordtype">bool</span> operator&gt;(<span class="keyword">const</span> concurrent_vector&lt;T, A1&gt; &amp;a, <span class="keyword">const</span> concurrent_vector&lt;T, A2&gt; &amp;b)
+<a name="l01032"></a>01032 {    <span class="keywordflow">return</span> b &lt; a; }
+<a name="l01033"></a>01033 
+<a name="l01034"></a>01034 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A1, <span class="keyword">class</span> A2&gt;
+<a name="l01035"></a>01035 <span class="keyword">inline</span> <span class="keywordtype">bool</span> operator&lt;=(const concurrent_vector&lt;T, A1&gt; &amp;a, <span class="keyword">const</span> concurrent_vector&lt;T, A2&gt; &amp;b)
+<a name="l01036"></a>01036 {    <span class="keywordflow">return</span> !(b &lt; a); }
+<a name="l01037"></a>01037 
+<a name="l01038"></a>01038 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A1, <span class="keyword">class</span> A2&gt;
+<a name="l01039"></a>01039 <span class="keyword">inline</span> <span class="keywordtype">bool</span> operator&gt;=(<span class="keyword">const</span> concurrent_vector&lt;T, A1&gt; &amp;a, <span class="keyword">const</span> concurrent_vector&lt;T, A2&gt; &amp;b)
+<a name="l01040"></a>01040 {    <span class="keywordflow">return</span> !(a &lt; b); }
+<a name="l01041"></a>01041 
+<a name="l01042"></a>01042 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">class</span> A&gt;
+<a name="l01043"></a>01043 <span class="keyword">inline</span> <span class="keywordtype">void</span> swap(concurrent_vector&lt;T, A&gt; &amp;a, concurrent_vector&lt;T, A&gt; &amp;b)
+<a name="l01044"></a>01044 {    a.swap( b ); }
+<a name="l01045"></a>01045 
+<a name="l01046"></a>01046 } <span class="comment">// namespace tbb</span>
+<a name="l01047"></a>01047 
+<a name="l01048"></a>01048 <span class="preprocessor">#if defined(_MSC_VER) &amp;&amp; !defined(__INTEL_COMPILER) &amp;&amp; defined(_Wp64)</span>
+<a name="l01049"></a>01049 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (pop)</span>
+<a name="l01050"></a>01050 <span class="preprocessor"></span><span class="preprocessor">#endif // warning 4267 is back</span>
+<a name="l01051"></a>01051 <span class="preprocessor"></span>
+<a name="l01052"></a>01052 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_concurrent_vector_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
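
The concurrent_vector listing above defines shrink_to_fit(), which compacts the segmented storage (the __TBB_CATCH branch frees the segment allocated for compaction before rethrowing), plus element-wise relational operators. A minimal usage sketch, assuming only that the TBB 3.0 headers added in this commit are on the include path (the element values are illustrative):

    // Sketch only: exercises shrink_to_fit() and the relational operators above.
    #include <cassert>
    #include "tbb/concurrent_vector.h"

    int main() {
        tbb::concurrent_vector<int> a, b;
        for (int i = 0; i < 100; ++i) {
            a.push_back(i);            // safe for concurrent growth; single-threaded here
            b.push_back(i);
        }
        assert(a == b);                // element-wise operator== from the listing
        assert(!(a < b) && a <= b);    // lexicographical ordering via operator<
        a.shrink_to_fit();             // compacts segments; not safe concurrently with other operations
        assert(a.size() == 100);
        return 0;
    }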
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00356.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00356.html
new file mode 100644 (file)
index 0000000..3d7bff7
--- /dev/null
@@ -0,0 +1,163 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>critical_section.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>critical_section.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef _TBB_CRITICAL_SECTION_H_</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define _TBB_CRITICAL_SECTION_H_</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00025"></a>00025 <span class="preprocessor"></span><span class="preprocessor">#include "machine/windows_api.h"</span>
+<a name="l00026"></a>00026 <span class="preprocessor">#else</span>
+<a name="l00027"></a>00027 <span class="preprocessor"></span><span class="preprocessor">#include &lt;pthread.h&gt;</span>
+<a name="l00028"></a>00028 <span class="preprocessor">#include &lt;errno.h&gt;</span>
+<a name="l00029"></a>00029 <span class="preprocessor">#endif  // _WIN32||WIN64</span>
+<a name="l00030"></a>00030 <span class="preprocessor"></span>
+<a name="l00031"></a>00031 <span class="preprocessor">#include "tbb_stddef.h"</span>
+<a name="l00032"></a>00032 <span class="preprocessor">#include "tbb_thread.h"</span>
+<a name="l00033"></a>00033 <span class="preprocessor">#include "tbb_exception.h"</span>
+<a name="l00034"></a>00034 
+<a name="l00035"></a>00035 <span class="preprocessor">#include "tbb_profiling.h"</span>
+<a name="l00036"></a>00036 
+<a name="l00037"></a>00037 <span class="keyword">namespace </span>tbb {
+<a name="l00038"></a>00038 
+<a name="l00039"></a>00039     <span class="keyword">namespace </span>internal {
+<a name="l00040"></a>00040 <span class="keyword">class </span>critical_section_v4 : internal::no_copy {
+<a name="l00041"></a>00041 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00042"></a>00042 <span class="preprocessor"></span>    CRITICAL_SECTION my_impl;
+<a name="l00043"></a>00043 <span class="preprocessor">#else</span>
+<a name="l00044"></a>00044 <span class="preprocessor"></span>    pthread_mutex_t my_impl;
+<a name="l00045"></a>00045 <span class="preprocessor">#endif</span>
+<a name="l00046"></a>00046 <span class="preprocessor"></span>    tbb_thread::id my_tid;
+<a name="l00047"></a>00047 <span class="keyword">public</span>:
+<a name="l00048"></a>00048 
+<a name="l00049"></a>00049     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_construct();
+<a name="l00050"></a>00050 
+<a name="l00051"></a>00051     critical_section_v4() { 
+<a name="l00052"></a>00052 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00053"></a>00053 <span class="preprocessor"></span>        InitializeCriticalSection(&amp;my_impl);
+<a name="l00054"></a>00054 <span class="preprocessor">#else</span>
+<a name="l00055"></a>00055 <span class="preprocessor"></span>        pthread_mutex_init(&amp;my_impl, NULL);
+<a name="l00056"></a>00056 <span class="preprocessor">#endif</span>
+<a name="l00057"></a>00057 <span class="preprocessor"></span>        internal_construct();
+<a name="l00058"></a>00058     }
+<a name="l00059"></a>00059 
+<a name="l00060"></a>00060     ~critical_section_v4() {
+<a name="l00061"></a>00061         __TBB_ASSERT(my_tid == tbb_thread::id(), <span class="stringliteral">"Destroying a still-held critical section"</span>);
+<a name="l00062"></a>00062 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00063"></a>00063 <span class="preprocessor"></span>        DeleteCriticalSection(&amp;my_impl); 
+<a name="l00064"></a>00064 <span class="preprocessor">#else</span>
+<a name="l00065"></a>00065 <span class="preprocessor"></span>        pthread_mutex_destroy(&amp;my_impl);
+<a name="l00066"></a>00066 <span class="preprocessor">#endif</span>
+<a name="l00067"></a>00067 <span class="preprocessor"></span>    }
+<a name="l00068"></a>00068 
+<a name="l00069"></a>00069     <span class="keyword">class </span>scoped_lock : internal::no_copy {
+<a name="l00070"></a>00070     <span class="keyword">private</span>:
+<a name="l00071"></a>00071         critical_section_v4 &amp;my_crit;
+<a name="l00072"></a>00072     <span class="keyword">public</span>:
+<a name="l00073"></a>00073         scoped_lock( critical_section_v4&amp; lock_me) :my_crit(lock_me) {
+<a name="l00074"></a>00074             my_crit.lock();
+<a name="l00075"></a>00075         }
+<a name="l00076"></a>00076 
+<a name="l00077"></a>00077         ~scoped_lock() {
+<a name="l00078"></a>00078             my_crit.unlock();
+<a name="l00079"></a>00079         }
+<a name="l00080"></a>00080     };
+<a name="l00081"></a>00081 
+<a name="l00082"></a>00082     <span class="keywordtype">void</span> lock() { 
+<a name="l00083"></a>00083         tbb_thread::id local_tid = this_tbb_thread::get_id();
+<a name="l00084"></a>00084         <span class="keywordflow">if</span>(local_tid == my_tid) throw_exception( eid_improper_lock );
+<a name="l00085"></a>00085 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00086"></a>00086 <span class="preprocessor"></span>        EnterCriticalSection( &amp;my_impl );
+<a name="l00087"></a>00087 <span class="preprocessor">#else</span>
+<a name="l00088"></a>00088 <span class="preprocessor"></span>        <span class="keywordtype">int</span> rval = pthread_mutex_lock(&amp;my_impl);
+<a name="l00089"></a>00089         __TBB_ASSERT_EX(!rval, <span class="stringliteral">"critical_section::lock: pthread_mutex_lock failed"</span>);
+<a name="l00090"></a>00090 <span class="preprocessor">#endif</span>
+<a name="l00091"></a>00091 <span class="preprocessor"></span>        __TBB_ASSERT(my_tid == tbb_thread::id(), NULL);
+<a name="l00092"></a>00092         my_tid = local_tid;
+<a name="l00093"></a>00093     }
+<a name="l00094"></a>00094 
+<a name="l00095"></a>00095     <span class="keywordtype">bool</span> try_lock() {
+<a name="l00096"></a>00096         <span class="keywordtype">bool</span> gotlock;
+<a name="l00097"></a>00097         tbb_thread::id local_tid = this_tbb_thread::get_id();
+<a name="l00098"></a>00098         <span class="keywordflow">if</span>(local_tid == my_tid) <span class="keywordflow">return</span> <span class="keyword">false</span>;
+<a name="l00099"></a>00099 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00100"></a>00100 <span class="preprocessor"></span>        gotlock = TryEnterCriticalSection( &amp;my_impl ) != 0;
+<a name="l00101"></a>00101 <span class="preprocessor">#else</span>
+<a name="l00102"></a>00102 <span class="preprocessor"></span>        <span class="keywordtype">int</span> rval = pthread_mutex_trylock(&amp;my_impl);
+<a name="l00103"></a>00103         <span class="comment">// valid returns are 0 (locked) and [EBUSY]</span>
+<a name="l00104"></a>00104         __TBB_ASSERT(rval == 0 || rval == EBUSY, <span class="stringliteral">"critical_section::trylock: pthread_mutex_trylock failed"</span>);
+<a name="l00105"></a>00105         gotlock = rval == 0;
+<a name="l00106"></a>00106 <span class="preprocessor">#endif</span>
+<a name="l00107"></a>00107 <span class="preprocessor"></span>        <span class="keywordflow">if</span>(gotlock)  {
+<a name="l00108"></a>00108             my_tid = local_tid;
+<a name="l00109"></a>00109         }
+<a name="l00110"></a>00110         <span class="keywordflow">return</span> gotlock;
+<a name="l00111"></a>00111     }
+<a name="l00112"></a>00112 
+<a name="l00113"></a>00113     <span class="keywordtype">void</span> unlock() {
+<a name="l00114"></a>00114         __TBB_ASSERT(this_tbb_thread::get_id() == my_tid, <span class="stringliteral">"thread unlocking critical_section is not thread that locked it"</span>);
+<a name="l00115"></a>00115         my_tid = tbb_thread::id();
+<a name="l00116"></a>00116 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00117"></a>00117 <span class="preprocessor"></span>        LeaveCriticalSection( &amp;my_impl );
+<a name="l00118"></a>00118 <span class="preprocessor">#else</span>
+<a name="l00119"></a>00119 <span class="preprocessor"></span>        <span class="keywordtype">int</span> rval = pthread_mutex_unlock(&amp;my_impl);
+<a name="l00120"></a>00120         __TBB_ASSERT_EX(!rval, <span class="stringliteral">"critical_section::unlock: pthread_mutex_unlock failed"</span>);
+<a name="l00121"></a>00121 <span class="preprocessor">#endif</span>
+<a name="l00122"></a>00122 <span class="preprocessor"></span>    }
+<a name="l00123"></a>00123 
+<a name="l00124"></a>00124     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_rw_mutex = <span class="keyword">false</span>;
+<a name="l00125"></a>00125     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_recursive_mutex = <span class="keyword">false</span>;
+<a name="l00126"></a>00126     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_fair_mutex = <span class="keyword">true</span>;
+<a name="l00127"></a>00127 }; <span class="comment">// critical_section_v4</span>
+<a name="l00128"></a>00128 } <span class="comment">// namespace internal</span>
+<a name="l00129"></a>00129 <span class="keyword">typedef</span> internal::critical_section_v4 critical_section;
+<a name="l00130"></a>00130 
+<a name="l00131"></a>00131 __TBB_DEFINE_PROFILING_SET_NAME(critical_section)
+<a name="l00132"></a>00132 } <span class="comment">// namespace tbb</span>
+<a name="l00133"></a>00133 <span class="preprocessor">#endif  // _TBB_CRITICAL_SECTION_H_</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
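
The critical_section listing above wraps a native CRITICAL_SECTION or pthread_mutex_t, records the owning thread id, and throws improper_lock if the holding thread tries to re-acquire it. A minimal sketch of the intended scoped_lock usage, assuming the TBB 3.0 headers from this commit (the worker function and counter are illustrative):

    // Sketch only: serializes a shared counter with tbb::critical_section.
    #include "tbb/critical_section.h"
    #include "tbb/tbb_thread.h"

    static tbb::critical_section cs;
    static long shared_counter = 0;

    static void worker() {
        for (int i = 0; i < 100000; ++i) {
            tbb::critical_section::scoped_lock lock(cs);  // non-recursive: re-locking on the same thread throws
            ++shared_counter;                             // serialized update
        }                                                 // released when 'lock' leaves scope
    }

    int main() {
        tbb::tbb_thread t1(worker), t2(worker);
        t1.join();
        t2.join();
        return shared_counter == 200000 ? 0 : 1;
    }

The is_rw_mutex / is_recursive_mutex / is_fair_mutex traits at the end of the class advertise exactly this behaviour: a fair, non-recursive, non-reader-writer mutex.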
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00359.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00359.html
new file mode 100644 (file)
index 0000000..4c39dae
--- /dev/null
@@ -0,0 +1,968 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>enumerable_thread_specific.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>enumerable_thread_specific.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_enumerable_thread_specific_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_enumerable_thread_specific_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "concurrent_vector.h"</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#include "tbb_thread.h"</span>
+<a name="l00026"></a>00026 <span class="preprocessor">#include "cache_aligned_allocator.h"</span>
+<a name="l00027"></a>00027 <span class="preprocessor">#include "aligned_space.h"</span>
+<a name="l00028"></a>00028 <span class="preprocessor">#include &lt;string.h&gt;</span>  <span class="comment">// for memcpy</span>
+<a name="l00029"></a>00029 
+<a name="l00030"></a>00030 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00031"></a>00031 <span class="preprocessor"></span><span class="preprocessor">#include "machine/windows_api.h"</span>
+<a name="l00032"></a>00032 <span class="preprocessor">#else</span>
+<a name="l00033"></a>00033 <span class="preprocessor"></span><span class="preprocessor">#include &lt;pthread.h&gt;</span>
+<a name="l00034"></a>00034 <span class="preprocessor">#endif</span>
+<a name="l00035"></a>00035 <span class="preprocessor"></span>
+<a name="l00036"></a>00036 <span class="keyword">namespace </span>tbb {
+<a name="l00037"></a>00037 
+<a name="l00039"></a><a class="code" href="a00272.html#a8622ae61b7e7737dac26542e181178e">00039</a> <span class="keyword">enum</span> <a class="code" href="a00272.html#a8622ae61b7e7737dac26542e181178e">ets_key_usage_type</a> { ets_key_per_instance, ets_no_key };
+<a name="l00040"></a>00040 
+<a name="l00041"></a>00041 <span class="keyword">namespace </span>interface6 {
+<a name="l00042"></a>00042  
+<a name="l00044"></a>00044     <span class="keyword">namespace </span>internal { 
+<a name="l00045"></a>00045 
+<a name="l00046"></a>00046         <span class="keyword">template</span>&lt;ets_key_usage_type ETS_key_type&gt;
+<a name="l00047"></a>00047         <span class="keyword">class </span>ets_base: tbb::internal::no_copy {
+<a name="l00048"></a>00048         <span class="keyword">protected</span>:
+<a name="l00049"></a>00049 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00050"></a>00050 <span class="preprocessor"></span>            <span class="keyword">typedef</span> DWORD key_type;
+<a name="l00051"></a>00051 <span class="preprocessor">#else</span>
+<a name="l00052"></a>00052 <span class="preprocessor"></span>            <span class="keyword">typedef</span> pthread_t key_type;
+<a name="l00053"></a>00053 <span class="preprocessor">#endif</span>
+<a name="l00054"></a>00054 <span class="preprocessor"></span><span class="preprocessor">#if __TBB_GCC_3_3_PROTECTED_BROKEN</span>
+<a name="l00055"></a>00055 <span class="preprocessor"></span>        <span class="keyword">public</span>:
+<a name="l00056"></a>00056 <span class="preprocessor">#endif</span>
+<a name="l00057"></a>00057 <span class="preprocessor"></span>            <span class="keyword">struct </span>slot;
+<a name="l00058"></a>00058 
+<a name="l00059"></a>00059             <span class="keyword">struct </span>array {
+<a name="l00060"></a>00060                 array* next;
+<a name="l00061"></a>00061                 size_t lg_size;
+<a name="l00062"></a>00062                 slot&amp; at( size_t k ) {
+<a name="l00063"></a>00063                     <span class="keywordflow">return</span> ((slot*)(<span class="keywordtype">void</span>*)(<span class="keyword">this</span>+1))[k];
+<a name="l00064"></a>00064                 }
+<a name="l00065"></a>00065                 size_t size()<span class="keyword"> const </span>{<span class="keywordflow">return</span> (size_t)1&lt;&lt;lg_size;}
+<a name="l00066"></a>00066                 size_t mask()<span class="keyword"> const </span>{<span class="keywordflow">return</span> size()-1;}
+<a name="l00067"></a>00067                 size_t start( size_t h )<span class="keyword"> const </span>{
+<a name="l00068"></a>00068                     <span class="keywordflow">return</span> h&gt;&gt;(8*<span class="keyword">sizeof</span>(size_t)-lg_size);
+<a name="l00069"></a>00069                 }
+<a name="l00070"></a>00070             };
+<a name="l00071"></a>00071             <span class="keyword">struct </span>slot {
+<a name="l00072"></a>00072                 key_type key;
+<a name="l00073"></a>00073                 <span class="keywordtype">void</span>* ptr;
+<a name="l00074"></a>00074                 <span class="keywordtype">bool</span> empty()<span class="keyword"> const </span>{<span class="keywordflow">return</span> !key;}
+<a name="l00075"></a>00075                 <span class="keywordtype">bool</span> match( key_type k )<span class="keyword"> const </span>{<span class="keywordflow">return</span> key==k;}
+<a name="l00076"></a>00076                 <span class="keywordtype">bool</span> claim( key_type k ) {
+<a name="l00077"></a>00077                     __TBB_ASSERT(<span class="keyword">sizeof</span>(<a class="code" href="a00147.html">tbb::atomic&lt;key_type&gt;</a>)==<span class="keyword">sizeof</span>(key_type), NULL);
+<a name="l00078"></a>00078                     <span class="keywordflow">return</span> tbb::internal::punned_cast&lt;tbb::atomic&lt;key_type&gt;*&gt;(&amp;key)-&gt;compare_and_swap(k,0)==0;
+<a name="l00079"></a>00079                 }
+<a name="l00080"></a>00080             };
+<a name="l00081"></a>00081 <span class="preprocessor">#if __TBB_GCC_3_3_PROTECTED_BROKEN</span>
+<a name="l00082"></a>00082 <span class="preprocessor"></span>        <span class="keyword">protected</span>:
+<a name="l00083"></a>00083 <span class="preprocessor">#endif</span>
+<a name="l00084"></a>00084 <span class="preprocessor"></span>        
+<a name="l00085"></a>00085             <span class="keyword">static</span> key_type key_of_current_thread() {
+<a name="l00086"></a>00086                tbb::tbb_thread::id <span class="keywordtype">id</span> = tbb::this_tbb_thread::get_id();
+<a name="l00087"></a>00087                key_type k;
+<a name="l00088"></a>00088                memcpy( &amp;k, &amp;<span class="keywordtype">id</span>, <span class="keyword">sizeof</span>(k) );
+<a name="l00089"></a>00089                <span class="keywordflow">return</span> k;
+<a name="l00090"></a>00090             }
+<a name="l00091"></a>00091 
+<a name="l00093"></a>00093 
+<a name="l00095"></a>00095             atomic&lt;array*&gt; my_root;
+<a name="l00096"></a>00096             atomic&lt;size_t&gt; my_count;
+<a name="l00097"></a>00097             <span class="keyword">virtual</span> <span class="keywordtype">void</span>* create_local() = 0;
+<a name="l00098"></a>00098             <span class="keyword">virtual</span> <span class="keywordtype">void</span>* create_array(size_t _size) = 0;  <span class="comment">// _size in bytes</span>
+<a name="l00099"></a>00099             <span class="keyword">virtual</span> <span class="keywordtype">void</span> free_array(<span class="keywordtype">void</span>* ptr, size_t _size) = 0; <span class="comment">// _size in bytes</span>
+<a name="l00100"></a>00100             array* allocate( size_t lg_size ) {
+<a name="l00101"></a>00101                 size_t n = 1&lt;&lt;lg_size;  
+<a name="l00102"></a>00102                 array* a = static_cast&lt;array*&gt;(create_array( <span class="keyword">sizeof</span>(array)+n*<span class="keyword">sizeof</span>(slot) ));
+<a name="l00103"></a>00103                 a-&gt;lg_size = lg_size;
+<a name="l00104"></a>00104                 std::memset( a+1, 0, n*<span class="keyword">sizeof</span>(slot) );
+<a name="l00105"></a>00105                 <span class="keywordflow">return</span> a;
+<a name="l00106"></a>00106             }
+<a name="l00107"></a>00107             <span class="keywordtype">void</span> free(array* a) {
+<a name="l00108"></a>00108                 size_t n = 1&lt;&lt;(a-&gt;lg_size);  
+<a name="l00109"></a>00109                 free_array( (<span class="keywordtype">void</span> *)a, size_t(<span class="keyword">sizeof</span>(array)+n*<span class="keyword">sizeof</span>(slot)) );
+<a name="l00110"></a>00110             }
+<a name="l00111"></a>00111             <span class="keyword">static</span> size_t hash( key_type k ) {
+<a name="l00112"></a>00112                 <span class="comment">// Multiplicative hashing.  Client should use *upper* bits.</span>
+<a name="l00113"></a>00113                 <span class="comment">// casts required for Mac gcc4.* compiler</span>
+<a name="l00114"></a>00114 <span class="preprocessor">#if __TBB_WORDSIZE == 4</span>
+<a name="l00115"></a>00115 <span class="preprocessor"></span>                <span class="keywordflow">return</span> uintptr_t(k)*0x9E3779B9;
+<a name="l00116"></a>00116 <span class="preprocessor">#else</span>
+<a name="l00117"></a>00117 <span class="preprocessor"></span>                <span class="keywordflow">return</span> uintptr_t(k)*0x9E3779B97F4A7C15;
+<a name="l00118"></a>00118 <span class="preprocessor">#endif </span>
+<a name="l00119"></a>00119 <span class="preprocessor"></span>            } 
+<a name="l00120"></a>00120         
+<a name="l00121"></a>00121             ets_base() {my_root=NULL; my_count=0;}
+<a name="l00122"></a>00122             <span class="keyword">virtual</span> ~ets_base();  <span class="comment">// g++ complains if this is not virtual...</span>
+<a name="l00123"></a>00123             <span class="keywordtype">void</span>* table_lookup( <span class="keywordtype">bool</span>&amp; exists );
+<a name="l00124"></a>00124             <span class="keywordtype">void</span> table_clear();
+<a name="l00125"></a>00125             slot&amp; table_find( key_type k ) {
+<a name="l00126"></a>00126                 size_t h = hash(k);
+<a name="l00127"></a>00127                 array* r = my_root;
+<a name="l00128"></a>00128                 size_t mask = r-&gt;mask();
+<a name="l00129"></a>00129                 <span class="keywordflow">for</span>(size_t i = r-&gt;start(h);;i=(i+1)&amp;mask) {
+<a name="l00130"></a>00130                     slot&amp; s = r-&gt;at(i);
+<a name="l00131"></a>00131                     <span class="keywordflow">if</span>( s.empty() || s.match(k) )
+<a name="l00132"></a>00132                         <span class="keywordflow">return</span> s;
+<a name="l00133"></a>00133                 }
+<a name="l00134"></a>00134             }
+<a name="l00135"></a>00135             <span class="keywordtype">void</span> table_reserve_for_copy( <span class="keyword">const</span> ets_base&amp; other ) {
+<a name="l00136"></a>00136                 __TBB_ASSERT(!my_root,NULL);
+<a name="l00137"></a>00137                 __TBB_ASSERT(!my_count,NULL);
+<a name="l00138"></a>00138                 <span class="keywordflow">if</span>( other.my_root ) {
+<a name="l00139"></a>00139                     array* a = allocate(other.my_root-&gt;lg_size);
+<a name="l00140"></a>00140                     a-&gt;next = NULL;
+<a name="l00141"></a>00141                     my_root = a;
+<a name="l00142"></a>00142                     my_count = other.my_count;
+<a name="l00143"></a>00143                 }
+<a name="l00144"></a>00144             }
+<a name="l00145"></a>00145         };
+<a name="l00146"></a>00146 
+<a name="l00147"></a>00147         <span class="keyword">template</span>&lt;ets_key_usage_type ETS_key_type&gt;
+<a name="l00148"></a>00148         ets_base&lt;ETS_key_type&gt;::~ets_base() {
+<a name="l00149"></a>00149             __TBB_ASSERT(!my_root, NULL);
+<a name="l00150"></a>00150         }
+<a name="l00151"></a>00151 
+<a name="l00152"></a>00152         <span class="keyword">template</span>&lt;ets_key_usage_type ETS_key_type&gt;
+<a name="l00153"></a>00153         <span class="keywordtype">void</span> ets_base&lt;ETS_key_type&gt;::table_clear() {
+<a name="l00154"></a>00154             <span class="keywordflow">while</span>( array* r = my_root ) {
+<a name="l00155"></a>00155                 my_root = r-&gt;next;
+<a name="l00156"></a>00156                 free(r);
+<a name="l00157"></a>00157             }
+<a name="l00158"></a>00158             my_count = 0;
+<a name="l00159"></a>00159         }
+<a name="l00160"></a>00160                 
+<a name="l00161"></a>00161         <span class="keyword">template</span>&lt;ets_key_usage_type ETS_key_type&gt;
+<a name="l00162"></a>00162         <span class="keywordtype">void</span>* ets_base&lt;ETS_key_type&gt;::table_lookup( <span class="keywordtype">bool</span>&amp; exists ) {
+<a name="l00163"></a>00163             <span class="keyword">const</span> key_type k = key_of_current_thread(); 
+<a name="l00164"></a>00164 
+<a name="l00165"></a>00165             __TBB_ASSERT(k!=0,NULL);
+<a name="l00166"></a>00166             <span class="keywordtype">void</span>* found;
+<a name="l00167"></a>00167             size_t h = hash(k);
+<a name="l00168"></a>00168             <span class="keywordflow">for</span>( array* r=my_root; r; r=r-&gt;next ) {
+<a name="l00169"></a>00169                 size_t mask=r-&gt;mask();
+<a name="l00170"></a>00170                 <span class="keywordflow">for</span>(size_t i = r-&gt;start(h); ;i=(i+1)&amp;mask) {
+<a name="l00171"></a>00171                     slot&amp; s = r-&gt;at(i);
+<a name="l00172"></a>00172                     <span class="keywordflow">if</span>( s.empty() ) <span class="keywordflow">break</span>;
+<a name="l00173"></a>00173                     <span class="keywordflow">if</span>( s.match(k) ) {
+<a name="l00174"></a>00174                         <span class="keywordflow">if</span>( r==my_root ) {
+<a name="l00175"></a>00175                             <span class="comment">// Success at top level</span>
+<a name="l00176"></a>00176                             exists = <span class="keyword">true</span>;
+<a name="l00177"></a>00177                             <span class="keywordflow">return</span> s.ptr;
+<a name="l00178"></a>00178                         } <span class="keywordflow">else</span> {
+<a name="l00179"></a>00179                             <span class="comment">// Success at some other level.  Need to insert at top level.</span>
+<a name="l00180"></a>00180                             exists = <span class="keyword">true</span>;
+<a name="l00181"></a>00181                             found = s.ptr;
+<a name="l00182"></a>00182                             <span class="keywordflow">goto</span> insert;
+<a name="l00183"></a>00183                         }
+<a name="l00184"></a>00184                     }
+<a name="l00185"></a>00185                 }
+<a name="l00186"></a>00186             }
+<a name="l00187"></a>00187             <span class="comment">// Key does not yet exist</span>
+<a name="l00188"></a>00188             exists = <span class="keyword">false</span>;
+<a name="l00189"></a>00189             found = create_local();
+<a name="l00190"></a>00190             {
+<a name="l00191"></a>00191                 size_t c = ++my_count;
+<a name="l00192"></a>00192                 array* r = my_root;
+<a name="l00193"></a>00193                 <span class="keywordflow">if</span>( !r || c&gt;r-&gt;size()/2 ) {
+<a name="l00194"></a>00194                     size_t s = r ? r-&gt;lg_size : 2;
+<a name="l00195"></a>00195                     <span class="keywordflow">while</span>( c&gt;size_t(1)&lt;&lt;(s-1) ) ++s;
+<a name="l00196"></a>00196                     array* a = allocate(s);
+<a name="l00197"></a>00197                     <span class="keywordflow">for</span>(;;) {
+<a name="l00198"></a>00198                         a-&gt;next = my_root;
+<a name="l00199"></a>00199                         array* new_r = my_root.compare_and_swap(a,r);
+<a name="l00200"></a>00200                         <span class="keywordflow">if</span>( new_r==r ) <span class="keywordflow">break</span>;
+<a name="l00201"></a>00201                         <span class="keywordflow">if</span>( new_r-&gt;lg_size&gt;=s ) {
+<a name="l00202"></a>00202                             <span class="comment">// Another thread inserted an equal or  bigger array, so our array is superfluous.</span>
+<a name="l00203"></a>00203                             free(a);
+<a name="l00204"></a>00204                             <span class="keywordflow">break</span>;
+<a name="l00205"></a>00205                         }
+<a name="l00206"></a>00206                         r = new_r;
+<a name="l00207"></a>00207                     }
+<a name="l00208"></a>00208                 }
+<a name="l00209"></a>00209             }
+<a name="l00210"></a>00210         insert:
+<a name="l00211"></a>00211             <span class="comment">// Guaranteed to be room for it, and it is not present, so search for empty slot and grab it.</span>
+<a name="l00212"></a>00212             array* ir = my_root;
+<a name="l00213"></a>00213             size_t mask = ir-&gt;mask();
+<a name="l00214"></a>00214             <span class="keywordflow">for</span>(size_t i = ir-&gt;start(h);;i=(i+1)&amp;mask) {
+<a name="l00215"></a>00215                 slot&amp; s = ir-&gt;at(i);
+<a name="l00216"></a>00216                 <span class="keywordflow">if</span>( s.empty() ) {
+<a name="l00217"></a>00217                     <span class="keywordflow">if</span>( s.claim(k) ) {
+<a name="l00218"></a>00218                         s.ptr = found;
+<a name="l00219"></a>00219                         <span class="keywordflow">return</span> found;
+<a name="l00220"></a>00220                     }
+<a name="l00221"></a>00221                 }
+<a name="l00222"></a>00222             }
+<a name="l00223"></a>00223         }
+<a name="l00224"></a>00224 
+<a name="l00226"></a>00226         <span class="keyword">template</span> &lt;&gt;
+<a name="l00227"></a>00227         <span class="keyword">class </span>ets_base&lt;ets_key_per_instance&gt;: <span class="keyword">protected</span> ets_base&lt;ets_no_key&gt; {
+<a name="l00228"></a>00228             <span class="keyword">typedef</span> ets_base&lt;ets_no_key&gt; super;
+<a name="l00229"></a>00229 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00230"></a>00230 <span class="preprocessor"></span>            <span class="keyword">typedef</span> DWORD tls_key_t;
+<a name="l00231"></a>00231             <span class="keywordtype">void</span> create_key() { my_key = TlsAlloc(); }
+<a name="l00232"></a>00232             <span class="keywordtype">void</span> destroy_key() { TlsFree(my_key); }
+<a name="l00233"></a>00233             <span class="keywordtype">void</span> set_tls(<span class="keywordtype">void</span> * value) { TlsSetValue(my_key, (LPVOID)value); }
+<a name="l00234"></a>00234             <span class="keywordtype">void</span>* get_tls() { <span class="keywordflow">return</span> (<span class="keywordtype">void</span> *)TlsGetValue(my_key); }
+<a name="l00235"></a>00235 <span class="preprocessor">#else</span>
+<a name="l00236"></a>00236 <span class="preprocessor"></span>            <span class="keyword">typedef</span> pthread_key_t tls_key_t;
+<a name="l00237"></a>00237             <span class="keywordtype">void</span> create_key() { pthread_key_create(&amp;my_key, NULL); }
+<a name="l00238"></a>00238             <span class="keywordtype">void</span> destroy_key() { pthread_key_delete(my_key); }
+<a name="l00239"></a>00239             <span class="keywordtype">void</span> set_tls( <span class="keywordtype">void</span> * value )<span class="keyword"> const </span>{ pthread_setspecific(my_key, value); }
+<a name="l00240"></a>00240             <span class="keywordtype">void</span>* get_tls()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> pthread_getspecific(my_key); }
+<a name="l00241"></a>00241 <span class="preprocessor">#endif</span>
+<a name="l00242"></a>00242 <span class="preprocessor"></span>            tls_key_t my_key;
+<a name="l00243"></a>00243             <span class="keyword">virtual</span> <span class="keywordtype">void</span>* create_local() = 0;
+<a name="l00244"></a>00244             <span class="keyword">virtual</span> <span class="keywordtype">void</span>* create_array(size_t _size) = 0;  <span class="comment">// _size in bytes</span>
+<a name="l00245"></a>00245             <span class="keyword">virtual</span> <span class="keywordtype">void</span> free_array(<span class="keywordtype">void</span>* ptr, size_t _size) = 0; <span class="comment">// size in bytes</span>
+<a name="l00246"></a>00246         <span class="keyword">public</span>:
+<a name="l00247"></a>00247             ets_base() {create_key();}
+<a name="l00248"></a>00248             ~ets_base() {destroy_key();}
+<a name="l00249"></a>00249             <span class="keywordtype">void</span>* table_lookup( <span class="keywordtype">bool</span>&amp; exists ) {
+<a name="l00250"></a>00250                 <span class="keywordtype">void</span>* found = get_tls();
+<a name="l00251"></a>00251                 <span class="keywordflow">if</span>( found ) {
+<a name="l00252"></a>00252                     exists=<span class="keyword">true</span>;
+<a name="l00253"></a>00253                 } <span class="keywordflow">else</span> {
+<a name="l00254"></a>00254                     found = super::table_lookup(exists);
+<a name="l00255"></a>00255                     set_tls(found);
+<a name="l00256"></a>00256                 }
+<a name="l00257"></a>00257                 <span class="keywordflow">return</span> found; 
+<a name="l00258"></a>00258             }
+<a name="l00259"></a>00259             <span class="keywordtype">void</span> table_clear() {
+<a name="l00260"></a>00260                 destroy_key();
+<a name="l00261"></a>00261                 create_key(); 
+<a name="l00262"></a>00262                 super::table_clear();
+<a name="l00263"></a>00263             }
+<a name="l00264"></a>00264         };
+<a name="l00265"></a>00265 
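The ets_base class just listed hides the two native thread-local-storage APIs (TlsAlloc/TlsGetValue on Windows, pthread_key_create/pthread_getspecific elsewhere) behind one small interface, and table_lookup() caches the per-thread pointer in that slot so repeat lookups on the same thread never touch the shared table. A minimal standalone sketch of the same cross-platform pattern (the free functions here are illustrative, not taken from the header):

    // Cross-platform TLS key wrapper in the style of ets_base (illustrative sketch).
    #if _WIN32||_WIN64
    #include <windows.h>
    typedef DWORD tls_key_t;
    inline void  create_key ( tls_key_t& k )          { k = TlsAlloc(); }
    inline void  destroy_key( tls_key_t k )           { TlsFree(k); }
    inline void  set_tls    ( tls_key_t k, void* v )  { TlsSetValue(k, (LPVOID)v); }
    inline void* get_tls    ( tls_key_t k )           { return (void*)TlsGetValue(k); }
    #else
    #include <pthread.h>
    typedef pthread_key_t tls_key_t;
    inline void  create_key ( tls_key_t& k )          { pthread_key_create(&k, NULL); }
    inline void  destroy_key( tls_key_t k )           { pthread_key_delete(k); }
    inline void  set_tls    ( tls_key_t k, void* v )  { pthread_setspecific(k, v); }
    inline void* get_tls    ( tls_key_t k )           { return pthread_getspecific(k); }
    #endif

Note how table_clear() above simply destroys and recreates the key, which invalidates every thread's cached pointer at once before clearing the shared table.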
+<a name="l00267"></a>00267         <span class="keyword">template</span>&lt; <span class="keyword">typename</span> Container, <span class="keyword">typename</span> Value &gt;
+<a name="l00268"></a>00268         <span class="keyword">class </span>enumerable_thread_specific_iterator 
+<a name="l00269"></a>00269 #if defined(_WIN64) &amp;&amp; defined(_MSC_VER) 
+<a name="l00270"></a>00270             <span class="comment">// Ensure that Microsoft's internal template function _Val_type works correctly.</span>
+<a name="l00271"></a>00271             : public std::iterator&lt;std::random_access_iterator_tag,Value&gt;
+<a name="l00272"></a>00272 #endif <span class="comment">/* defined(_WIN64) &amp;&amp; defined(_MSC_VER) */</span>
+<a name="l00273"></a>00273         {
+<a name="l00275"></a>00275         
+<a name="l00276"></a>00276             Container *my_container;
+<a name="l00277"></a>00277             <span class="keyword">typename</span> Container::size_type my_index;
+<a name="l00278"></a>00278             <span class="keyword">mutable</span> Value *my_value;
+<a name="l00279"></a>00279         
+<a name="l00280"></a>00280             <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> T&gt;
+<a name="l00281"></a>00281             <span class="keyword">friend</span> enumerable_thread_specific_iterator&lt;C,T&gt; operator+( ptrdiff_t offset, 
+<a name="l00282"></a>00282                                                                        <span class="keyword">const</span> enumerable_thread_specific_iterator&lt;C,T&gt;&amp; v );
+<a name="l00283"></a>00283         
+<a name="l00284"></a>00284             <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00285"></a>00285             <span class="keyword">friend</span> <span class="keywordtype">bool</span> operator==( <span class="keyword">const</span> enumerable_thread_specific_iterator&lt;C,T&gt;&amp; i, 
+<a name="l00286"></a>00286                                     <span class="keyword">const</span> enumerable_thread_specific_iterator&lt;C,U&gt;&amp; j );
+<a name="l00287"></a>00287         
+<a name="l00288"></a>00288             <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00289"></a>00289             <span class="keyword">friend</span> <span class="keywordtype">bool</span> operator&lt;( const enumerable_thread_specific_iterator&lt;C,T&gt;&amp; i, 
+<a name="l00290"></a>00290                                    <span class="keyword">const</span> enumerable_thread_specific_iterator&lt;C,U&gt;&amp; j );
+<a name="l00291"></a>00291         
+<a name="l00292"></a>00292             <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00293"></a>00293             <span class="keyword">friend</span> ptrdiff_t operator-( <span class="keyword">const</span> enumerable_thread_specific_iterator&lt;C,T&gt;&amp; i, <span class="keyword">const</span> enumerable_thread_specific_iterator&lt;C,U&gt;&amp; j );
+<a name="l00294"></a>00294             
+<a name="l00295"></a>00295             <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> U&gt; 
+<a name="l00296"></a>00296             <span class="keyword">friend</span> <span class="keyword">class </span>enumerable_thread_specific_iterator;
+<a name="l00297"></a>00297         
+<a name="l00298"></a>00298             <span class="keyword">public</span>:
+<a name="l00299"></a>00299         
+<a name="l00300"></a>00300             enumerable_thread_specific_iterator( <span class="keyword">const</span> Container &amp;container, <span class="keyword">typename</span> Container::size_type index ) : 
+<a name="l00301"></a>00301                 my_container(&amp;const_cast&lt;Container &amp;&gt;(container)), my_index(index), my_value(NULL) {}
+<a name="l00302"></a>00302         
+<a name="l00304"></a>00304             enumerable_thread_specific_iterator() : my_container(NULL), my_index(0), my_value(NULL) {}
+<a name="l00305"></a>00305         
+<a name="l00306"></a>00306             <span class="keyword">template</span>&lt;<span class="keyword">typename</span> U&gt;
+<a name="l00307"></a>00307             enumerable_thread_specific_iterator( <span class="keyword">const</span> enumerable_thread_specific_iterator&lt;Container, U&gt;&amp; other ) :
+<a name="l00308"></a>00308                     my_container( other.my_container ), my_index( other.my_index), my_value( const_cast&lt;Value *&gt;(other.my_value) ) {}
+<a name="l00309"></a>00309         
+<a name="l00310"></a>00310             enumerable_thread_specific_iterator operator+( ptrdiff_t offset )<span class="keyword"> const </span>{
+<a name="l00311"></a>00311                 <span class="keywordflow">return</span> enumerable_thread_specific_iterator(*my_container, my_index + offset);
+<a name="l00312"></a>00312             }
+<a name="l00313"></a>00313         
+<a name="l00314"></a>00314             enumerable_thread_specific_iterator &amp;operator+=( ptrdiff_t offset ) {
+<a name="l00315"></a>00315                 my_index += offset;
+<a name="l00316"></a>00316                 my_value = NULL;
+<a name="l00317"></a>00317                 <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00318"></a>00318             }
+<a name="l00319"></a>00319         
+<a name="l00320"></a>00320             enumerable_thread_specific_iterator operator-( ptrdiff_t offset )<span class="keyword"> const </span>{
+<a name="l00321"></a>00321                 <span class="keywordflow">return</span> enumerable_thread_specific_iterator( *my_container, my_index-offset );
+<a name="l00322"></a>00322             }
+<a name="l00323"></a>00323         
+<a name="l00324"></a>00324             enumerable_thread_specific_iterator &amp;operator-=( ptrdiff_t offset ) {
+<a name="l00325"></a>00325                 my_index -= offset;
+<a name="l00326"></a>00326                 my_value = NULL;
+<a name="l00327"></a>00327                 <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00328"></a>00328             }
+<a name="l00329"></a>00329         
+<a name="l00330"></a>00330             Value&amp; operator*()<span class="keyword"> const </span>{
+<a name="l00331"></a>00331                 Value* value = my_value;
+<a name="l00332"></a>00332                 <span class="keywordflow">if</span>( !value ) {
+<a name="l00333"></a>00333                     value = my_value = reinterpret_cast&lt;Value *&gt;(&amp;(*my_container)[my_index].value);
+<a name="l00334"></a>00334                 }
+<a name="l00335"></a>00335                 __TBB_ASSERT( value==reinterpret_cast&lt;Value *&gt;(&amp;(*my_container)[my_index].value), <span class="stringliteral">"corrupt cache"</span> );
+<a name="l00336"></a>00336                 <span class="keywordflow">return</span> *value;
+<a name="l00337"></a>00337             }
+<a name="l00338"></a>00338         
+<a name="l00339"></a>00339             Value&amp; operator[]( ptrdiff_t k )<span class="keyword"> const </span>{
+<a name="l00340"></a>00340                <span class="keywordflow">return</span> (*my_container)[my_index + k].value;
+<a name="l00341"></a>00341             }
+<a name="l00342"></a>00342         
+<a name="l00343"></a>00343             Value* operator-&gt;()<span class="keyword"> const </span>{<span class="keywordflow">return</span> &amp;operator*();}
+<a name="l00344"></a>00344         
+<a name="l00345"></a>00345             enumerable_thread_specific_iterator&amp; operator++() {
+<a name="l00346"></a>00346                 ++my_index;
+<a name="l00347"></a>00347                 my_value = NULL;
+<a name="l00348"></a>00348                 <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00349"></a>00349             }
+<a name="l00350"></a>00350         
+<a name="l00351"></a>00351             enumerable_thread_specific_iterator&amp; operator--() {
+<a name="l00352"></a>00352                 --my_index;
+<a name="l00353"></a>00353                 my_value = NULL;
+<a name="l00354"></a>00354                 <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00355"></a>00355             }
+<a name="l00356"></a>00356         
+<a name="l00358"></a>00358             enumerable_thread_specific_iterator operator++(<span class="keywordtype">int</span>) {
+<a name="l00359"></a>00359                 enumerable_thread_specific_iterator result = *<span class="keyword">this</span>;
+<a name="l00360"></a>00360                 ++my_index;
+<a name="l00361"></a>00361                 my_value = NULL;
+<a name="l00362"></a>00362                 <span class="keywordflow">return</span> result;
+<a name="l00363"></a>00363             }
+<a name="l00364"></a>00364         
+<a name="l00366"></a>00366             enumerable_thread_specific_iterator operator--(<span class="keywordtype">int</span>) {
+<a name="l00367"></a>00367                 enumerable_thread_specific_iterator result = *<span class="keyword">this</span>;
+<a name="l00368"></a>00368                 --my_index;
+<a name="l00369"></a>00369                 my_value = NULL;
+<a name="l00370"></a>00370                 <span class="keywordflow">return</span> result;
+<a name="l00371"></a>00371             }
+<a name="l00372"></a>00372         
+<a name="l00373"></a>00373             <span class="comment">// STL support</span>
+<a name="l00374"></a>00374             <span class="keyword">typedef</span> ptrdiff_t difference_type;
+<a name="l00375"></a>00375             <span class="keyword">typedef</span> Value value_type;
+<a name="l00376"></a>00376             <span class="keyword">typedef</span> Value* pointer;
+<a name="l00377"></a>00377             <span class="keyword">typedef</span> Value&amp; reference;
+<a name="l00378"></a>00378             <span class="keyword">typedef</span> std::random_access_iterator_tag iterator_category;
+<a name="l00379"></a>00379         };
+<a name="l00380"></a>00380         
+<a name="l00381"></a>00381         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> T&gt;
+<a name="l00382"></a>00382         enumerable_thread_specific_iterator&lt;Container,T&gt; operator+( ptrdiff_t offset, 
+<a name="l00383"></a>00383                                                                     <span class="keyword">const</span> enumerable_thread_specific_iterator&lt;Container,T&gt;&amp; v ) {
+<a name="l00384"></a>00384             <span class="keywordflow">return</span> enumerable_thread_specific_iterator&lt;Container,T&gt;( v.my_container, v.my_index + offset );
+<a name="l00385"></a>00385         }
+<a name="l00386"></a>00386         
+<a name="l00387"></a>00387         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00388"></a>00388         <span class="keywordtype">bool</span> operator==( <span class="keyword">const</span> enumerable_thread_specific_iterator&lt;Container,T&gt;&amp; i, 
+<a name="l00389"></a>00389                          <span class="keyword">const</span> enumerable_thread_specific_iterator&lt;Container,U&gt;&amp; j ) {
+<a name="l00390"></a>00390             <span class="keywordflow">return</span> i.my_index==j.my_index &amp;&amp; i.my_container == j.my_container;
+<a name="l00391"></a>00391         }
+<a name="l00392"></a>00392         
+<a name="l00393"></a>00393         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00394"></a>00394         <span class="keywordtype">bool</span> operator!=( <span class="keyword">const</span> enumerable_thread_specific_iterator&lt;Container,T&gt;&amp; i, 
+<a name="l00395"></a>00395                          <span class="keyword">const</span> enumerable_thread_specific_iterator&lt;Container,U&gt;&amp; j ) {
+<a name="l00396"></a>00396             <span class="keywordflow">return</span> !(i==j);
+<a name="l00397"></a>00397         }
+<a name="l00398"></a>00398         
+<a name="l00399"></a>00399         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00400"></a>00400         <span class="keywordtype">bool</span> operator&lt;( const enumerable_thread_specific_iterator&lt;Container,T&gt;&amp; i, 
+<a name="l00401"></a>00401                         <span class="keyword">const</span> enumerable_thread_specific_iterator&lt;Container,U&gt;&amp; j ) {
+<a name="l00402"></a>00402             <span class="keywordflow">return</span> i.my_index&lt;j.my_index;
+<a name="l00403"></a>00403         }
+<a name="l00404"></a>00404         
+<a name="l00405"></a>00405         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00406"></a>00406         <span class="keywordtype">bool</span> operator&gt;( <span class="keyword">const</span> enumerable_thread_specific_iterator&lt;Container,T&gt;&amp; i, 
+<a name="l00407"></a>00407                         <span class="keyword">const</span> enumerable_thread_specific_iterator&lt;Container,U&gt;&amp; j ) {
+<a name="l00408"></a>00408             <span class="keywordflow">return</span> j&lt;i;
+<a name="l00409"></a>00409         }
+<a name="l00410"></a>00410         
+<a name="l00411"></a>00411         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00412"></a>00412         <span class="keywordtype">bool</span> operator&gt;=( <span class="keyword">const</span> enumerable_thread_specific_iterator&lt;Container,T&gt;&amp; i, 
+<a name="l00413"></a>00413                          <span class="keyword">const</span> enumerable_thread_specific_iterator&lt;Container,U&gt;&amp; j ) {
+<a name="l00414"></a>00414             <span class="keywordflow">return</span> !(i&lt;j);
+<a name="l00415"></a>00415         }
+<a name="l00416"></a>00416         
+<a name="l00417"></a>00417         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00418"></a>00418         <span class="keywordtype">bool</span> operator&lt;=( const enumerable_thread_specific_iterator&lt;Container,T&gt;&amp; i, 
+<a name="l00419"></a>00419                          <span class="keyword">const</span> enumerable_thread_specific_iterator&lt;Container,U&gt;&amp; j ) {
+<a name="l00420"></a>00420             <span class="keywordflow">return</span> !(j&lt;i);
+<a name="l00421"></a>00421         }
+<a name="l00422"></a>00422         
+<a name="l00423"></a>00423         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Container, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00424"></a>00424         ptrdiff_t operator-( <span class="keyword">const</span> enumerable_thread_specific_iterator&lt;Container,T&gt;&amp; i, 
+<a name="l00425"></a>00425                              <span class="keyword">const</span> enumerable_thread_specific_iterator&lt;Container,U&gt;&amp; j ) {
+<a name="l00426"></a>00426             <span class="keywordflow">return</span> i.my_index-j.my_index;
+<a name="l00427"></a>00427         }
+<a name="l00428"></a>00428 
+<a name="l00429"></a>00429     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> SegmentedContainer, <span class="keyword">typename</span> Value &gt;
+<a name="l00430"></a>00430         <span class="keyword">class </span>segmented_iterator
+<a name="l00431"></a>00431 #if defined(_WIN64) &amp;&amp; defined(_MSC_VER)
+<a name="l00432"></a>00432         : public std::iterator&lt;std::input_iterator_tag, Value&gt;
+<a name="l00433"></a>00433 #endif
+<a name="l00434"></a>00434         {
+<a name="l00435"></a>00435             <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00436"></a>00436             <span class="keyword">friend</span> <span class="keywordtype">bool</span> operator==(<span class="keyword">const</span> segmented_iterator&lt;C,T&gt;&amp; i, <span class="keyword">const</span> segmented_iterator&lt;C,U&gt;&amp; j);
+<a name="l00437"></a>00437 
+<a name="l00438"></a>00438             <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00439"></a>00439             <span class="keyword">friend</span> <span class="keywordtype">bool</span> operator!=(<span class="keyword">const</span> segmented_iterator&lt;C,T&gt;&amp; i, <span class="keyword">const</span> segmented_iterator&lt;C,U&gt;&amp; j);
+<a name="l00440"></a>00440             
+<a name="l00441"></a>00441             <span class="keyword">template</span>&lt;<span class="keyword">typename</span> C, <span class="keyword">typename</span> U&gt; 
+<a name="l00442"></a>00442             <span class="keyword">friend</span> <span class="keyword">class </span>segmented_iterator;
+<a name="l00443"></a>00443 
+<a name="l00444"></a>00444             <span class="keyword">public</span>:
+<a name="l00445"></a>00445 
+<a name="l00446"></a>00446                 segmented_iterator() {my_segcont = NULL;}
+<a name="l00447"></a>00447 
+<a name="l00448"></a>00448                 segmented_iterator( <span class="keyword">const</span> SegmentedContainer&amp; _segmented_container ) : 
+<a name="l00449"></a>00449                     my_segcont(const_cast&lt;SegmentedContainer*&gt;(&amp;_segmented_container)),
+<a name="l00450"></a>00450                     outer_iter(my_segcont-&gt;end()) { }
+<a name="l00451"></a>00451 
+<a name="l00452"></a>00452                 ~segmented_iterator() {}
+<a name="l00453"></a>00453 
+<a name="l00454"></a>00454                 <span class="keyword">typedef</span> <span class="keyword">typename</span> SegmentedContainer::iterator outer_iterator;
+<a name="l00455"></a>00455                 <span class="keyword">typedef</span> <span class="keyword">typename</span> SegmentedContainer::value_type InnerContainer;
+<a name="l00456"></a>00456                 <span class="keyword">typedef</span> <span class="keyword">typename</span> InnerContainer::iterator inner_iterator;
+<a name="l00457"></a>00457 
+<a name="l00458"></a>00458                 <span class="comment">// STL support</span>
+<a name="l00459"></a>00459                 <span class="keyword">typedef</span> ptrdiff_t difference_type;
+<a name="l00460"></a>00460                 <span class="keyword">typedef</span> Value value_type;
+<a name="l00461"></a>00461                 <span class="keyword">typedef</span> <span class="keyword">typename</span> SegmentedContainer::size_type size_type;
+<a name="l00462"></a>00462                 <span class="keyword">typedef</span> Value* pointer;
+<a name="l00463"></a>00463                 <span class="keyword">typedef</span> Value&amp; reference;
+<a name="l00464"></a>00464                 <span class="keyword">typedef</span> std::input_iterator_tag iterator_category;
+<a name="l00465"></a>00465 
+<a name="l00466"></a>00466                 <span class="comment">// Copy Constructor</span>
+<a name="l00467"></a>00467                 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> U&gt;
+<a name="l00468"></a>00468                 segmented_iterator(<span class="keyword">const</span> segmented_iterator&lt;SegmentedContainer, U&gt;&amp; other) :
+<a name="l00469"></a>00469                     my_segcont(other.my_segcont),
+<a name="l00470"></a>00470                     outer_iter(other.outer_iter),
+<a name="l00471"></a>00471                     <span class="comment">// can we assign a default-constructed iterator to inner if we're at the end?</span>
+<a name="l00472"></a>00472                     inner_iter(other.inner_iter)
+<a name="l00473"></a>00473                 {}
+<a name="l00474"></a>00474 
+<a name="l00475"></a>00475                 <span class="comment">// assignment</span>
+<a name="l00476"></a>00476                 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> U&gt;
+<a name="l00477"></a>00477                 segmented_iterator&amp; operator=( <span class="keyword">const</span> segmented_iterator&lt;SegmentedContainer, U&gt;&amp; other) {
+<a name="l00478"></a>00478                     <span class="keywordflow">if</span>(<span class="keyword">this</span> != &amp;other) {
+<a name="l00479"></a>00479                         my_segcont = other.my_segcont;
+<a name="l00480"></a>00480                         outer_iter = other.outer_iter;
+<a name="l00481"></a>00481                         <span class="keywordflow">if</span>(outer_iter != my_segcont-&gt;end()) inner_iter = other.inner_iter;
+<a name="l00482"></a>00482                     }
+<a name="l00483"></a>00483                     <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00484"></a>00484                 }
+<a name="l00485"></a>00485 
+<a name="l00486"></a>00486                 <span class="comment">// allow assignment of outer iterator to segmented iterator.  Once it is</span>
+<a name="l00487"></a>00487                 <span class="comment">// assigned, move forward until a non-empty inner container is found or</span>
+<a name="l00488"></a>00488                 <span class="comment">// the end of the outer container is reached.</span>
+<a name="l00489"></a>00489                 segmented_iterator&amp; operator=(<span class="keyword">const</span> outer_iterator&amp; new_outer_iter) {
+<a name="l00490"></a>00490                     __TBB_ASSERT(my_segcont != NULL, NULL);
+<a name="l00491"></a>00491                     <span class="comment">// check that this iterator points to something inside the segmented container</span>
+<a name="l00492"></a>00492                     <span class="keywordflow">for</span>(outer_iter = new_outer_iter ;outer_iter!=my_segcont-&gt;end(); ++outer_iter) {
+<a name="l00493"></a>00493                         <span class="keywordflow">if</span>( !outer_iter-&gt;empty() ) {
+<a name="l00494"></a>00494                             inner_iter = outer_iter-&gt;begin();
+<a name="l00495"></a>00495                             <span class="keywordflow">break</span>;
+<a name="l00496"></a>00496                         }
+<a name="l00497"></a>00497                     }
+<a name="l00498"></a>00498                     <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00499"></a>00499                 }
+<a name="l00500"></a>00500 
+<a name="l00501"></a>00501                 <span class="comment">// pre-increment</span>
+<a name="l00502"></a>00502                 segmented_iterator&amp; operator++() {
+<a name="l00503"></a>00503                     advance_me();
+<a name="l00504"></a>00504                     <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00505"></a>00505                 }
+<a name="l00506"></a>00506 
+<a name="l00507"></a>00507                 <span class="comment">// post-increment</span>
+<a name="l00508"></a>00508                 segmented_iterator operator++(<span class="keywordtype">int</span>) {
+<a name="l00509"></a>00509                     segmented_iterator tmp = *<span class="keyword">this</span>;
+<a name="l00510"></a>00510                     operator++();
+<a name="l00511"></a>00511                     <span class="keywordflow">return</span> tmp;
+<a name="l00512"></a>00512                 }
+<a name="l00513"></a>00513 
+<a name="l00514"></a>00514                 <span class="keywordtype">bool</span> operator==(<span class="keyword">const</span> outer_iterator&amp; other_outer)<span class="keyword"> const </span>{
+<a name="l00515"></a>00515                     __TBB_ASSERT(my_segcont != NULL, NULL);
+<a name="l00516"></a>00516                     <span class="keywordflow">return</span> (outer_iter == other_outer &amp;&amp;
+<a name="l00517"></a>00517                             (outer_iter == my_segcont-&gt;end() || inner_iter == outer_iter-&gt;begin()));
+<a name="l00518"></a>00518                 }
+<a name="l00519"></a>00519 
+<a name="l00520"></a>00520                 <span class="keywordtype">bool</span> operator!=(<span class="keyword">const</span> outer_iterator&amp; other_outer)<span class="keyword"> const </span>{
+<a name="l00521"></a>00521                     <span class="keywordflow">return</span> !operator==(other_outer);
+<a name="l00522"></a>00522 
+<a name="l00523"></a>00523                 }
+<a name="l00524"></a>00524 
+<a name="l00525"></a>00525                 <span class="comment">// (i)* RHS</span>
+<a name="l00526"></a>00526                 reference operator*()<span class="keyword"> const </span>{
+<a name="l00527"></a>00527                     __TBB_ASSERT(my_segcont != NULL, NULL);
+<a name="l00528"></a>00528                     __TBB_ASSERT(outer_iter != my_segcont-&gt;end(), <span class="stringliteral">"Dereferencing a pointer at end of container"</span>);
+<a name="l00529"></a>00529                     __TBB_ASSERT(inner_iter != outer_iter-&gt;end(), NULL); <span class="comment">// should never happen</span>
+<a name="l00530"></a>00530                     <span class="keywordflow">return</span> *inner_iter;
+<a name="l00531"></a>00531                 }
+<a name="l00532"></a>00532 
+<a name="l00533"></a>00533                 <span class="comment">// i-&gt;</span>
+<a name="l00534"></a>00534                 pointer operator-&gt;()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> &amp;operator*();}
+<a name="l00535"></a>00535 
+<a name="l00536"></a>00536             <span class="keyword">private</span>:
+<a name="l00537"></a>00537                 SegmentedContainer*             my_segcont;
+<a name="l00538"></a>00538                 outer_iterator outer_iter;
+<a name="l00539"></a>00539                 inner_iterator inner_iter;
+<a name="l00540"></a>00540 
+<a name="l00541"></a>00541                 <span class="keywordtype">void</span> advance_me() {
+<a name="l00542"></a>00542                     __TBB_ASSERT(my_segcont != NULL, NULL);
+<a name="l00543"></a>00543                     __TBB_ASSERT(outer_iter != my_segcont-&gt;end(), NULL); <span class="comment">// not true if there are no inner containers</span>
+<a name="l00544"></a>00544                     __TBB_ASSERT(inner_iter != outer_iter-&gt;end(), NULL); <span class="comment">// not true if the inner containers are all empty.</span>
+<a name="l00545"></a>00545                     ++inner_iter;
+<a name="l00546"></a>00546                     <span class="keywordflow">while</span>(inner_iter == outer_iter-&gt;end() &amp;&amp; ++outer_iter != my_segcont-&gt;end()) {
+<a name="l00547"></a>00547                         inner_iter = outer_iter-&gt;begin();
+<a name="l00548"></a>00548                     }
+<a name="l00549"></a>00549                 }
+<a name="l00550"></a>00550         };    <span class="comment">// segmented_iterator</span>
+<a name="l00551"></a>00551 
+<a name="l00552"></a>00552         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> SegmentedContainer, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00553"></a>00553         <span class="keywordtype">bool</span> operator==( <span class="keyword">const</span> segmented_iterator&lt;SegmentedContainer,T&gt;&amp; i, 
+<a name="l00554"></a>00554                          <span class="keyword">const</span> segmented_iterator&lt;SegmentedContainer,U&gt;&amp; j ) {
+<a name="l00555"></a>00555             <span class="keywordflow">if</span>(i.my_segcont != j.my_segcont) <span class="keywordflow">return</span> <span class="keyword">false</span>;
+<a name="l00556"></a>00556             <span class="keywordflow">if</span>(i.my_segcont == NULL) <span class="keywordflow">return</span> <span class="keyword">true</span>;
+<a name="l00557"></a>00557             <span class="keywordflow">if</span>(i.outer_iter != j.outer_iter) <span class="keywordflow">return</span> <span class="keyword">false</span>;
+<a name="l00558"></a>00558             <span class="keywordflow">if</span>(i.outer_iter == i.my_segcont-&gt;end()) <span class="keywordflow">return</span> <span class="keyword">true</span>;
+<a name="l00559"></a>00559             <span class="keywordflow">return</span> i.inner_iter == j.inner_iter;
+<a name="l00560"></a>00560         }
+<a name="l00561"></a>00561 
+<a name="l00562"></a>00562         <span class="comment">// !=</span>
+<a name="l00563"></a>00563         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> SegmentedContainer, <span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00564"></a>00564         <span class="keywordtype">bool</span> operator!=( <span class="keyword">const</span> segmented_iterator&lt;SegmentedContainer,T&gt;&amp; i, 
+<a name="l00565"></a>00565                          <span class="keyword">const</span> segmented_iterator&lt;SegmentedContainer,U&gt;&amp; j ) {
+<a name="l00566"></a>00566             <span class="keywordflow">return</span> !(i==j);
+<a name="l00567"></a>00567         }
+<a name="l00568"></a>00568 
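segmented_iterator flattens iteration over a container of containers: assigning an outer iterator positions it on the first non-empty segment, and advance_me() steps the inner iterator, skipping any empty segments it runs off the end of. The same traversal written against plain standard containers (a sketch with illustrative names, not the class itself):

    // Walk a vector of vectors the way segmented_iterator::advance_me() does,
    // skipping empty inner containers.
    #include <cstdio>
    #include <vector>

    int main() {
        std::vector< std::vector<int> > segments(4);
        segments[0].push_back(1);
        segments[2].push_back(2);
        segments[2].push_back(3);                 // segments[1] and segments[3] stay empty

        std::vector< std::vector<int> >::iterator outer = segments.begin();
        while( outer != segments.end() && outer->empty() ) ++outer;   // first non-empty segment
        std::vector<int>::iterator inner;
        if( outer != segments.end() ) inner = outer->begin();

        while( outer != segments.end() ) {
            std::printf("%d\n", *inner);          // prints 1, 2, 3
            ++inner;                              // advance, then skip empty segments
            while( inner == outer->end() && ++outer != segments.end() )
                inner = outer->begin();
        }
        return 0;
    }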
+<a name="l00569"></a>00569         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00570"></a>00570         <span class="keyword">struct </span>destruct_only: tbb::internal::no_copy {
+<a name="l00571"></a>00571             <a class="code" href="a00146.html">tbb::aligned_space&lt;T,1&gt;</a> value;
+<a name="l00572"></a>00572             ~destruct_only() {value.begin()[0].~T();}
+<a name="l00573"></a>00573         };
+<a name="l00574"></a>00574 
+<a name="l00575"></a>00575         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00576"></a>00576         <span class="keyword">struct </span>construct_by_default: tbb::internal::no_assign {
+<a name="l00577"></a>00577             <span class="keywordtype">void</span> construct(<span class="keywordtype">void</span>*where) {<span class="keyword">new</span>(where) T();} <span class="comment">// C++ note: the () in T() ensure zero initialization.</span>
+<a name="l00578"></a>00578             construct_by_default( <span class="keywordtype">int</span> ) {}
+<a name="l00579"></a>00579         };
+<a name="l00580"></a>00580 
+<a name="l00581"></a>00581         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00582"></a>00582         <span class="keyword">struct </span>construct_by_exemplar: tbb::internal::no_assign {
+<a name="l00583"></a>00583             <span class="keyword">const</span> T exemplar;
+<a name="l00584"></a>00584             <span class="keywordtype">void</span> construct(<span class="keywordtype">void</span>*where) {<span class="keyword">new</span>(where) T(exemplar);}
+<a name="l00585"></a>00585             construct_by_exemplar( <span class="keyword">const</span> T&amp; t ) : exemplar(t) {}
+<a name="l00586"></a>00586         };
+<a name="l00587"></a>00587 
+<a name="l00588"></a>00588         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> Finit&gt;
+<a name="l00589"></a>00589         <span class="keyword">struct </span>construct_by_finit: tbb::internal::no_assign {
+<a name="l00590"></a>00590             Finit f;
+<a name="l00591"></a>00591             <span class="keywordtype">void</span> construct(<span class="keywordtype">void</span>* where) {<span class="keyword">new</span>(where) T(f());}
+<a name="l00592"></a>00592             construct_by_finit( <span class="keyword">const</span> Finit&amp; f_ ) : f(f_) {}
+<a name="l00593"></a>00593         };
+<a name="l00594"></a>00594 
+<a name="l00595"></a>00595         <span class="comment">// storage for initialization function pointer</span>
+<a name="l00596"></a>00596         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00597"></a>00597         <span class="keyword">class </span>callback_base {
+<a name="l00598"></a>00598         <span class="keyword">public</span>:
+<a name="l00599"></a>00599             <span class="comment">// Clone *this</span>
+<a name="l00600"></a>00600             <span class="keyword">virtual</span> callback_base* clone() = 0;
+<a name="l00601"></a>00601             <span class="comment">// Destruct and free *this</span>
+<a name="l00602"></a>00602             <span class="keyword">virtual</span> <span class="keywordtype">void</span> destroy() = 0;
+<a name="l00603"></a>00603             <span class="comment">// Need virtual destructor to satisfy GCC compiler warning</span>
+<a name="l00604"></a>00604             <span class="keyword">virtual</span> ~callback_base() { }
+<a name="l00605"></a>00605             <span class="comment">// Construct T at where</span>
+<a name="l00606"></a>00606             <span class="keyword">virtual</span> <span class="keywordtype">void</span> construct(<span class="keywordtype">void</span>* where) = 0;
+<a name="l00607"></a>00607         };
+<a name="l00608"></a>00608 
+<a name="l00609"></a>00609         <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> Constructor&gt;
+<a name="l00610"></a>00610         <span class="keyword">class </span>callback_leaf: <span class="keyword">public</span> callback_base&lt;T&gt;, Constructor {
+<a name="l00611"></a>00611             <span class="keyword">template</span>&lt;<span class="keyword">typename</span> X&gt; callback_leaf( <span class="keyword">const</span> X&amp; x ) : Constructor(x) {}
+<a name="l00612"></a>00612 
+<a name="l00613"></a>00613             <span class="keyword">typedef</span> <span class="keyword">typename</span> <a class="code" href="a00209.html">tbb::tbb_allocator&lt;callback_leaf&gt;</a> my_allocator_type;
+<a name="l00614"></a>00614 
+<a name="l00615"></a>00615             <span class="comment">/*override*/</span> callback_base&lt;T&gt;* clone() {
+<a name="l00616"></a>00616                 <span class="keywordtype">void</span>* where = my_allocator_type().allocate(1);
+<a name="l00617"></a>00617                 <span class="keywordflow">return</span> <span class="keyword">new</span>(where) callback_leaf(*<span class="keyword">this</span>);
+<a name="l00618"></a>00618             }
+<a name="l00619"></a>00619 
+<a name="l00620"></a>00620             <span class="comment">/*override*/</span> <span class="keywordtype">void</span> destroy() {
+<a name="l00621"></a>00621                 my_allocator_type().destroy(<span class="keyword">this</span>);
+<a name="l00622"></a>00622                 my_allocator_type().deallocate(<span class="keyword">this</span>,1);
+<a name="l00623"></a>00623             }
+<a name="l00624"></a>00624 
+<a name="l00625"></a>00625             <span class="comment">/*override*/</span> <span class="keywordtype">void</span> construct(<span class="keywordtype">void</span>* where) {
+<a name="l00626"></a>00626                 Constructor::construct(where);
+<a name="l00627"></a>00627             }  
+<a name="l00628"></a>00628         <span class="keyword">public</span>:
+<a name="l00629"></a>00629             <span class="keyword">template</span>&lt;<span class="keyword">typename</span> X&gt;
+<a name="l00630"></a>00630             <span class="keyword">static</span> callback_base&lt;T&gt;* make( <span class="keyword">const</span> X&amp; x ) {
+<a name="l00631"></a>00631                 <span class="keywordtype">void</span>* where = my_allocator_type().allocate(1);
+<a name="l00632"></a>00632                 <span class="keywordflow">return</span> <span class="keyword">new</span>(where) callback_leaf(x);
+<a name="l00633"></a>00633             }
+<a name="l00634"></a>00634         };
+<a name="l00635"></a>00635 
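callback_base and callback_leaf type-erase the three construction policies above: make() allocates a leaf that stores the policy, clone() copies it during container copies, and construct() placement-news a T into storage the container provides. A stripped-down version of the same idea, using plain new/delete instead of tbb::tbb_allocator (names are illustrative):

    // Type-erased "how to construct a T" callback, modeled on callback_base/callback_leaf.
    #include <new>

    template<typename T>
    struct constructor_callback {                        // role of callback_base<T>
        virtual ~constructor_callback() {}
        virtual constructor_callback* clone() const = 0;
        virtual void construct( void* where ) const = 0;
    };

    template<typename T>
    class construct_from_exemplar : public constructor_callback<T> {  // ~ callback_leaf + construct_by_exemplar
        const T exemplar;
    public:
        construct_from_exemplar( const T& t ) : exemplar(t) {}
        constructor_callback<T>* clone() const { return new construct_from_exemplar(*this); }
        void construct( void* where ) const { new(where) T(exemplar); }
    };

    int main() {
        constructor_callback<int>* cb = new construct_from_exemplar<int>(42);
        void* storage = ::operator new( sizeof(int) );   // caller-provided raw storage
        cb->construct( storage );                        // placement-new: int(42)
        ::operator delete( storage );
        delete cb;
        return 0;
    }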
+<a name="l00637"></a>00637 
+<a name="l00642"></a>00642         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> U, size_t ModularSize&gt;
+<a name="l00643"></a>00643         <span class="keyword">struct </span>ets_element {
+<a name="l00644"></a>00644             <span class="keywordtype">char</span> value[ModularSize==0 ? <span class="keyword">sizeof</span>(U) : <span class="keyword">sizeof</span>(U)+(tbb::internal::NFS_MaxLineSize-ModularSize)];
+<a name="l00645"></a>00645             <span class="keywordtype">void</span> unconstruct() {
+<a name="l00646"></a>00646                 tbb::internal::punned_cast&lt;U*&gt;(&amp;value)-&gt;~U();
+<a name="l00647"></a>00647             }
+<a name="l00648"></a>00648         };
+<a name="l00649"></a>00649 
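ets_element pads each per-thread slot out to a whole number of cache lines (NFS_MaxLineSize bytes), so elements owned by different threads never share a line. A quick check of that arithmetic, assuming a 128-byte line; the constant below is only a stand-in for tbb::internal::NFS_MaxLineSize:

    // Verify that the ets_element padding rounds sizeof(U) up to a cache-line multiple.
    #include <cassert>
    #include <cstddef>

    static const std::size_t LineSize = 128;             // stand-in for tbb::internal::NFS_MaxLineSize

    template<typename U, std::size_t ModularSize>        // ModularSize == sizeof(U) % LineSize
    struct padded_sketch {
        char value[ ModularSize==0 ? sizeof(U) : sizeof(U) + (LineSize - ModularSize) ];
    };

    int main() {
        assert( sizeof( padded_sketch<char,   sizeof(char)   % LineSize> ) % LineSize == 0 );
        assert( sizeof( padded_sketch<double, sizeof(double) % LineSize> ) % LineSize == 0 );
        return 0;
    }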
+<a name="l00650"></a>00650     } <span class="comment">// namespace internal</span>
+<a name="l00652"></a>00652 <span class="comment"></span>
+<a name="l00654"></a>00654 
+<a name="l00673"></a>00673     <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T, 
+<a name="l00674"></a>00674               <span class="keyword">typename</span> Allocator=cache_aligned_allocator&lt;T&gt;, 
+<a name="l00675"></a><a class="code" href="a00168.html">00675</a>               <a class="code" href="a00272.html#a8622ae61b7e7737dac26542e181178e">ets_key_usage_type</a> ETS_key_type=ets_no_key &gt; 
+<a name="l00676"></a>00676     <span class="keyword">class </span><a class="code" href="a00168.html">enumerable_thread_specific</a>: internal::ets_base&lt;ETS_key_type&gt; { 
+<a name="l00677"></a>00677 
+<a name="l00678"></a>00678         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> U, <span class="keyword">typename</span> A, ets_key_usage_type C&gt; <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00168.html">enumerable_thread_specific</a>;
+<a name="l00679"></a>00679     
+<a name="l00680"></a>00680         <span class="keyword">typedef</span> internal::ets_element&lt;T,sizeof(T)%tbb::internal::NFS_MaxLineSize&gt; padded_element;
+<a name="l00681"></a>00681 
+<a name="l00683"></a>00683         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> I&gt;
+<a name="l00684"></a>00684         <span class="keyword">class </span>generic_range_type: <span class="keyword">public</span> <a class="code" href="a00152.html">blocked_range</a>&lt;I&gt; {
+<a name="l00685"></a>00685         <span class="keyword">public</span>:
+<a name="l00686"></a>00686             <span class="keyword">typedef</span> T value_type;
+<a name="l00687"></a>00687             <span class="keyword">typedef</span> T&amp; reference;
+<a name="l00688"></a>00688             <span class="keyword">typedef</span> <span class="keyword">const</span> T&amp; const_reference;
+<a name="l00689"></a>00689             <span class="keyword">typedef</span> I iterator;
+<a name="l00690"></a>00690             <span class="keyword">typedef</span> ptrdiff_t difference_type;
+<a name="l00691"></a>00691             generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : <a class="code" href="a00152.html">blocked_range&lt;I&gt;</a>(begin_,end_,grainsize_) {} 
+<a name="l00692"></a>00692             template&lt;typename U&gt;
+<a name="l00693"></a>00693             generic_range_type( <span class="keyword">const</span> generic_range_type&lt;U&gt;&amp; r) : <a class="code" href="a00152.html">blocked_range&lt;I&gt;</a>(r.begin(),r.end(),r.grainsize()) {} 
+<a name="l00694"></a>00694             generic_range_type( generic_range_type&amp; r, <a class="code" href="a00203.html">split</a> ) : <a class="code" href="a00152.html">blocked_range&lt;I&gt;</a>(r,<a class="code" href="a00203.html">split</a>()) {}
+<a name="l00695"></a>00695         };
+<a name="l00696"></a>00696     
+<a name="l00697"></a>00697         <span class="keyword">typedef</span> <span class="keyword">typename</span> Allocator::template rebind&lt; padded_element &gt;::other padded_allocator_type;
+<a name="l00698"></a>00698         <span class="keyword">typedef</span> <a class="code" href="a00166.html">tbb::concurrent_vector&lt; padded_element, padded_allocator_type &gt;</a> <a class="code" href="a00166.html">internal_collection_type</a>;
+<a name="l00699"></a>00699         
+<a name="l00700"></a>00700         internal::callback_base&lt;T&gt; *my_construct_callback;
+<a name="l00701"></a>00701 
+<a name="l00702"></a>00702         internal_collection_type my_locals;
+<a name="l00703"></a>00703    
+<a name="l00704"></a>00704         <span class="comment">/*override*/</span> <span class="keywordtype">void</span>* create_local() {
+<a name="l00705"></a>00705 <span class="preprocessor">#if TBB_DEPRECATED</span>
+<a name="l00706"></a>00706 <span class="preprocessor"></span>            <span class="keywordtype">void</span>* lref = &amp;my_locals[my_locals.<a class="code" href="a00166.html#e94e038f915c0268fdf2d3d7f87d81b8">push_back</a>(padded_element())];
+<a name="l00707"></a>00707 <span class="preprocessor">#else</span>
+<a name="l00708"></a>00708 <span class="preprocessor"></span>            <span class="keywordtype">void</span>* lref = &amp;*my_locals.<a class="code" href="a00166.html#e94e038f915c0268fdf2d3d7f87d81b8">push_back</a>(padded_element());
+<a name="l00709"></a>00709 <span class="preprocessor">#endif</span>
+<a name="l00710"></a>00710 <span class="preprocessor"></span>            my_construct_callback-&gt;construct(lref);
+<a name="l00711"></a>00711             <span class="keywordflow">return</span> lref;
+<a name="l00712"></a>00712         } 
+<a name="l00713"></a>00713 
+<a name="l00714"></a>00714         <span class="keywordtype">void</span> unconstruct_locals() {
+<a name="l00715"></a>00715             <span class="keywordflow">for</span>(<span class="keyword">typename</span> internal_collection_type::iterator cvi = my_locals.<a class="code" href="a00166.html#730b23a251ecb6d37f692fb22f38e029">begin</a>(); cvi != my_locals.<a class="code" href="a00166.html#c0b51160e5a764982ec97a455f94f2c6">end</a>(); ++cvi) {
+<a name="l00716"></a>00716                 cvi-&gt;unconstruct();
+<a name="l00717"></a>00717             }
+<a name="l00718"></a>00718         }
+<a name="l00719"></a>00719 
+<a name="l00720"></a>00720         <span class="keyword">typedef</span> <span class="keyword">typename</span> Allocator::template rebind&lt; uintptr_t &gt;::other array_allocator_type;
+<a name="l00721"></a>00721 
+<a name="l00722"></a>00722         <span class="comment">// _size is in bytes</span>
+<a name="l00723"></a>00723         <span class="comment">/*override*/</span> <span class="keywordtype">void</span>* create_array(size_t _size) {
+<a name="l00724"></a>00724             size_t nelements = (_size + <span class="keyword">sizeof</span>(uintptr_t) -1) / <span class="keyword">sizeof</span>(uintptr_t);
+<a name="l00725"></a>00725             <span class="keywordflow">return</span> array_allocator_type().allocate(nelements);
+<a name="l00726"></a>00726         }
+<a name="l00727"></a>00727 
+<a name="l00728"></a>00728         <span class="comment">/*override*/</span> <span class="keywordtype">void</span> free_array( <span class="keywordtype">void</span>* _ptr, size_t _size) {
+<a name="l00729"></a>00729             size_t nelements = (_size + <span class="keyword">sizeof</span>(uintptr_t) -1) / <span class="keyword">sizeof</span>(uintptr_t);
+<a name="l00730"></a>00730             array_allocator_type().deallocate( reinterpret_cast&lt;uintptr_t *&gt;(_ptr),nelements);
+<a name="l00731"></a>00731         }
+<a name="l00732"></a>00732    
+<a name="l00733"></a>00733     <span class="keyword">public</span>:
+<a name="l00734"></a>00734     
+<a name="l00736"></a>00736         <span class="keyword">typedef</span> Allocator allocator_type;
+<a name="l00737"></a>00737         <span class="keyword">typedef</span> T value_type;
+<a name="l00738"></a>00738         <span class="keyword">typedef</span> T&amp; reference;
+<a name="l00739"></a>00739         <span class="keyword">typedef</span> <span class="keyword">const</span> T&amp; const_reference;
+<a name="l00740"></a>00740         <span class="keyword">typedef</span> T* pointer;
+<a name="l00741"></a>00741         <span class="keyword">typedef</span> <span class="keyword">const</span> T* const_pointer;
+<a name="l00742"></a>00742         <span class="keyword">typedef</span> <span class="keyword">typename</span> internal_collection_type::size_type size_type;
+<a name="l00743"></a>00743         <span class="keyword">typedef</span> <span class="keyword">typename</span> internal_collection_type::difference_type difference_type;
+<a name="l00744"></a>00744     
+<a name="l00745"></a>00745         <span class="comment">// Iterator types</span>
+<a name="l00746"></a>00746         <span class="keyword">typedef</span> <span class="keyword">typename</span> internal::enumerable_thread_specific_iterator&lt; internal_collection_type, value_type &gt; iterator;
+<a name="l00747"></a>00747         <span class="keyword">typedef</span> <span class="keyword">typename</span> internal::enumerable_thread_specific_iterator&lt; internal_collection_type, const value_type &gt; const_iterator;
+<a name="l00748"></a>00748 
+<a name="l00749"></a>00749         <span class="comment">// Parallel range types</span>
+<a name="l00750"></a>00750         <span class="keyword">typedef</span> generic_range_type&lt; iterator &gt; range_type;
+<a name="l00751"></a>00751         <span class="keyword">typedef</span> generic_range_type&lt; const_iterator &gt; const_range_type;
+<a name="l00752"></a>00752     
+<a name="l00754"></a>00754         <a class="code" href="a00168.html">enumerable_thread_specific</a>() : 
+<a name="l00755"></a>00755             my_construct_callback( internal::callback_leaf&lt;T,internal::construct_by_default&lt;T&gt; &gt;::make(<span class="comment">/*dummy argument*/</span>0) ) 
+<a name="l00756"></a>00756         {}
+<a name="l00757"></a>00757 
+<a name="l00759"></a><a class="code" href="a00168.html#8d4b456ff9d7b289c73254eccc11db45">00759</a>         <span class="keyword">template</span> &lt;<span class="keyword">typename</span> Finit&gt;
+<a name="l00760"></a>00760         <a class="code" href="a00168.html">enumerable_thread_specific</a>( Finit finit ) : 
+<a name="l00761"></a>00761             my_construct_callback( internal::callback_leaf&lt;T,internal::construct_by_finit&lt;T,Finit&gt; &gt;::make( finit ) ) 
+<a name="l00762"></a>00762         {}
+<a name="l00763"></a>00763     
+<a name="l00765"></a>00765         <a class="code" href="a00168.html">enumerable_thread_specific</a>(<span class="keyword">const</span> T&amp; exemplar) : 
+<a name="l00766"></a>00766             my_construct_callback( internal::callback_leaf&lt;T,internal::construct_by_exemplar&lt;T&gt; &gt;::make( exemplar ) )
+<a name="l00767"></a>00767         {}
+<a name="l00768"></a>00768     
+<a name="l00770"></a>00770         ~<a class="code" href="a00168.html">enumerable_thread_specific</a>() { 
+<a name="l00771"></a>00771             my_construct_callback-&gt;destroy();
+<a name="l00772"></a>00772             this-&gt;clear();  <span class="comment">// deallocation before the derived class is finished destructing</span>
+<a name="l00773"></a>00773             <span class="comment">// So free(array *) is still accessible</span>
+<a name="l00774"></a>00774         }
+<a name="l00775"></a>00775       
+<a name="l00777"></a>00777         reference local() {
+<a name="l00778"></a>00778             <span class="keywordtype">bool</span> exists;
+<a name="l00779"></a>00779             <span class="keywordflow">return</span> local(exists);
+<a name="l00780"></a>00780         }
+<a name="l00781"></a>00781 
+<a name="l00783"></a>00783         reference local(<span class="keywordtype">bool</span>&amp; exists)  {
+<a name="l00784"></a>00784             __TBB_ASSERT(ETS_key_type==ets_no_key,<span class="stringliteral">"ets_key_per_instance not yet implemented"</span>); 
+<a name="l00785"></a>00785             <span class="keywordtype">void</span>* ptr = this-&gt;table_lookup(exists);
+<a name="l00786"></a>00786             <span class="keywordflow">return</span> *(T*)ptr;
+<a name="l00787"></a>00787         }
+<a name="l00788"></a>00788 
+<a name="l00790"></a>00790         size_type size()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> my_locals.<a class="code" href="a00166.html#715fe313c4a9c22731cc404dd80c9ec9">size</a>(); }
+<a name="l00791"></a>00791     
+<a name="l00793"></a>00793         <span class="keywordtype">bool</span> empty()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> my_locals.<a class="code" href="a00166.html#c6426cb93cf20d3af40f3c90f1f0481a">empty</a>(); }
+<a name="l00794"></a>00794     
+<a name="l00796"></a>00796         iterator begin() { <span class="keywordflow">return</span> iterator( my_locals, 0 ); }
+<a name="l00798"></a>00798         iterator end() { <span class="keywordflow">return</span> iterator(my_locals, my_locals.<a class="code" href="a00166.html#715fe313c4a9c22731cc404dd80c9ec9">size</a>() ); }
+<a name="l00799"></a>00799     
+<a name="l00801"></a>00801         const_iterator begin()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> const_iterator(my_locals, 0); }
+<a name="l00802"></a>00802     
+<a name="l00804"></a>00804         const_iterator end()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> const_iterator(my_locals, my_locals.<a class="code" href="a00166.html#715fe313c4a9c22731cc404dd80c9ec9">size</a>()); }
+<a name="l00805"></a>00805 
+<a name="l00807"></a>00807         range_type range( size_t grainsize=1 ) { <span class="keywordflow">return</span> range_type( begin(), end(), grainsize ); } 
+<a name="l00808"></a>00808         
+<a name="l00810"></a>00810         const_range_type range( size_t grainsize=1 )<span class="keyword"> const </span>{ <span class="keywordflow">return</span> const_range_type( begin(), end(), grainsize ); }
+<a name="l00811"></a>00811 
+<a name="l00813"></a>00813         <span class="keywordtype">void</span> clear() {
+<a name="l00814"></a>00814             unconstruct_locals();
+<a name="l00815"></a>00815             my_locals.<a class="code" href="a00166.html#26f937a359a66b6aae904c3cd9a3c444">clear</a>();
+<a name="l00816"></a>00816             this-&gt;table_clear();
+<a name="l00817"></a>00817             <span class="comment">// callback is not destroyed</span>
+<a name="l00818"></a>00818             <span class="comment">// exemplar is not destroyed</span>
+<a name="l00819"></a>00819         }
+<a name="l00820"></a>00820 
+<a name="l00821"></a>00821     <span class="keyword">private</span>:
+<a name="l00822"></a>00822 
+<a name="l00823"></a>00823         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> U, <span class="keyword">typename</span> A2, ets_key_usage_type C2&gt;
+<a name="l00824"></a>00824         <span class="keywordtype">void</span> internal_copy( <span class="keyword">const</span> enumerable_thread_specific&lt;U, A2, C2&gt;&amp; other);
+<a name="l00825"></a>00825 
+<a name="l00826"></a>00826     <span class="keyword">public</span>:
+<a name="l00827"></a>00827 
+<a name="l00828"></a>00828         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> U, <span class="keyword">typename</span> Alloc, ets_key_usage_type Cachetype&gt;
+<a name="l00829"></a>00829         enumerable_thread_specific( <span class="keyword">const</span> enumerable_thread_specific&lt;U, Alloc, Cachetype&gt;&amp; other ) : internal::ets_base&lt;ETS_key_type&gt; ()
+<a name="l00830"></a>00830         {
+<a name="l00831"></a>00831             internal_copy(other);
+<a name="l00832"></a>00832         }
+<a name="l00833"></a>00833 
+<a name="l00834"></a>00834         enumerable_thread_specific( <span class="keyword">const</span> enumerable_thread_specific&amp; other ) : internal::ets_base&lt;ETS_key_type&gt; ()
+<a name="l00835"></a>00835         {
+<a name="l00836"></a>00836             internal_copy(other);
+<a name="l00837"></a>00837         }
+<a name="l00838"></a>00838 
+<a name="l00839"></a>00839     <span class="keyword">private</span>:
+<a name="l00840"></a>00840 
+<a name="l00841"></a>00841         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> U, <span class="keyword">typename</span> A2, ets_key_usage_type C2&gt;
+<a name="l00842"></a>00842         enumerable_thread_specific &amp;
+<a name="l00843"></a>00843         internal_assign(<span class="keyword">const</span> enumerable_thread_specific&lt;U, A2, C2&gt;&amp; other) {
+<a name="l00844"></a>00844             <span class="keywordflow">if</span>(static_cast&lt;void *&gt;( <span class="keyword">this</span> ) != static_cast&lt;const void *&gt;( &amp;other )) {
+<a name="l00845"></a>00845                 this-&gt;clear(); 
+<a name="l00846"></a>00846                 my_construct_callback-&gt;destroy();
+<a name="l00847"></a>00847                 my_construct_callback = 0;
+<a name="l00848"></a>00848                 internal_copy( other );
+<a name="l00849"></a>00849             }
+<a name="l00850"></a>00850             <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00851"></a>00851         }
+<a name="l00852"></a>00852 
+<a name="l00853"></a>00853     <span class="keyword">public</span>:
+<a name="l00854"></a>00854 
+<a name="l00855"></a>00855         <span class="comment">// assignment</span>
+<a name="l00856"></a>00856         enumerable_thread_specific&amp; operator=(<span class="keyword">const</span> enumerable_thread_specific&amp; other) {
+<a name="l00857"></a>00857             <span class="keywordflow">return</span> internal_assign(other);
+<a name="l00858"></a>00858         }
+<a name="l00859"></a>00859 
+<a name="l00860"></a>00860         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> U, <span class="keyword">typename</span> Alloc, ets_key_usage_type Cachetype&gt;
+<a name="l00861"></a>00861         enumerable_thread_specific&amp; operator=(<span class="keyword">const</span> enumerable_thread_specific&lt;U, Alloc, Cachetype&gt;&amp; other)
+<a name="l00862"></a>00862         {
+<a name="l00863"></a>00863             <span class="keywordflow">return</span> internal_assign(other);
+<a name="l00864"></a>00864         }
+<a name="l00865"></a>00865 
+<a name="l00866"></a>00866         <span class="comment">// combine_func_t has signature T(T,T) or T(const T&amp;, const T&amp;)</span>
+<a name="l00867"></a>00867         <span class="keyword">template</span> &lt;<span class="keyword">typename</span> combine_func_t&gt;
+<a name="l00868"></a>00868         T combine(combine_func_t f_combine) {
+<a name="l00869"></a>00869             <span class="keywordflow">if</span>(begin() == end()) {
+<a name="l00870"></a>00870                 internal::destruct_only&lt;T&gt; location;
+<a name="l00871"></a>00871                 my_construct_callback-&gt;construct(location.value.begin());
+<a name="l00872"></a>00872                 <span class="keywordflow">return</span> *location.value.begin();
+<a name="l00873"></a>00873             }
+<a name="l00874"></a>00874             const_iterator ci = begin();
+<a name="l00875"></a>00875             T my_result = *ci;
+<a name="l00876"></a>00876             <span class="keywordflow">while</span>(++ci != end()) 
+<a name="l00877"></a>00877                 my_result = f_combine( my_result, *ci );
+<a name="l00878"></a>00878             <span class="keywordflow">return</span> my_result;
+<a name="l00879"></a>00879         }
+<a name="l00880"></a>00880 
+<a name="l00881"></a>00881         <span class="comment">// combine_func_t has signature void(T) or void(const T&amp;)</span>
+<a name="l00882"></a>00882         <span class="keyword">template</span> &lt;<span class="keyword">typename</span> combine_func_t&gt;
+<a name="l00883"></a>00883         <span class="keywordtype">void</span> combine_each(combine_func_t f_combine) {
+<a name="l00884"></a>00884             <span class="keywordflow">for</span>(const_iterator ci = begin(); ci != end(); ++ci) {
+<a name="l00885"></a>00885                 f_combine( *ci );
+<a name="l00886"></a>00886             }
+<a name="l00887"></a>00887         }
+<a name="l00888"></a>00888 
+<a name="l00889"></a>00889     }; <span class="comment">// enumerable_thread_specific</span>
+<a name="l00890"></a>00890 
+<a name="l00891"></a>00891     <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> Allocator, ets_key_usage_type ETS_key_type&gt; 
+<a name="l00892"></a>00892     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> U, <span class="keyword">typename</span> A2, ets_key_usage_type C2&gt;
+<a name="l00893"></a>00893     <span class="keywordtype">void</span> enumerable_thread_specific&lt;T,Allocator,ETS_key_type&gt;::internal_copy( <span class="keyword">const</span> enumerable_thread_specific&lt;U, A2, C2&gt;&amp; other) {
+<a name="l00894"></a>00894         <span class="comment">// Initialize my_construct_callback first, so that it is valid even if rest of this routine throws an exception.</span>
+<a name="l00895"></a>00895         my_construct_callback = other.my_construct_callback-&gt;clone();
+<a name="l00896"></a>00896 
+<a name="l00897"></a>00897         <span class="keyword">typedef</span> internal::ets_base&lt;ets_no_key&gt; base;
+<a name="l00898"></a>00898         __TBB_ASSERT(my_locals.<a class="code" href="a00166.html#715fe313c4a9c22731cc404dd80c9ec9">size</a>()==0,NULL);
+<a name="l00899"></a>00899         this-&gt;table_reserve_for_copy( other );
+<a name="l00900"></a>00900         <span class="keywordflow">for</span>( base::array* r=other.my_root; r; r=r-&gt;next ) {
+<a name="l00901"></a>00901             <span class="keywordflow">for</span>( size_t i=0; i&lt;r-&gt;<a class="code" href="a00152.html#9eaa0b6beff1420f688570bbf6b8c462">size</a>(); ++i ) {
+<a name="l00902"></a>00902                 base::slot&amp; s1 = r-&gt;at(i);
+<a name="l00903"></a>00903                 <span class="keywordflow">if</span>( !s1.empty() ) {
+<a name="l00904"></a>00904                     base::slot&amp; s2 = this-&gt;table_find(s1.key);
+<a name="l00905"></a>00905                     <span class="keywordflow">if</span>( s2.empty() ) { 
+<a name="l00906"></a>00906 #<span class="keywordflow">if</span> TBB_DEPRECATED
+<a name="l00907"></a>00907                         <span class="keywordtype">void</span>* lref = &amp;my_locals[my_locals.<a class="code" href="a00166.html#e94e038f915c0268fdf2d3d7f87d81b8">push_back</a>(padded_element())];
+<a name="l00908"></a>00908 <span class="preprocessor">#else</span>
+<a name="l00909"></a>00909 <span class="preprocessor"></span>                        <span class="keywordtype">void</span>* lref = &amp;*my_locals.<a class="code" href="a00166.html#e94e038f915c0268fdf2d3d7f87d81b8">push_back</a>(padded_element());
+<a name="l00910"></a>00910 <span class="preprocessor">#endif</span>
+<a name="l00911"></a>00911 <span class="preprocessor"></span>                        s2.ptr = <span class="keyword">new</span>(lref) T(*(U*)s1.ptr);
+<a name="l00912"></a>00912                         s2.key = s1.key;
+<a name="l00913"></a>00913                     } <span class="keywordflow">else</span> {
+<a name="l00914"></a>00914                         <span class="comment">// Skip the duplicate</span>
+<a name="l00915"></a>00915                     } 
+<a name="l00916"></a>00916                 }
+<a name="l00917"></a>00917             }
+<a name="l00918"></a>00918         }
+<a name="l00919"></a>00919     }
+<a name="l00920"></a>00920 
+<a name="l00921"></a>00921     <span class="keyword">template</span>&lt; <span class="keyword">typename</span> Container &gt;
+<a name="l00922"></a>00922     <span class="keyword">class </span>flattened2d {
+<a name="l00923"></a>00923 
+<a name="l00924"></a>00924         <span class="comment">// This intermediate typedef is to address issues with VC7.1 compilers</span>
+<a name="l00925"></a>00925         <span class="keyword">typedef</span> <span class="keyword">typename</span> Container::value_type conval_type;
+<a name="l00926"></a>00926 
+<a name="l00927"></a>00927     <span class="keyword">public</span>:
+<a name="l00928"></a>00928 
+<a name="l00930"></a>00930         <span class="keyword">typedef</span> <span class="keyword">typename</span> conval_type::size_type size_type;
+<a name="l00931"></a>00931         <span class="keyword">typedef</span> <span class="keyword">typename</span> conval_type::difference_type difference_type;
+<a name="l00932"></a>00932         <span class="keyword">typedef</span> <span class="keyword">typename</span> conval_type::allocator_type allocator_type;
+<a name="l00933"></a>00933         <span class="keyword">typedef</span> <span class="keyword">typename</span> conval_type::value_type value_type;
+<a name="l00934"></a>00934         <span class="keyword">typedef</span> <span class="keyword">typename</span> conval_type::reference reference;
+<a name="l00935"></a>00935         <span class="keyword">typedef</span> <span class="keyword">typename</span> conval_type::const_reference const_reference;
+<a name="l00936"></a>00936         <span class="keyword">typedef</span> <span class="keyword">typename</span> conval_type::pointer pointer;
+<a name="l00937"></a>00937         <span class="keyword">typedef</span> <span class="keyword">typename</span> conval_type::const_pointer const_pointer;
+<a name="l00938"></a>00938 
+<a name="l00939"></a>00939         <span class="keyword">typedef</span> <span class="keyword">typename</span> internal::segmented_iterator&lt;Container, value_type&gt; iterator;
+<a name="l00940"></a>00940         <span class="keyword">typedef</span> <span class="keyword">typename</span> internal::segmented_iterator&lt;Container, const value_type&gt; const_iterator;
+<a name="l00941"></a>00941 
+<a name="l00942"></a>00942         flattened2d( <span class="keyword">const</span> Container &amp;c, <span class="keyword">typename</span> Container::const_iterator b, <span class="keyword">typename</span> Container::const_iterator e ) : 
+<a name="l00943"></a>00943             my_container(const_cast&lt;Container*&gt;(&amp;c)), my_begin(b), my_end(e) { }
+<a name="l00944"></a>00944 
+<a name="l00945"></a>00945         flattened2d( <span class="keyword">const</span> Container &amp;c ) : 
+<a name="l00946"></a>00946             my_container(const_cast&lt;Container*&gt;(&amp;c)), my_begin(c.begin()), my_end(c.end()) { }
+<a name="l00947"></a>00947 
+<a name="l00948"></a>00948         iterator begin() { <span class="keywordflow">return</span> iterator(*my_container) = my_begin; }
+<a name="l00949"></a>00949         iterator end() { <span class="keywordflow">return</span> iterator(*my_container) = my_end; }
+<a name="l00950"></a>00950         const_iterator begin()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> const_iterator(*my_container) = my_begin; }
+<a name="l00951"></a>00951         const_iterator end()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> const_iterator(*my_container) = my_end; }
+<a name="l00952"></a>00952 
+<a name="l00953"></a>00953         size_type size()<span class="keyword"> const </span>{
+<a name="l00954"></a>00954             size_type tot_size = 0;
+<a name="l00955"></a>00955             <span class="keywordflow">for</span>(<span class="keyword">typename</span> Container::const_iterator i = my_begin; i != my_end; ++i) {
+<a name="l00956"></a>00956                 tot_size += i-&gt;size();
+<a name="l00957"></a>00957             }
+<a name="l00958"></a>00958             <span class="keywordflow">return</span> tot_size;
+<a name="l00959"></a>00959         }
+<a name="l00960"></a>00960 
+<a name="l00961"></a>00961     <span class="keyword">private</span>:
+<a name="l00962"></a>00962 
+<a name="l00963"></a>00963         Container *my_container;
+<a name="l00964"></a>00964         <span class="keyword">typename</span> Container::const_iterator my_begin;
+<a name="l00965"></a>00965         <span class="keyword">typename</span> Container::const_iterator my_end;
+<a name="l00966"></a>00966 
+<a name="l00967"></a>00967     };
+<a name="l00968"></a>00968 
+<a name="l00969"></a>00969     <span class="keyword">template</span> &lt;<span class="keyword">typename</span> Container&gt;
+<a name="l00970"></a>00970     flattened2d&lt;Container&gt; flatten2d(<span class="keyword">const</span> Container &amp;c, <span class="keyword">const</span> <span class="keyword">typename</span> Container::const_iterator b, <span class="keyword">const</span> <span class="keyword">typename</span> Container::const_iterator e) {
+<a name="l00971"></a>00971         <span class="keywordflow">return</span> flattened2d&lt;Container&gt;(c, b, e);
+<a name="l00972"></a>00972     }
+<a name="l00973"></a>00973 
+<a name="l00974"></a>00974     <span class="keyword">template</span> &lt;<span class="keyword">typename</span> Container&gt;
+<a name="l00975"></a>00975     flattened2d&lt;Container&gt; flatten2d(<span class="keyword">const</span> Container &amp;c) {
+<a name="l00976"></a>00976         <span class="keywordflow">return</span> flattened2d&lt;Container&gt;(c);
+<a name="l00977"></a>00977     }
+<a name="l00978"></a>00978 
+<a name="l00979"></a>00979 } <span class="comment">// interface6</span>
+<a name="l00980"></a>00980 
+<a name="l00981"></a>00981 <span class="keyword">namespace </span>internal {
+<a name="l00982"></a>00982 <span class="keyword">using</span> interface6::internal::segmented_iterator;
+<a name="l00983"></a>00983 }
+<a name="l00984"></a>00984 
+<a name="l00985"></a>00985 <span class="keyword">using</span> interface6::enumerable_thread_specific;
+<a name="l00986"></a>00986 <span class="keyword">using</span> interface6::flattened2d;
+<a name="l00987"></a>00987 <span class="keyword">using</span> interface6::flatten2d;
+<a name="l00988"></a>00988 
+<a name="l00989"></a>00989 } <span class="comment">// namespace tbb</span>
+<a name="l00990"></a>00990 
+<a name="l00991"></a>00991 <span class="preprocessor">#endif</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
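[Editor's note] The enumerable_thread_specific.h listing above ends with the combine()/combine_each() reductions and the flattened2d view returned by flatten2d(). A minimal usage sketch follows; it is not part of the commit. Each worker thread appends into its own std::vector, and flatten2d() then walks all per-thread vectors as one flat sequence. It assumes a C++0x-capable compiler for the lambda (a plain body functor works the same way), and the variable names are illustrative only.

    #include "tbb/enumerable_thread_specific.h"
    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"
    #include <vector>
    #include <cstdio>

    typedef tbb::enumerable_thread_specific< std::vector<int> > ets_type;

    int main() {
        ets_type per_thread;

        // Each thread collects into its own vector, so no locking is needed.
        tbb::parallel_for(tbb::blocked_range<int>(0, 1000),
            [&](const tbb::blocked_range<int>& r) {
                std::vector<int>& local = per_thread.local();
                for (int i = r.begin(); i != r.end(); ++i)
                    if (i % 7 == 0)
                        local.push_back(i);
            });

        // flatten2d() presents the per-thread vectors as one container;
        // size() sums the sizes of the inner vectors, as in the loop above.
        tbb::flattened2d<ets_type> flat = tbb::flatten2d(per_thread);
        size_t n = 0;
        for (tbb::flattened2d<ets_type>::iterator it = flat.begin(); it != flat.end(); ++it)
            ++n;
        std::printf("collected %lu values, flat.size() = %lu\n",
                    (unsigned long)n, (unsigned long)flat.size());
        return 0;
    }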
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00372.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00372.html
new file mode 100644 (file)
index 0000000..7c0476e
--- /dev/null
@@ -0,0 +1,240 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>mutex.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>mutex.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_mutex_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_mutex_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00025"></a>00025 <span class="preprocessor"></span><span class="preprocessor">#include "machine/windows_api.h"</span>
+<a name="l00026"></a>00026 <span class="preprocessor">#else</span>
+<a name="l00027"></a>00027 <span class="preprocessor"></span><span class="preprocessor">#include &lt;pthread.h&gt;</span>
+<a name="l00028"></a>00028 <span class="preprocessor">#endif </span><span class="comment">/* _WIN32||_WIN64 */</span>
+<a name="l00029"></a>00029 
+<a name="l00030"></a>00030 <span class="preprocessor">#include &lt;new&gt;</span>
+<a name="l00031"></a>00031 <span class="preprocessor">#include "aligned_space.h"</span>
+<a name="l00032"></a>00032 <span class="preprocessor">#include "tbb_stddef.h"</span>
+<a name="l00033"></a>00033 <span class="preprocessor">#include "tbb_profiling.h"</span>
+<a name="l00034"></a>00034 
+<a name="l00035"></a>00035 <span class="keyword">namespace </span>tbb {
+<a name="l00036"></a>00036 
+<a name="l00038"></a>00038 
+<a name="l00040"></a><a class="code" href="a00177.html">00040</a> <span class="keyword">class </span><a class="code" href="a00177.html">mutex</a> {
+<a name="l00041"></a>00041 <span class="keyword">public</span>:
+<a name="l00043"></a><a class="code" href="a00177.html#05313cb77d4f85213103d4dab74ed454">00043</a>     <a class="code" href="a00177.html#05313cb77d4f85213103d4dab74ed454">mutex</a>() {
+<a name="l00044"></a>00044 <span class="preprocessor">#if TBB_USE_ASSERT || TBB_USE_THREADING_TOOLS</span>
+<a name="l00045"></a>00045 <span class="preprocessor"></span>    internal_construct();
+<a name="l00046"></a>00046 <span class="preprocessor">#else</span>
+<a name="l00047"></a>00047 <span class="preprocessor"></span><span class="preprocessor">  #if _WIN32||_WIN64</span>
+<a name="l00048"></a>00048 <span class="preprocessor"></span>        InitializeCriticalSection(&amp;impl);
+<a name="l00049"></a>00049 <span class="preprocessor">  #else</span>
+<a name="l00050"></a>00050 <span class="preprocessor"></span>        <span class="keywordtype">int</span> error_code = pthread_mutex_init(&amp;impl,NULL);
+<a name="l00051"></a>00051         <span class="keywordflow">if</span>( error_code )
+<a name="l00052"></a>00052             tbb::internal::handle_perror(error_code,<span class="stringliteral">"mutex: pthread_mutex_init failed"</span>);
+<a name="l00053"></a>00053 <span class="preprocessor">  #endif </span><span class="comment">/* _WIN32||_WIN64*/</span>
+<a name="l00054"></a>00054 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00055"></a>00055     };
+<a name="l00056"></a>00056 
+<a name="l00057"></a>00057     ~<a class="code" href="a00177.html">mutex</a>() {
+<a name="l00058"></a>00058 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00059"></a>00059 <span class="preprocessor"></span>        internal_destroy();
+<a name="l00060"></a>00060 <span class="preprocessor">#else</span>
+<a name="l00061"></a>00061 <span class="preprocessor"></span><span class="preprocessor">  #if _WIN32||_WIN64</span>
+<a name="l00062"></a>00062 <span class="preprocessor"></span>        DeleteCriticalSection(&amp;impl);
+<a name="l00063"></a>00063 <span class="preprocessor">  #else</span>
+<a name="l00064"></a>00064 <span class="preprocessor"></span>        pthread_mutex_destroy(&amp;impl); 
+<a name="l00065"></a>00065 
+<a name="l00066"></a>00066 <span class="preprocessor">  #endif </span><span class="comment">/* _WIN32||_WIN64 */</span>
+<a name="l00067"></a>00067 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00068"></a>00068     };
+<a name="l00069"></a>00069 
+<a name="l00070"></a>00070     <span class="keyword">class </span>scoped_lock;
+<a name="l00071"></a>00071     <span class="keyword">friend</span> <span class="keyword">class </span>scoped_lock;
+<a name="l00072"></a>00072 
+<a name="l00074"></a>00074 
+<a name="l00076"></a><a class="code" href="a00178.html">00076</a>     <span class="keyword">class </span><a class="code" href="a00178.html">scoped_lock</a> : internal::no_copy {
+<a name="l00077"></a>00077     <span class="keyword">public</span>:
+<a name="l00079"></a><a class="code" href="a00178.html#1d403ae51b484df5d86d85ae38f11e6e">00079</a>         <a class="code" href="a00178.html#1d403ae51b484df5d86d85ae38f11e6e">scoped_lock</a>() : my_mutex(NULL) {};
+<a name="l00080"></a>00080 
+<a name="l00082"></a><a class="code" href="a00178.html#605a6b9af0f8cdabdf81825e0de99600">00082</a>         <a class="code" href="a00178.html#1d403ae51b484df5d86d85ae38f11e6e">scoped_lock</a>( <a class="code" href="a00177.html">mutex</a>&amp; <a class="code" href="a00177.html">mutex</a> ) {
+<a name="l00083"></a>00083             <a class="code" href="a00178.html#862e022841cdc522e4296a5533b22efd">acquire</a>( mutex );
+<a name="l00084"></a>00084         }
+<a name="l00085"></a>00085 
+<a name="l00087"></a><a class="code" href="a00178.html#0ebbbecaf4311e9df7362cb76ceaa368">00087</a>         <a class="code" href="a00178.html#0ebbbecaf4311e9df7362cb76ceaa368">~scoped_lock</a>() {
+<a name="l00088"></a>00088             <span class="keywordflow">if</span>( my_mutex ) 
+<a name="l00089"></a>00089                 <a class="code" href="a00178.html#0d51d18cd99df3b2e93bf07378d0992c">release</a>();
+<a name="l00090"></a>00090         }
+<a name="l00091"></a>00091 
+<a name="l00093"></a><a class="code" href="a00178.html#862e022841cdc522e4296a5533b22efd">00093</a>         <span class="keywordtype">void</span> <a class="code" href="a00178.html#862e022841cdc522e4296a5533b22efd">acquire</a>( <a class="code" href="a00177.html">mutex</a>&amp; <a class="code" href="a00177.html">mutex</a> ) {
+<a name="l00094"></a>00094 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00095"></a>00095 <span class="preprocessor"></span>            internal_acquire(mutex);
+<a name="l00096"></a>00096 <span class="preprocessor">#else</span>
+<a name="l00097"></a>00097 <span class="preprocessor"></span>            mutex.<a class="code" href="a00177.html#4470e61c24c129a0299ca6c17240adbb">lock</a>();
+<a name="l00098"></a>00098             my_mutex = &amp;mutex;
+<a name="l00099"></a>00099 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00100"></a>00100         }
+<a name="l00101"></a>00101 
+<a name="l00103"></a><a class="code" href="a00178.html#591e0c49b82bcedffcbe0923f1b915ec">00103</a>         <span class="keywordtype">bool</span> <a class="code" href="a00178.html#591e0c49b82bcedffcbe0923f1b915ec">try_acquire</a>( <a class="code" href="a00177.html">mutex</a>&amp; <a class="code" href="a00177.html">mutex</a> ) {
+<a name="l00104"></a>00104 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00105"></a>00105 <span class="preprocessor"></span>            <span class="keywordflow">return</span> internal_try_acquire (mutex);
+<a name="l00106"></a>00106 <span class="preprocessor">#else</span>
+<a name="l00107"></a>00107 <span class="preprocessor"></span>            <span class="keywordtype">bool</span> result = mutex.<a class="code" href="a00177.html#4331652c79dea1c1131bd59ab161b234">try_lock</a>();
+<a name="l00108"></a>00108             <span class="keywordflow">if</span>( result )
+<a name="l00109"></a>00109                 my_mutex = &amp;mutex;
+<a name="l00110"></a>00110             <span class="keywordflow">return</span> result;
+<a name="l00111"></a>00111 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00112"></a>00112         }
+<a name="l00113"></a>00113 
+<a name="l00115"></a><a class="code" href="a00178.html#0d51d18cd99df3b2e93bf07378d0992c">00115</a>         <span class="keywordtype">void</span> <a class="code" href="a00178.html#0d51d18cd99df3b2e93bf07378d0992c">release</a>() {
+<a name="l00116"></a>00116 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00117"></a>00117 <span class="preprocessor"></span>            internal_release ();
+<a name="l00118"></a>00118 <span class="preprocessor">#else</span>
+<a name="l00119"></a>00119 <span class="preprocessor"></span>            my_mutex-&gt;<a class="code" href="a00177.html#5fc9ef443ae75d966695546be399cc6b">unlock</a>();
+<a name="l00120"></a>00120             my_mutex = NULL;
+<a name="l00121"></a>00121 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00122"></a>00122         }
+<a name="l00123"></a>00123 
+<a name="l00124"></a>00124     <span class="keyword">private</span>:
+<a name="l00126"></a>00126         <a class="code" href="a00177.html">mutex</a>* my_mutex;
+<a name="l00127"></a>00127 
+<a name="l00129"></a>00129         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_acquire( <a class="code" href="a00177.html">mutex</a>&amp; m );
+<a name="l00130"></a>00130 
+<a name="l00132"></a>00132         <span class="keywordtype">bool</span> __TBB_EXPORTED_METHOD internal_try_acquire( <a class="code" href="a00177.html">mutex</a>&amp; m );
+<a name="l00133"></a>00133 
+<a name="l00135"></a>00135         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_release();
+<a name="l00136"></a>00136 
+<a name="l00137"></a>00137         <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00177.html">mutex</a>;
+<a name="l00138"></a>00138     };
+<a name="l00139"></a>00139 
+<a name="l00140"></a>00140     <span class="comment">// Mutex traits</span>
+<a name="l00141"></a>00141     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_rw_mutex = <span class="keyword">false</span>;
+<a name="l00142"></a>00142     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_recursive_mutex = <span class="keyword">false</span>;
+<a name="l00143"></a>00143     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_fair_mutex = <span class="keyword">false</span>;
+<a name="l00144"></a>00144 
+<a name="l00145"></a>00145     <span class="comment">// ISO C++0x compatibility methods</span>
+<a name="l00146"></a>00146 
+<a name="l00148"></a><a class="code" href="a00177.html#4470e61c24c129a0299ca6c17240adbb">00148</a>     <span class="keywordtype">void</span> <a class="code" href="a00177.html#4470e61c24c129a0299ca6c17240adbb">lock</a>() {
+<a name="l00149"></a>00149 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00150"></a>00150 <span class="preprocessor"></span>        <a class="code" href="a00146.html">aligned_space&lt;scoped_lock,1&gt;</a> tmp;
+<a name="l00151"></a>00151         <span class="keyword">new</span>(tmp.<a class="code" href="a00146.html#0d702fc6b9e9d061ace3501b3c861cdf">begin</a>()) <a class="code" href="a00178.html">scoped_lock</a>(*<span class="keyword">this</span>);
+<a name="l00152"></a>00152 <span class="preprocessor">#else</span>
+<a name="l00153"></a>00153 <span class="preprocessor"></span><span class="preprocessor">  #if _WIN32||_WIN64</span>
+<a name="l00154"></a>00154 <span class="preprocessor"></span>        EnterCriticalSection(&amp;impl);
+<a name="l00155"></a>00155 <span class="preprocessor">  #else</span>
+<a name="l00156"></a>00156 <span class="preprocessor"></span>        pthread_mutex_lock(&amp;impl);
+<a name="l00157"></a>00157 <span class="preprocessor">  #endif </span><span class="comment">/* _WIN32||_WIN64 */</span>
+<a name="l00158"></a>00158 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00159"></a>00159     }
+<a name="l00160"></a>00160 
+<a name="l00162"></a>00162 
+<a name="l00163"></a><a class="code" href="a00177.html#4331652c79dea1c1131bd59ab161b234">00163</a>     <span class="keywordtype">bool</span> <a class="code" href="a00177.html#4331652c79dea1c1131bd59ab161b234">try_lock</a>() {
+<a name="l00164"></a>00164 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00165"></a>00165 <span class="preprocessor"></span>        <a class="code" href="a00146.html">aligned_space&lt;scoped_lock,1&gt;</a> tmp;
+<a name="l00166"></a>00166         <a class="code" href="a00178.html">scoped_lock</a>&amp; s = *tmp.<a class="code" href="a00146.html#0d702fc6b9e9d061ace3501b3c861cdf">begin</a>();
+<a name="l00167"></a>00167         s.<a class="code" href="a00178.html#be42c8fe11cce1af4fe1e9eaab717567">my_mutex</a> = NULL;
+<a name="l00168"></a>00168         <span class="keywordflow">return</span> s.<a class="code" href="a00178.html#e86ffb41a026694b16637f78aa337874">internal_try_acquire</a>(*<span class="keyword">this</span>);
+<a name="l00169"></a>00169 <span class="preprocessor">#else</span>
+<a name="l00170"></a>00170 <span class="preprocessor"></span><span class="preprocessor">  #if _WIN32||_WIN64</span>
+<a name="l00171"></a>00171 <span class="preprocessor"></span>        <span class="keywordflow">return</span> TryEnterCriticalSection(&amp;impl)!=0;
+<a name="l00172"></a>00172 <span class="preprocessor">  #else</span>
+<a name="l00173"></a>00173 <span class="preprocessor"></span>        <span class="keywordflow">return</span> pthread_mutex_trylock(&amp;impl)==0;
+<a name="l00174"></a>00174 <span class="preprocessor">  #endif </span><span class="comment">/* _WIN32||_WIN64 */</span>
+<a name="l00175"></a>00175 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00176"></a>00176     }
+<a name="l00177"></a>00177 
+<a name="l00179"></a><a class="code" href="a00177.html#5fc9ef443ae75d966695546be399cc6b">00179</a>     <span class="keywordtype">void</span> <a class="code" href="a00177.html#5fc9ef443ae75d966695546be399cc6b">unlock</a>() {
+<a name="l00180"></a>00180 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00181"></a>00181 <span class="preprocessor"></span>        <a class="code" href="a00146.html">aligned_space&lt;scoped_lock,1&gt;</a> tmp;
+<a name="l00182"></a>00182         <a class="code" href="a00178.html">scoped_lock</a>&amp; s = *tmp.<a class="code" href="a00146.html#0d702fc6b9e9d061ace3501b3c861cdf">begin</a>();
+<a name="l00183"></a>00183         s.<a class="code" href="a00178.html#be42c8fe11cce1af4fe1e9eaab717567">my_mutex</a> = <span class="keyword">this</span>;
+<a name="l00184"></a>00184         s.<a class="code" href="a00178.html#448b7e9042afcdc455ea3f929ac4c594">internal_release</a>();
+<a name="l00185"></a>00185 <span class="preprocessor">#else</span>
+<a name="l00186"></a>00186 <span class="preprocessor"></span><span class="preprocessor">  #if _WIN32||_WIN64</span>
+<a name="l00187"></a>00187 <span class="preprocessor"></span>        LeaveCriticalSection(&amp;impl);
+<a name="l00188"></a>00188 <span class="preprocessor">  #else</span>
+<a name="l00189"></a>00189 <span class="preprocessor"></span>        pthread_mutex_unlock(&amp;impl);
+<a name="l00190"></a>00190 <span class="preprocessor">  #endif </span><span class="comment">/* _WIN32||_WIN64 */</span>
+<a name="l00191"></a>00191 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00192"></a>00192     }
+<a name="l00193"></a>00193 
+<a name="l00195"></a>00195 <span class="preprocessor">  #if _WIN32||_WIN64</span>
+<a name="l00196"></a><a class="code" href="a00177.html#9f1ec84d5815263ceae853f06ddb4cac">00196</a> <span class="preprocessor"></span>    <span class="keyword">typedef</span> LPCRITICAL_SECTION <a class="code" href="a00177.html#9f1ec84d5815263ceae853f06ddb4cac">native_handle_type</a>;
+<a name="l00197"></a>00197 <span class="preprocessor">  #else</span>
+<a name="l00198"></a>00198 <span class="preprocessor"></span>    <span class="keyword">typedef</span> pthread_mutex_t* <a class="code" href="a00177.html#9f1ec84d5815263ceae853f06ddb4cac">native_handle_type</a>;
+<a name="l00199"></a>00199 <span class="preprocessor">  #endif</span>
+<a name="l00200"></a>00200 <span class="preprocessor"></span>    <a class="code" href="a00177.html#9f1ec84d5815263ceae853f06ddb4cac">native_handle_type</a> native_handle() { <span class="keywordflow">return</span> (<a class="code" href="a00177.html#9f1ec84d5815263ceae853f06ddb4cac">native_handle_type</a>) &amp;impl; }
+<a name="l00201"></a>00201 
+<a name="l00202"></a>00202     <span class="keyword">enum</span> state_t {
+<a name="l00203"></a>00203         INITIALIZED=0x1234,
+<a name="l00204"></a>00204         DESTROYED=0x789A,
+<a name="l00205"></a>00205         HELD=0x56CD
+<a name="l00206"></a>00206     };
+<a name="l00207"></a>00207 <span class="keyword">private</span>:
+<a name="l00208"></a>00208 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00209"></a>00209 <span class="preprocessor"></span>    CRITICAL_SECTION impl;    
+<a name="l00210"></a>00210     <span class="keyword">enum</span> state_t state;
+<a name="l00211"></a>00211 <span class="preprocessor">#else</span>
+<a name="l00212"></a>00212 <span class="preprocessor"></span>    pthread_mutex_t impl;
+<a name="l00213"></a>00213 <span class="preprocessor">#endif </span><span class="comment">/* _WIN32||_WIN64 */</span>
+<a name="l00214"></a>00214 
+<a name="l00216"></a>00216     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_construct();
+<a name="l00217"></a>00217 
+<a name="l00219"></a>00219     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_destroy();
+<a name="l00220"></a>00220 
+<a name="l00221"></a>00221 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00222"></a>00222 <span class="preprocessor"></span><span class="keyword">public</span>:
+<a name="l00224"></a><a class="code" href="a00177.html#795649a185b0d6af6dc81c5f378616dd">00224</a>     <span class="keywordtype">void</span> <a class="code" href="a00177.html#795649a185b0d6af6dc81c5f378616dd">set_state</a>( state_t to ) { state = to; }
+<a name="l00225"></a>00225 <span class="preprocessor">#endif</span>
+<a name="l00226"></a>00226 <span class="preprocessor"></span>};
+<a name="l00227"></a>00227 
+<a name="l00228"></a>00228 __TBB_DEFINE_PROFILING_SET_NAME(mutex)
+<a name="l00229"></a>00229 
+<a name="l00230"></a>00230 } <span class="comment">// namespace tbb </span>
+<a name="l00231"></a>00231 
+<a name="l00232"></a>00232 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_mutex_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
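[Editor's note] As a quick orientation for readers of the mutex.h listing above, here is a small usage sketch; it is not part of the commit. The scoped_lock constructor acquires the mutex and its destructor releases it, matching the acquire()/release() pair shown above, while try_acquire() is the non-blocking variant. The counter and function names are illustrative only.

    #include "tbb/mutex.h"

    static tbb::mutex counter_mutex;
    static long counter = 0;

    void increment_counter() {
        tbb::mutex::scoped_lock lock(counter_mutex);  // acquired in the constructor
        ++counter;
    }                                                 // released in ~scoped_lock()

    bool try_increment_counter() {
        tbb::mutex::scoped_lock lock;                 // default-constructed: holds no mutex
        if (!lock.try_acquire(counter_mutex))
            return false;                             // mutex was busy; nothing to release
        ++counter;
        return true;                                  // released by the destructor
    }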
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00374.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00374.html
new file mode 100644 (file)
index 0000000..ac7c2a8
--- /dev/null
@@ -0,0 +1,81 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>null_mutex.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>null_mutex.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_null_mutex_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_null_mutex_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="keyword">namespace </span>tbb {
+<a name="l00025"></a>00025     
+<a name="l00027"></a>00027 
+<a name="l00029"></a><a class="code" href="a00179.html">00029</a> <span class="keyword">class </span><a class="code" href="a00179.html">null_mutex</a> {   
+<a name="l00031"></a>00031     <a class="code" href="a00179.html">null_mutex</a>( <span class="keyword">const</span> <a class="code" href="a00179.html">null_mutex</a>&amp; );   
+<a name="l00032"></a>00032     <span class="keywordtype">void</span> operator=( <span class="keyword">const</span> <a class="code" href="a00179.html">null_mutex</a>&amp; );   
+<a name="l00033"></a>00033 <span class="keyword">public</span>:   
+<a name="l00035"></a><a class="code" href="a00180.html">00035</a>     <span class="keyword">class </span><a class="code" href="a00180.html">scoped_lock</a> {   
+<a name="l00036"></a>00036     <span class="keyword">public</span>:   
+<a name="l00037"></a>00037         <a class="code" href="a00180.html">scoped_lock</a>() {}
+<a name="l00038"></a>00038         <a class="code" href="a00180.html">scoped_lock</a>( <a class="code" href="a00179.html">null_mutex</a>&amp; ) {}   
+<a name="l00039"></a>00039         ~<a class="code" href="a00180.html">scoped_lock</a>() {}
+<a name="l00040"></a>00040         <span class="keywordtype">void</span> acquire( <a class="code" href="a00179.html">null_mutex</a>&amp; ) {}
+<a name="l00041"></a>00041         <span class="keywordtype">bool</span> try_acquire( <a class="code" href="a00179.html">null_mutex</a>&amp; ) { <span class="keywordflow">return</span> <span class="keyword">true</span>; }
+<a name="l00042"></a>00042         <span class="keywordtype">void</span> release() {}
+<a name="l00043"></a>00043     };
+<a name="l00044"></a>00044   
+<a name="l00045"></a>00045     <a class="code" href="a00179.html">null_mutex</a>() {}
+<a name="l00046"></a>00046     
+<a name="l00047"></a>00047     <span class="comment">// Mutex traits   </span>
+<a name="l00048"></a>00048     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_rw_mutex = <span class="keyword">false</span>;   
+<a name="l00049"></a>00049     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_recursive_mutex = <span class="keyword">true</span>;
+<a name="l00050"></a>00050     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_fair_mutex = <span class="keyword">true</span>;
+<a name="l00051"></a>00051 };  
+<a name="l00052"></a>00052 
+<a name="l00053"></a>00053 }
+<a name="l00054"></a>00054 
+<a name="l00055"></a>00055 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_null_mutex_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
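[Editor's note] null_mutex above implements the mutex concept with empty inline methods, which is useful for code templated on its mutex type: instantiating with tbb::null_mutex removes all locking overhead without changing the call sites. A sketch follows; it is not part of the commit, and the guarded_log name is illustrative only.

    #include "tbb/mutex.h"
    #include "tbb/null_mutex.h"
    #include <vector>

    template <typename Mutex>
    class guarded_log {
        std::vector<int> events;
        Mutex mutex;
    public:
        void record(int e) {
            typename Mutex::scoped_lock lock(mutex);  // no-op when Mutex is null_mutex
            events.push_back(e);
        }
    };

    guarded_log<tbb::mutex>      shared_log;   // locked: safe for concurrent record()
    guarded_log<tbb::null_mutex> private_log;  // unlocked: for single-threaded use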
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00375.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00375.html
new file mode 100644 (file)
index 0000000..846b0d6
--- /dev/null
@@ -0,0 +1,83 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>null_rw_mutex.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>null_rw_mutex.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_null_rw_mutex_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_null_rw_mutex_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="keyword">namespace </span>tbb {
+<a name="l00025"></a>00025     
+<a name="l00027"></a>00027 
+<a name="l00029"></a><a class="code" href="a00181.html">00029</a> <span class="keyword">class </span><a class="code" href="a00181.html">null_rw_mutex</a> {
+<a name="l00031"></a>00031     <a class="code" href="a00181.html">null_rw_mutex</a>( <span class="keyword">const</span> <a class="code" href="a00181.html">null_rw_mutex</a>&amp; );   
+<a name="l00032"></a>00032     <span class="keywordtype">void</span> operator=( <span class="keyword">const</span> <a class="code" href="a00181.html">null_rw_mutex</a>&amp; );   
+<a name="l00033"></a>00033 <span class="keyword">public</span>:   
+<a name="l00035"></a><a class="code" href="a00182.html">00035</a>     <span class="keyword">class </span><a class="code" href="a00182.html">scoped_lock</a> {   
+<a name="l00036"></a>00036     <span class="keyword">public</span>:   
+<a name="l00037"></a>00037         <a class="code" href="a00182.html">scoped_lock</a>() {}
+<a name="l00038"></a>00038         <a class="code" href="a00182.html">scoped_lock</a>( <a class="code" href="a00181.html">null_rw_mutex</a>&amp; , <span class="keywordtype">bool</span> = <span class="keyword">true</span> ) {}
+<a name="l00039"></a>00039         ~<a class="code" href="a00182.html">scoped_lock</a>() {}
+<a name="l00040"></a>00040         <span class="keywordtype">void</span> acquire( <a class="code" href="a00181.html">null_rw_mutex</a>&amp; , <span class="keywordtype">bool</span> = <span class="keyword">true</span> ) {}
+<a name="l00041"></a>00041         <span class="keywordtype">bool</span> upgrade_to_writer() { <span class="keywordflow">return</span> <span class="keyword">true</span>; }
+<a name="l00042"></a>00042         <span class="keywordtype">bool</span> downgrade_to_reader() { <span class="keywordflow">return</span> <span class="keyword">true</span>; }
+<a name="l00043"></a>00043         <span class="keywordtype">bool</span> try_acquire( <a class="code" href="a00181.html">null_rw_mutex</a>&amp; , <span class="keywordtype">bool</span> = <span class="keyword">true</span> ) { <span class="keywordflow">return</span> <span class="keyword">true</span>; }
+<a name="l00044"></a>00044         <span class="keywordtype">void</span> release() {}
+<a name="l00045"></a>00045     };
+<a name="l00046"></a>00046   
+<a name="l00047"></a>00047     <a class="code" href="a00181.html">null_rw_mutex</a>() {}
+<a name="l00048"></a>00048     
+<a name="l00049"></a>00049     <span class="comment">// Mutex traits   </span>
+<a name="l00050"></a>00050     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_rw_mutex = <span class="keyword">true</span>;   
+<a name="l00051"></a>00051     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_recursive_mutex = <span class="keyword">true</span>;
+<a name="l00052"></a>00052     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_fair_mutex = <span class="keyword">true</span>;
+<a name="l00053"></a>00053 };  
+<a name="l00054"></a>00054 
+<a name="l00055"></a>00055 }
+<a name="l00056"></a>00056 
+<a name="l00057"></a>00057 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_null_rw_mutex_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
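[Editor's note] null_rw_mutex plays the same role for the reader-writer mutex concept: the writer flag and the upgrade/downgrade calls are accepted and ignored. The sketch below is not part of the commit; it shows a read-mostly table templated on its RW mutex type, using tbb::spin_rw_mutex for the synchronized instantiation, with illustrative class and variable names.

    #include "tbb/spin_rw_mutex.h"
    #include "tbb/null_rw_mutex.h"
    #include <map>
    #include <string>

    template <typename RWMutex>
    class settings_table {
        std::map<std::string, int> values;
        mutable RWMutex mutex;
    public:
        int get(const std::string& key) const {
            typename RWMutex::scoped_lock lock(mutex, /*write=*/false);   // reader
            typename std::map<std::string, int>::const_iterator it = values.find(key);
            return it == values.end() ? 0 : it->second;
        }
        void set(const std::string& key, int value) {
            typename RWMutex::scoped_lock lock(mutex, /*write=*/true);    // writer
            values[key] = value;
        }
    };

    settings_table<tbb::spin_rw_mutex> shared_settings;  // real reader-writer locking
    settings_table<tbb::null_rw_mutex> local_settings;   // all locking compiled away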
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00376.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00376.html
new file mode 100644 (file)
index 0000000..673b350
--- /dev/null
@@ -0,0 +1,480 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>parallel_do.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>parallel_do.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_parallel_do_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_parallel_do_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "task.h"</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#include "aligned_space.h"</span>
+<a name="l00026"></a>00026 <span class="preprocessor">#include &lt;iterator&gt;</span>
+<a name="l00027"></a>00027 
+<a name="l00028"></a>00028 <span class="keyword">namespace </span>tbb {
+<a name="l00029"></a>00029 
+<a name="l00031"></a>00031 <span class="keyword">namespace </span>internal {
+<a name="l00032"></a>00032     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Body, <span class="keyword">typename</span> Item&gt; <span class="keyword">class </span>parallel_do_feeder_impl;
+<a name="l00033"></a>00033     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Body&gt; <span class="keyword">class </span>do_group_task;
+<a name="l00034"></a>00034 
+<a name="l00036"></a>00036     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00037"></a>00037     <span class="keyword">struct </span>strip { <span class="keyword">typedef</span> T type; };
+<a name="l00038"></a>00038     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00039"></a>00039     <span class="keyword">struct </span>strip&lt;T&amp;&gt; { <span class="keyword">typedef</span> T type; };
+<a name="l00040"></a>00040     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00041"></a>00041     <span class="keyword">struct </span>strip&lt;const T&amp;&gt; { <span class="keyword">typedef</span> T type; };
+<a name="l00042"></a>00042     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00043"></a>00043     <span class="keyword">struct </span>strip&lt;volatile T&amp;&gt; { <span class="keyword">typedef</span> T type; };
+<a name="l00044"></a>00044     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00045"></a>00045     <span class="keyword">struct </span>strip&lt;const volatile T&amp;&gt; { <span class="keyword">typedef</span> T type; };
+<a name="l00046"></a>00046     <span class="comment">// Most of the compilers remove cv-qualifiers from non-reference function argument types. </span>
+<a name="l00047"></a>00047     <span class="comment">// But unfortunately there are those that don't.</span>
+<a name="l00048"></a>00048     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00049"></a>00049     <span class="keyword">struct </span>strip&lt;const T&gt; { <span class="keyword">typedef</span> T type; };
+<a name="l00050"></a>00050     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00051"></a>00051     <span class="keyword">struct </span>strip&lt;volatile T&gt; { <span class="keyword">typedef</span> T type; };
+<a name="l00052"></a>00052     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00053"></a>00053     <span class="keyword">struct </span>strip&lt;const volatile T&gt; { <span class="keyword">typedef</span> T type; };
+<a name="l00054"></a>00054 } <span class="comment">// namespace internal</span>
+<a name="l00056"></a>00056 <span class="comment"></span>
+<a name="l00058"></a>00058 
+<a name="l00059"></a>00059 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Item&gt;
+<a name="l00060"></a><a class="code" href="a00183.html">00060</a> <span class="keyword">class </span><a class="code" href="a00183.html">parallel_do_feeder</a>: internal::no_copy
+<a name="l00061"></a>00061 {
+<a name="l00062"></a>00062     <a class="code" href="a00183.html">parallel_do_feeder</a>() {}
+<a name="l00063"></a>00063     <span class="keyword">virtual</span> ~<a class="code" href="a00183.html">parallel_do_feeder</a> () {}
+<a name="l00064"></a>00064     <span class="keyword">virtual</span> <span class="keywordtype">void</span> internal_add( <span class="keyword">const</span> Item&amp; item ) = 0;
+<a name="l00065"></a>00065     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Body_, <span class="keyword">typename</span> Item_&gt; <span class="keyword">friend</span> <span class="keyword">class </span>internal::parallel_do_feeder_impl;
+<a name="l00066"></a>00066 <span class="keyword">public</span>:
+<a name="l00068"></a><a class="code" href="a00183.html#40baaf0f6856f4491dd0adf896c93516">00068</a>     <span class="keywordtype">void</span> <a class="code" href="a00183.html#40baaf0f6856f4491dd0adf896c93516">add</a>( <span class="keyword">const</span> Item&amp; item ) {internal_add(item);}
+<a name="l00069"></a>00069 };
+<a name="l00070"></a>00070 
+<a name="l00072"></a>00072 <span class="keyword">namespace </span>internal {
+<a name="l00074"></a>00074 
+<a name="l00076"></a>00076     <span class="keyword">template</span>&lt;<span class="keyword">class</span> Body, <span class="keyword">typename</span> Item&gt;
+<a name="l00077"></a>00077     <span class="keyword">class </span>parallel_do_operator_selector
+<a name="l00078"></a>00078     {
+<a name="l00079"></a>00079         <span class="keyword">typedef</span> parallel_do_feeder&lt;Item&gt; Feeder;
+<a name="l00080"></a>00080         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> A1, <span class="keyword">typename</span> A2, <span class="keyword">typename</span> CvItem &gt;
+<a name="l00081"></a>00081         <span class="keyword">static</span> <span class="keywordtype">void</span> internal_call( <span class="keyword">const</span> Body&amp; obj, A1&amp; arg1, A2&amp;, <span class="keywordtype">void</span> (Body::*)(CvItem) <span class="keyword">const</span> ) {
+<a name="l00082"></a>00082             obj(arg1);
+<a name="l00083"></a>00083         }
+<a name="l00084"></a>00084         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> A1, <span class="keyword">typename</span> A2, <span class="keyword">typename</span> CvItem &gt;
+<a name="l00085"></a>00085         <span class="keyword">static</span> <span class="keywordtype">void</span> internal_call( <span class="keyword">const</span> Body&amp; obj, A1&amp; arg1, A2&amp; arg2, <span class="keywordtype">void</span> (Body::*)(CvItem, parallel_do_feeder&lt;Item&gt;&amp;) <span class="keyword">const</span> ) {
+<a name="l00086"></a>00086             obj(arg1, arg2);
+<a name="l00087"></a>00087         }
+<a name="l00088"></a>00088 
+<a name="l00089"></a>00089     <span class="keyword">public</span>:
+<a name="l00090"></a>00090         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> A1, <span class="keyword">typename</span> A2 &gt;
+<a name="l00091"></a>00091         <span class="keyword">static</span> <span class="keywordtype">void</span> call( <span class="keyword">const</span> Body&amp; obj, A1&amp; arg1, A2&amp; arg2 )
+<a name="l00092"></a>00092         {
+<a name="l00093"></a>00093             internal_call( obj, arg1, arg2, &amp;Body::operator() );
+<a name="l00094"></a>00094         }
+<a name="l00095"></a>00095     };
+<a name="l00096"></a>00096 
+<a name="l00098"></a>00098 
+<a name="l00100"></a>00100     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Body, <span class="keyword">typename</span> Item&gt;
+<a name="l00101"></a>00101     <span class="keyword">class </span>do_iteration_task: <span class="keyword">public</span> task
+<a name="l00102"></a>00102     {
+<a name="l00103"></a>00103         <span class="keyword">typedef</span> parallel_do_feeder_impl&lt;Body, Item&gt; feeder_type;
+<a name="l00104"></a>00104 
+<a name="l00105"></a>00105         Item my_value;
+<a name="l00106"></a>00106         feeder_type&amp; my_feeder;
+<a name="l00107"></a>00107 
+<a name="l00108"></a>00108         do_iteration_task( <span class="keyword">const</span> Item&amp; value, feeder_type&amp; feeder ) : 
+<a name="l00109"></a>00109             my_value(value), my_feeder(feeder)
+<a name="l00110"></a>00110         {}
+<a name="l00111"></a>00111 
+<a name="l00112"></a>00112         <span class="comment">/*override*/</span> 
+<a name="l00113"></a>00113         task* execute()
+<a name="l00114"></a>00114         {
+<a name="l00115"></a>00115             parallel_do_operator_selector&lt;Body, Item&gt;::call(*my_feeder.my_body, my_value, my_feeder);
+<a name="l00116"></a>00116             <span class="keywordflow">return</span> NULL;
+<a name="l00117"></a>00117         }
+<a name="l00118"></a>00118 
+<a name="l00119"></a>00119         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Body_, <span class="keyword">typename</span> Item_&gt; <span class="keyword">friend</span> <span class="keyword">class </span>parallel_do_feeder_impl;
+<a name="l00120"></a>00120     }; <span class="comment">// class do_iteration_task</span>
+<a name="l00121"></a>00121 
+<a name="l00122"></a>00122     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Iterator, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Item&gt;
+<a name="l00123"></a>00123     <span class="keyword">class </span>do_iteration_task_iter: <span class="keyword">public</span> task
+<a name="l00124"></a>00124     {
+<a name="l00125"></a>00125         <span class="keyword">typedef</span> parallel_do_feeder_impl&lt;Body, Item&gt; feeder_type;
+<a name="l00126"></a>00126 
+<a name="l00127"></a>00127         Iterator my_iter;
+<a name="l00128"></a>00128         feeder_type&amp; my_feeder;
+<a name="l00129"></a>00129 
+<a name="l00130"></a>00130         do_iteration_task_iter( <span class="keyword">const</span> Iterator&amp; iter, feeder_type&amp; feeder ) : 
+<a name="l00131"></a>00131             my_iter(iter), my_feeder(feeder)
+<a name="l00132"></a>00132         {}
+<a name="l00133"></a>00133 
+<a name="l00134"></a>00134         <span class="comment">/*override*/</span> 
+<a name="l00135"></a>00135         task* execute()
+<a name="l00136"></a>00136         {
+<a name="l00137"></a>00137             parallel_do_operator_selector&lt;Body, Item&gt;::call(*my_feeder.my_body, *my_iter, my_feeder);
+<a name="l00138"></a>00138             <span class="keywordflow">return</span> NULL;
+<a name="l00139"></a>00139         }
+<a name="l00140"></a>00140 
+<a name="l00141"></a>00141         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Iterator_, <span class="keyword">typename</span> Body_, <span class="keyword">typename</span> Item_&gt; <span class="keyword">friend</span> <span class="keyword">class </span>do_group_task_forward;    
+<a name="l00142"></a>00142         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Body_, <span class="keyword">typename</span> Item_&gt; <span class="keyword">friend</span> <span class="keyword">class </span>do_group_task_input;    
+<a name="l00143"></a>00143         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Iterator_, <span class="keyword">typename</span> Body_, <span class="keyword">typename</span> Item_&gt; <span class="keyword">friend</span> <span class="keyword">class </span>do_task_iter;    
+<a name="l00144"></a>00144     }; <span class="comment">// class do_iteration_task_iter</span>
+<a name="l00145"></a>00145 
+<a name="l00147"></a>00147 
+<a name="l00149"></a>00149     <span class="keyword">template</span>&lt;<span class="keyword">class</span> Body, <span class="keyword">typename</span> Item&gt;
+<a name="l00150"></a>00150     <span class="keyword">class </span>parallel_do_feeder_impl : <span class="keyword">public</span> parallel_do_feeder&lt;Item&gt;
+<a name="l00151"></a>00151     {
+<a name="l00152"></a>00152         <span class="comment">/*override*/</span> 
+<a name="l00153"></a>00153         <span class="keywordtype">void</span> internal_add( <span class="keyword">const</span> Item&amp; item )
+<a name="l00154"></a>00154         {
+<a name="l00155"></a>00155             <span class="keyword">typedef</span> do_iteration_task&lt;Body, Item&gt; iteration_type;
+<a name="l00156"></a>00156 
+<a name="l00157"></a>00157             iteration_type&amp; t = *<span class="keyword">new</span> (task::allocate_additional_child_of(*my_barrier)) iteration_type(item, *<span class="keyword">this</span>);
+<a name="l00158"></a>00158 
+<a name="l00159"></a>00159             t.spawn( t );
+<a name="l00160"></a>00160         }
+<a name="l00161"></a>00161     <span class="keyword">public</span>:
+<a name="l00162"></a>00162         <span class="keyword">const</span> Body* my_body;
+<a name="l00163"></a>00163         empty_task* my_barrier;
+<a name="l00164"></a>00164 
+<a name="l00165"></a>00165         parallel_do_feeder_impl()
+<a name="l00166"></a>00166         {
+<a name="l00167"></a>00167             my_barrier = <span class="keyword">new</span>( <a class="code" href="a00204.html#23acb0da0afd690da797f9f882027d34">task::allocate_root</a>() ) empty_task();
+<a name="l00168"></a>00168             __TBB_ASSERT(my_barrier, <span class="stringliteral">"root task allocation failed"</span>);
+<a name="l00169"></a>00169         }
+<a name="l00170"></a>00170 
+<a name="l00171"></a>00171 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00172"></a>00172 <span class="preprocessor"></span>        parallel_do_feeder_impl(<a class="code" href="a00206.html">tbb::task_group_context</a> &amp;context)
+<a name="l00173"></a>00173         {
+<a name="l00174"></a>00174             my_barrier = <span class="keyword">new</span>( <a class="code" href="a00204.html#23acb0da0afd690da797f9f882027d34">task::allocate_root</a>(context) ) empty_task();
+<a name="l00175"></a>00175             __TBB_ASSERT(my_barrier, <span class="stringliteral">"root task allocation failed"</span>);
+<a name="l00176"></a>00176         }
+<a name="l00177"></a>00177 <span class="preprocessor">#endif</span>
+<a name="l00178"></a>00178 <span class="preprocessor"></span>
+<a name="l00179"></a>00179         ~parallel_do_feeder_impl()
+<a name="l00180"></a>00180         {
+<a name="l00181"></a>00181             my_barrier-&gt;destroy(*my_barrier);
+<a name="l00182"></a>00182         }
+<a name="l00183"></a>00183     }; <span class="comment">// class parallel_do_feeder_impl</span>
+<a name="l00184"></a>00184 
+<a name="l00185"></a>00185 
+<a name="l00187"></a>00187 
+<a name="l00190"></a>00190     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Iterator, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Item&gt;
+<a name="l00191"></a>00191     <span class="keyword">class </span>do_group_task_forward: <span class="keyword">public</span> task
+<a name="l00192"></a>00192     {
+<a name="l00193"></a>00193         <span class="keyword">static</span> <span class="keyword">const</span> size_t max_arg_size = 4;         
+<a name="l00194"></a>00194 
+<a name="l00195"></a>00195         <span class="keyword">typedef</span> parallel_do_feeder_impl&lt;Body, Item&gt; feeder_type;
+<a name="l00196"></a>00196 
+<a name="l00197"></a>00197         feeder_type&amp; my_feeder;
+<a name="l00198"></a>00198         Iterator my_first;
+<a name="l00199"></a>00199         size_t my_size;
+<a name="l00200"></a>00200         
+<a name="l00201"></a>00201         do_group_task_forward( Iterator first, size_t size, feeder_type&amp; feeder ) 
+<a name="l00202"></a>00202             : my_feeder(feeder), my_first(first), my_size(size)
+<a name="l00203"></a>00203         {}
+<a name="l00204"></a>00204 
+<a name="l00205"></a>00205         <span class="comment">/*override*/</span> task* execute()
+<a name="l00206"></a>00206         {
+<a name="l00207"></a>00207             <span class="keyword">typedef</span> do_iteration_task_iter&lt;Iterator, Body, Item&gt; iteration_type;
+<a name="l00208"></a>00208             __TBB_ASSERT( my_size&gt;0, NULL );
+<a name="l00209"></a>00209             task_list list;
+<a name="l00210"></a>00210             task* t; 
+<a name="l00211"></a>00211             size_t k=0; 
+<a name="l00212"></a>00212             <span class="keywordflow">for</span>(;;) {
+<a name="l00213"></a>00213                 t = <span class="keyword">new</span>( allocate_child() ) iteration_type( my_first, my_feeder );
+<a name="l00214"></a>00214                 ++my_first;
+<a name="l00215"></a>00215                 <span class="keywordflow">if</span>( ++k==my_size ) <span class="keywordflow">break</span>;
+<a name="l00216"></a>00216                 list.push_back(*t);
+<a name="l00217"></a>00217             }
+<a name="l00218"></a>00218             set_ref_count(<span class="keywordtype">int</span>(k+1));
+<a name="l00219"></a>00219             spawn(list);
+<a name="l00220"></a>00220             spawn_and_wait_for_all(*t);
+<a name="l00221"></a>00221             <span class="keywordflow">return</span> NULL;
+<a name="l00222"></a>00222         }
+<a name="l00223"></a>00223 
+<a name="l00224"></a>00224         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Iterator_, <span class="keyword">typename</span> Body_, <span class="keyword">typename</span> _Item&gt; <span class="keyword">friend</span> <span class="keyword">class </span>do_task_iter;
+<a name="l00225"></a>00225     }; <span class="comment">// class do_group_task_forward</span>
+<a name="l00226"></a>00226 
+<a name="l00227"></a>00227     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Body, <span class="keyword">typename</span> Item&gt;
+<a name="l00228"></a>00228     <span class="keyword">class </span>do_group_task_input: <span class="keyword">public</span> task
+<a name="l00229"></a>00229     {
+<a name="l00230"></a>00230         <span class="keyword">static</span> <span class="keyword">const</span> size_t max_arg_size = 4;         
+<a name="l00231"></a>00231         
+<a name="l00232"></a>00232         <span class="keyword">typedef</span> parallel_do_feeder_impl&lt;Body, Item&gt; feeder_type;
+<a name="l00233"></a>00233 
+<a name="l00234"></a>00234         feeder_type&amp; my_feeder;
+<a name="l00235"></a>00235         size_t my_size;
+<a name="l00236"></a>00236         aligned_space&lt;Item, max_arg_size&gt; my_arg;
+<a name="l00237"></a>00237 
+<a name="l00238"></a>00238         do_group_task_input( feeder_type&amp; feeder ) 
+<a name="l00239"></a>00239             : my_feeder(feeder), my_size(0)
+<a name="l00240"></a>00240         {}
+<a name="l00241"></a>00241 
+<a name="l00242"></a>00242         <span class="comment">/*override*/</span> task* execute()
+<a name="l00243"></a>00243         {
+<a name="l00244"></a>00244             <span class="keyword">typedef</span> do_iteration_task_iter&lt;Item*, Body, Item&gt; iteration_type;
+<a name="l00245"></a>00245             __TBB_ASSERT( my_size&gt;0, NULL );
+<a name="l00246"></a>00246             task_list list;
+<a name="l00247"></a>00247             task* t; 
+<a name="l00248"></a>00248             size_t k=0; 
+<a name="l00249"></a>00249             <span class="keywordflow">for</span>(;;) {
+<a name="l00250"></a>00250                 t = <span class="keyword">new</span>( allocate_child() ) iteration_type( my_arg.begin() + k, my_feeder );
+<a name="l00251"></a>00251                 <span class="keywordflow">if</span>( ++k==my_size ) <span class="keywordflow">break</span>;
+<a name="l00252"></a>00252                 list.push_back(*t);
+<a name="l00253"></a>00253             }
+<a name="l00254"></a>00254             set_ref_count(<span class="keywordtype">int</span>(k+1));
+<a name="l00255"></a>00255             spawn(list);
+<a name="l00256"></a>00256             spawn_and_wait_for_all(*t);
+<a name="l00257"></a>00257             <span class="keywordflow">return</span> NULL;
+<a name="l00258"></a>00258         }
+<a name="l00259"></a>00259 
+<a name="l00260"></a>00260         ~do_group_task_input(){
+<a name="l00261"></a>00261             <span class="keywordflow">for</span>( size_t k=0; k&lt;my_size; ++k)
+<a name="l00262"></a>00262                 (my_arg.begin() + k)-&gt;~Item();
+<a name="l00263"></a>00263         }
+<a name="l00264"></a>00264 
+<a name="l00265"></a>00265         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Iterator_, <span class="keyword">typename</span> Body_, <span class="keyword">typename</span> Item_&gt; <span class="keyword">friend</span> <span class="keyword">class </span>do_task_iter;
+<a name="l00266"></a>00266     }; <span class="comment">// class do_group_task_input</span>
+<a name="l00267"></a>00267     
+<a name="l00269"></a>00269 
+<a name="l00271"></a>00271     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Iterator, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Item&gt;
+<a name="l00272"></a>00272     <span class="keyword">class </span>do_task_iter: <span class="keyword">public</span> task
+<a name="l00273"></a>00273     {
+<a name="l00274"></a>00274         <span class="keyword">typedef</span> parallel_do_feeder_impl&lt;Body, Item&gt; feeder_type;
+<a name="l00275"></a>00275 
+<a name="l00276"></a>00276     <span class="keyword">public</span>:
+<a name="l00277"></a>00277         do_task_iter( Iterator first, Iterator last , feeder_type&amp; feeder ) : 
+<a name="l00278"></a>00278             my_first(first), my_last(last), my_feeder(feeder)
+<a name="l00279"></a>00279         {}
+<a name="l00280"></a>00280 
+<a name="l00281"></a>00281     <span class="keyword">private</span>:
+<a name="l00282"></a>00282         Iterator my_first;
+<a name="l00283"></a>00283         Iterator my_last;
+<a name="l00284"></a>00284         feeder_type&amp; my_feeder;
+<a name="l00285"></a>00285 
+<a name="l00286"></a>00286         <span class="comment">/* Do not merge run(xxx) and run_xxx() methods. They are separated in order</span>
+<a name="l00287"></a>00287 <span class="comment">            to make sure that compilers will eliminate unused argument of type xxx</span>
+<a name="l00288"></a>00288 <span class="comment">            (that is will not put it on stack). The sole purpose of this argument </span>
+<a name="l00289"></a>00289 <span class="comment">            is overload resolution.</span>
+<a name="l00290"></a>00290 <span class="comment">            </span>
+<a name="l00291"></a>00291 <span class="comment">            An alternative could be using template functions, but explicit specialization </span>
+<a name="l00292"></a>00292 <span class="comment">            of member function templates is not supported for non specialized class </span>
+<a name="l00293"></a>00293 <span class="comment">            templates. Besides template functions would always fall back to the least </span>
+<a name="l00294"></a>00294 <span class="comment">            efficient variant (the one for input iterators) in case of iterators having </span>
+<a name="l00295"></a>00295 <span class="comment">            custom tags derived from basic ones. */</span>
+<a name="l00296"></a>00296         <span class="comment">/*override*/</span> task* execute()
+<a name="l00297"></a>00297         {
+<a name="l00298"></a>00298             <span class="keyword">typedef</span> <span class="keyword">typename</span> std::iterator_traits&lt;Iterator&gt;::iterator_category iterator_tag;
+<a name="l00299"></a>00299             <span class="keywordflow">return</span> run( (iterator_tag*)NULL );
+<a name="l00300"></a>00300         }
+<a name="l00301"></a>00301 
+<a name="l00304"></a>00304         <span class="keyword">inline</span> task* run( <span class="keywordtype">void</span>* ) { <span class="keywordflow">return</span> run_for_input_iterator(); }
+<a name="l00305"></a>00305         
+<a name="l00306"></a>00306         task* run_for_input_iterator() {
+<a name="l00307"></a>00307             <span class="keyword">typedef</span> do_group_task_input&lt;Body, Item&gt; block_type;
+<a name="l00308"></a>00308 
+<a name="l00309"></a>00309             block_type&amp; t = *<span class="keyword">new</span>( allocate_additional_child_of(*my_feeder.my_barrier) ) block_type(my_feeder);
+<a name="l00310"></a>00310             size_t k=0; 
+<a name="l00311"></a>00311             <span class="keywordflow">while</span>( !(my_first == my_last) ) {
+<a name="l00312"></a>00312                 <span class="keyword">new</span> (t.my_arg.begin() + k) Item(*my_first);
+<a name="l00313"></a>00313                 ++my_first;
+<a name="l00314"></a>00314                 <span class="keywordflow">if</span>( ++k==block_type::max_arg_size ) {
+<a name="l00315"></a>00315                     <span class="keywordflow">if</span> ( !(my_first == my_last) )
+<a name="l00316"></a>00316                         recycle_to_reexecute();
+<a name="l00317"></a>00317                     <span class="keywordflow">break</span>;
+<a name="l00318"></a>00318                 }
+<a name="l00319"></a>00319             }
+<a name="l00320"></a>00320             <span class="keywordflow">if</span>( k==0 ) {
+<a name="l00321"></a>00321                 destroy(t);
+<a name="l00322"></a>00322                 <span class="keywordflow">return</span> NULL;
+<a name="l00323"></a>00323             } <span class="keywordflow">else</span> {
+<a name="l00324"></a>00324                 t.my_size = k;
+<a name="l00325"></a>00325                 <span class="keywordflow">return</span> &amp;t;
+<a name="l00326"></a>00326             }
+<a name="l00327"></a>00327         }
+<a name="l00328"></a>00328 
+<a name="l00329"></a>00329         <span class="keyword">inline</span> task* run( std::forward_iterator_tag* ) { <span class="keywordflow">return</span> run_for_forward_iterator(); }
+<a name="l00330"></a>00330 
+<a name="l00331"></a>00331         task* run_for_forward_iterator() {
+<a name="l00332"></a>00332             <span class="keyword">typedef</span> do_group_task_forward&lt;Iterator, Body, Item&gt; block_type;
+<a name="l00333"></a>00333 
+<a name="l00334"></a>00334             Iterator first = my_first;
+<a name="l00335"></a>00335             size_t k=0; 
+<a name="l00336"></a>00336             <span class="keywordflow">while</span>( !(my_first==my_last) ) {
+<a name="l00337"></a>00337                 ++my_first;
+<a name="l00338"></a>00338                 <span class="keywordflow">if</span>( ++k==block_type::max_arg_size ) {
+<a name="l00339"></a>00339                     <span class="keywordflow">if</span> ( !(my_first==my_last) )
+<a name="l00340"></a>00340                         recycle_to_reexecute();
+<a name="l00341"></a>00341                     <span class="keywordflow">break</span>;
+<a name="l00342"></a>00342                 }
+<a name="l00343"></a>00343             }
+<a name="l00344"></a>00344             <span class="keywordflow">return</span> k==0 ? NULL : <span class="keyword">new</span>( allocate_additional_child_of(*my_feeder.my_barrier) ) block_type(first, k, my_feeder);
+<a name="l00345"></a>00345         }
+<a name="l00346"></a>00346         
+<a name="l00347"></a>00347         <span class="keyword">inline</span> task* run( std::random_access_iterator_tag* ) { <span class="keywordflow">return</span> run_for_random_access_iterator(); }
+<a name="l00348"></a>00348 
+<a name="l00349"></a>00349         task* run_for_random_access_iterator() {
+<a name="l00350"></a>00350             <span class="keyword">typedef</span> do_group_task_forward&lt;Iterator, Body, Item&gt; block_type;
+<a name="l00351"></a>00351             <span class="keyword">typedef</span> do_iteration_task_iter&lt;Iterator, Body, Item&gt; iteration_type;
+<a name="l00352"></a>00352             
+<a name="l00353"></a>00353             size_t k = static_cast&lt;size_t&gt;(my_last-my_first); 
+<a name="l00354"></a>00354             <span class="keywordflow">if</span>( k &gt; block_type::max_arg_size ) {
+<a name="l00355"></a>00355                 Iterator middle = my_first + k/2;
+<a name="l00356"></a>00356 
+<a name="l00357"></a>00357                 empty_task&amp; c = *<span class="keyword">new</span>( allocate_continuation() ) empty_task;
+<a name="l00358"></a>00358                 do_task_iter&amp; b = *<span class="keyword">new</span>( c.allocate_child() ) do_task_iter(middle, my_last, my_feeder);
+<a name="l00359"></a>00359                 recycle_as_child_of(c);
+<a name="l00360"></a>00360 
+<a name="l00361"></a>00361                 my_last = middle;
+<a name="l00362"></a>00362                 c.set_ref_count(2);
+<a name="l00363"></a>00363                 c.spawn(b);
+<a name="l00364"></a>00364                 <span class="keywordflow">return</span> <span class="keyword">this</span>;
+<a name="l00365"></a>00365             }<span class="keywordflow">else</span> <span class="keywordflow">if</span>( k != 0 ) {
+<a name="l00366"></a>00366                 task_list list;
+<a name="l00367"></a>00367                 task* t; 
+<a name="l00368"></a>00368                 size_t k1=0; 
+<a name="l00369"></a>00369                 <span class="keywordflow">for</span>(;;) {
+<a name="l00370"></a>00370                     t = <span class="keyword">new</span>( allocate_child() ) iteration_type(my_first, my_feeder);
+<a name="l00371"></a>00371                     ++my_first;
+<a name="l00372"></a>00372                     <span class="keywordflow">if</span>( ++k1==k ) <span class="keywordflow">break</span>;
+<a name="l00373"></a>00373                     list.push_back(*t);
+<a name="l00374"></a>00374                 }
+<a name="l00375"></a>00375                 set_ref_count(<span class="keywordtype">int</span>(k+1));
+<a name="l00376"></a>00376                 spawn(list);
+<a name="l00377"></a>00377                 spawn_and_wait_for_all(*t);
+<a name="l00378"></a>00378             }
+<a name="l00379"></a>00379             <span class="keywordflow">return</span> NULL;
+<a name="l00380"></a>00380         }
+<a name="l00381"></a>00381     }; <span class="comment">// class do_task_iter</span>
+<a name="l00382"></a>00382 
+<a name="l00384"></a>00384 
+<a name="l00386"></a>00386     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Iterator, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Item&gt; 
+<a name="l00387"></a>00387     <span class="keywordtype">void</span> run_parallel_do( Iterator first, Iterator last, <span class="keyword">const</span> Body&amp; body
+<a name="l00388"></a>00388 #<span class="keywordflow">if</span> __TBB_TASK_GROUP_CONTEXT
+<a name="l00389"></a>00389         , task_group_context&amp; context
+<a name="l00390"></a>00390 #endif
+<a name="l00391"></a>00391         )
+<a name="l00392"></a>00392     {
+<a name="l00393"></a>00393         <span class="keyword">typedef</span> do_task_iter&lt;Iterator, Body, Item&gt; root_iteration_task;
+<a name="l00394"></a>00394 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00395"></a>00395 <span class="preprocessor"></span>        parallel_do_feeder_impl&lt;Body, Item&gt; feeder(context);
+<a name="l00396"></a>00396 <span class="preprocessor">#else</span>
+<a name="l00397"></a>00397 <span class="preprocessor"></span>        parallel_do_feeder_impl&lt;Body, Item&gt; feeder;
+<a name="l00398"></a>00398 <span class="preprocessor">#endif</span>
+<a name="l00399"></a>00399 <span class="preprocessor"></span>        feeder.my_body = &amp;body;
+<a name="l00400"></a>00400 
+<a name="l00401"></a>00401         root_iteration_task &amp;t = *<span class="keyword">new</span>( feeder.my_barrier-&gt;allocate_child() ) root_iteration_task(first, last, feeder);
+<a name="l00402"></a>00402 
+<a name="l00403"></a>00403         feeder.my_barrier-&gt;set_ref_count(2);
+<a name="l00404"></a>00404         feeder.my_barrier-&gt;spawn_and_wait_for_all(t);
+<a name="l00405"></a>00405     }
+<a name="l00406"></a>00406 
+<a name="l00408"></a>00408 
+<a name="l00410"></a>00410     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Iterator, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Item&gt; 
+<a name="l00411"></a>00411     <span class="keywordtype">void</span> select_parallel_do( Iterator first, Iterator last, <span class="keyword">const</span> Body&amp; body, <span class="keywordtype">void</span> (Body::*)(Item) <span class="keyword">const</span>
+<a name="l00412"></a>00412 #<span class="keywordflow">if</span> __TBB_TASK_GROUP_CONTEXT
+<a name="l00413"></a>00413         , task_group_context&amp; context 
+<a name="l00414"></a>00414 #endif <span class="comment">// __TBB_TASK_GROUP_CONTEXT </span>
+<a name="l00415"></a>00415         )
+<a name="l00416"></a>00416     {
+<a name="l00417"></a>00417         run_parallel_do&lt;Iterator, Body, typename strip&lt;Item&gt;::type&gt;( first, last, body
+<a name="l00418"></a>00418 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00419"></a>00419 <span class="preprocessor"></span>            , context
+<a name="l00420"></a>00420 <span class="preprocessor">#endif // __TBB_TASK_GROUP_CONTEXT </span>
+<a name="l00421"></a>00421 <span class="preprocessor"></span>            );
+<a name="l00422"></a>00422     }
+<a name="l00423"></a>00423 
+<a name="l00425"></a>00425 
+<a name="l00427"></a>00427     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Iterator, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Item, <span class="keyword">typename</span> _Item&gt; 
+<a name="l00428"></a>00428     <span class="keywordtype">void</span> select_parallel_do( Iterator first, Iterator last, <span class="keyword">const</span> Body&amp; body, <span class="keywordtype">void</span> (Body::*)(Item, parallel_do_feeder&lt;_Item&gt;&amp;) <span class="keyword">const</span>
+<a name="l00429"></a>00429 #<span class="keywordflow">if</span> __TBB_TASK_GROUP_CONTEXT
+<a name="l00430"></a>00430         , task_group_context&amp; context 
+<a name="l00431"></a>00431 #endif <span class="comment">// __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00432"></a>00432         )
+<a name="l00433"></a>00433     {
+<a name="l00434"></a>00434         run_parallel_do&lt;Iterator, Body, typename strip&lt;Item&gt;::type&gt;( first, last, body
+<a name="l00435"></a>00435 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00436"></a>00436 <span class="preprocessor"></span>            , context
+<a name="l00437"></a>00437 <span class="preprocessor">#endif // __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00438"></a>00438 <span class="preprocessor"></span>            );
+<a name="l00439"></a>00439     }
+<a name="l00440"></a>00440 
+<a name="l00441"></a>00441 } <span class="comment">// namespace internal</span>
+<a name="l00443"></a>00443 <span class="comment"></span>
+<a name="l00444"></a>00444 
+<a name="l00467"></a>00467 
+<a name="l00468"></a>00468 
+<a name="l00469"></a>00469 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Iterator, <span class="keyword">typename</span> Body&gt; 
+<a name="l00470"></a><a class="code" href="a00280.html#g3383e2703977012b6f384d673410f1f7">00470</a> <span class="keywordtype">void</span> <a class="code" href="a00280.html#g3383e2703977012b6f384d673410f1f7">parallel_do</a>( Iterator first, Iterator last, <span class="keyword">const</span> Body&amp; body )
+<a name="l00471"></a>00471 {
+<a name="l00472"></a>00472     <span class="keywordflow">if</span> ( first == last )
+<a name="l00473"></a>00473         <span class="keywordflow">return</span>;
+<a name="l00474"></a>00474 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00475"></a>00475 <span class="preprocessor"></span>    <a class="code" href="a00206.html">task_group_context</a> context;
+<a name="l00476"></a>00476 <span class="preprocessor">#endif // __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00477"></a>00477 <span class="preprocessor"></span>    internal::select_parallel_do( first, last, body, &amp;Body::operator()
+<a name="l00478"></a>00478 #<span class="keywordflow">if</span> __TBB_TASK_GROUP_CONTEXT
+<a name="l00479"></a>00479         , context
+<a name="l00480"></a>00480 #endif <span class="comment">// __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00481"></a>00481         );
+<a name="l00482"></a>00482 }
+<a name="l00483"></a>00483 
+<a name="l00484"></a>00484 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00486"></a>00486 <span class="preprocessor"></span>
+<a name="l00487"></a>00487 <span class="preprocessor">template&lt;typename Iterator, typename Body&gt; </span>
+<a name="l00488"></a><a class="code" href="a00280.html#g2617dc9b88b3285a7212599d49f74228">00488</a> <span class="preprocessor"></span><span class="keywordtype">void</span> <a class="code" href="a00280.html#g3383e2703977012b6f384d673410f1f7">parallel_do</a>( Iterator first, Iterator last, <span class="keyword">const</span> Body&amp; body, <a class="code" href="a00206.html">task_group_context</a>&amp; context  )
+<a name="l00489"></a>00489 {
+<a name="l00490"></a>00490     <span class="keywordflow">if</span> ( first == last )
+<a name="l00491"></a>00491         <span class="keywordflow">return</span>;
+<a name="l00492"></a>00492     internal::select_parallel_do( first, last, body, &amp;Body::operator(), context );
+<a name="l00493"></a>00493 }
+<a name="l00494"></a>00494 <span class="preprocessor">#endif // __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00495"></a>00495 <span class="preprocessor"></span>
+<a name="l00497"></a>00497 
+<a name="l00498"></a>00498 } <span class="comment">// namespace </span>
+<a name="l00499"></a>00499 
+<a name="l00500"></a>00500 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_parallel_do_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
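
As an illustrative sketch (not part of the files added by this commit), the parallel_do.h listing above can be exercised from user code as follows: parallel_do_feeder::add() forwards to parallel_do_feeder_impl::internal_add(), and parallel_do_operator_selector selects the two-argument Body::operator() so the feeder is available inside the body. The names WorkItem and process() below are hypothetical stand-ins for user code.

<pre class="fragment">
#include &lt;list&gt;
#include "tbb/parallel_do.h"

// Hypothetical item type and per-item routine, used only for illustration.
struct WorkItem { int depth; };
void process( const WorkItem&amp; ) { /* hypothetical per-item work */ }

// The two-argument operator() matches the second internal_call overload
// above, so the feeder reference is passed through to the body.
struct Body {
    void operator()( WorkItem item, tbb::parallel_do_feeder&lt;WorkItem&gt;&amp; feeder ) const {
        process( item );
        if( item.depth &gt; 0 ) {
            WorkItem child = { item.depth - 1 };
            feeder.add( child );   // routed to parallel_do_feeder_impl::internal_add()
        }
    }
};

int main() {
    std::list&lt;WorkItem&gt; items;
    for( int i = 0; i &lt; 4; ++i ) {
        WorkItem w = { 2 };
        items.push_back( w );
    }
    // std::list iterators are (at least) forward iterators, so do_task_iter
    // dispatches to run_for_forward_iterator() rather than the input-iterator path.
    tbb::parallel_do( items.begin(), items.end(), Body() );
    return 0;
}
</pre>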
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00385.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00385.html
new file mode 100644 (file)
index 0000000..3438876
--- /dev/null
@@ -0,0 +1,234 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>parallel_for.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>parallel_for.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_parallel_for_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_parallel_for_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "task.h"</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#include "partitioner.h"</span>
+<a name="l00026"></a>00026 <span class="preprocessor">#include "blocked_range.h"</span>
+<a name="l00027"></a>00027 <span class="preprocessor">#include &lt;new&gt;</span>
+<a name="l00028"></a>00028 <span class="preprocessor">#include "tbb_exception.h"</span>
+<a name="l00029"></a>00029 
+<a name="l00030"></a>00030 <span class="keyword">namespace </span>tbb {
+<a name="l00031"></a>00031 
+<a name="l00033"></a>00033 <span class="keyword">namespace </span>internal {
+<a name="l00034"></a>00034 
+<a name="l00036"></a>00036 
+<a name="l00037"></a>00037     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Partitioner&gt;
+<a name="l00038"></a>00038     <span class="keyword">class </span>start_for: <span class="keyword">public</span> task {
+<a name="l00039"></a>00039         Range my_range;
+<a name="l00040"></a>00040         <span class="keyword">const</span> Body my_body;
+<a name="l00041"></a>00041         <span class="keyword">typename</span> Partitioner::partition_type my_partition;
+<a name="l00042"></a>00042         <span class="comment">/*override*/</span> task* execute();
+<a name="l00043"></a>00043 
+<a name="l00045"></a>00045         start_for( <span class="keyword">const</span> Range&amp; range, <span class="keyword">const</span> Body&amp; body, Partitioner&amp; partitioner ) :
+<a name="l00046"></a>00046             my_range(range),    
+<a name="l00047"></a>00047             my_body(body),
+<a name="l00048"></a>00048             my_partition(partitioner)
+<a name="l00049"></a>00049         {
+<a name="l00050"></a>00050         }
+<a name="l00052"></a>00052 
+<a name="l00053"></a>00053         start_for( start_for&amp; parent_, split ) :
+<a name="l00054"></a>00054             my_range(parent_.my_range,split()),    
+<a name="l00055"></a>00055             my_body(parent_.my_body),
+<a name="l00056"></a>00056             my_partition(parent_.my_partition,split())
+<a name="l00057"></a>00057         {
+<a name="l00058"></a>00058             my_partition.set_affinity(*<span class="keyword">this</span>);
+<a name="l00059"></a>00059         }
+<a name="l00061"></a>00061         <span class="comment">/*override*/</span> <span class="keywordtype">void</span> note_affinity( affinity_id <span class="keywordtype">id</span> ) {
+<a name="l00062"></a>00062             my_partition.note_affinity( <span class="keywordtype">id</span> );
+<a name="l00063"></a>00063         }
+<a name="l00064"></a>00064     <span class="keyword">public</span>:
+<a name="l00065"></a>00065         <span class="keyword">static</span> <span class="keywordtype">void</span> run(  <span class="keyword">const</span> Range&amp; range, <span class="keyword">const</span> Body&amp; body, <span class="keyword">const</span> Partitioner&amp; partitioner ) {
+<a name="l00066"></a>00066             <span class="keywordflow">if</span>( !range.empty() ) {
+<a name="l00067"></a>00067 #<span class="keywordflow">if</span> !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP
+<a name="l00068"></a>00068                 start_for&amp; a = *<span class="keyword">new</span>(<a class="code" href="a00204.html#23acb0da0afd690da797f9f882027d34">task::allocate_root</a>()) start_for(range,body,const_cast&lt;Partitioner&amp;&gt;(partitioner));
+<a name="l00069"></a>00069 <span class="preprocessor">#else</span>
+<a name="l00070"></a>00070 <span class="preprocessor"></span>                <span class="comment">// Bound context prevents exceptions from body to affect nesting or sibling algorithms,</span>
+<a name="l00071"></a>00071                 <span class="comment">// and allows users to handle exceptions safely by wrapping parallel_for in the try-block.</span>
+<a name="l00072"></a>00072                 task_group_context context;
+<a name="l00073"></a>00073                 start_for&amp; a = *<span class="keyword">new</span>(task::allocate_root(context)) start_for(range,body,const_cast&lt;Partitioner&amp;&gt;(partitioner));
+<a name="l00074"></a>00074 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_TASK_GROUP_CONTEXT &amp;&amp; !TBB_JOIN_OUTER_TASK_GROUP */</span>
+<a name="l00075"></a>00075                 <a class="code" href="a00204.html#ce8ce689c26a4ddf343829bc3c73290a">task::spawn_root_and_wait</a>(a);
+<a name="l00076"></a>00076             }
+<a name="l00077"></a>00077         }
+<a name="l00078"></a>00078 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00079"></a>00079 <span class="preprocessor"></span>        <span class="keyword">static</span> <span class="keywordtype">void</span> run(  <span class="keyword">const</span> Range&amp; range, <span class="keyword">const</span> Body&amp; body, <span class="keyword">const</span> Partitioner&amp; partitioner, task_group_context&amp; context ) {
+<a name="l00080"></a>00080             <span class="keywordflow">if</span>( !range.empty() ) {
+<a name="l00081"></a>00081                 start_for&amp; a = *<span class="keyword">new</span>(task::allocate_root(context)) start_for(range,body,const_cast&lt;Partitioner&amp;&gt;(partitioner));
+<a name="l00082"></a>00082                 <a class="code" href="a00204.html#ce8ce689c26a4ddf343829bc3c73290a">task::spawn_root_and_wait</a>(a);
+<a name="l00083"></a>00083             }
+<a name="l00084"></a>00084         }
+<a name="l00085"></a>00085 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_TASK_GROUP_CONTEXT */</span>
+<a name="l00086"></a>00086     };
+<a name="l00087"></a>00087 
+<a name="l00088"></a>00088     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Partitioner&gt;
+<a name="l00089"></a>00089     task* start_for&lt;Range,Body,Partitioner&gt;::execute() {
+<a name="l00090"></a>00090         <span class="keywordflow">if</span>( !my_range.is_divisible() || my_partition.should_execute_range(*<span class="keyword">this</span>) ) {
+<a name="l00091"></a>00091             my_body( my_range );
+<a name="l00092"></a>00092             <span class="keywordflow">return</span> my_partition.continue_after_execute_range(); 
+<a name="l00093"></a>00093         } <span class="keywordflow">else</span> {
+<a name="l00094"></a>00094             empty_task&amp; c = *<span class="keyword">new</span>( this-&gt;allocate_continuation() ) empty_task;
+<a name="l00095"></a>00095             recycle_as_child_of(c);
+<a name="l00096"></a>00096             c.set_ref_count(2);
+<a name="l00097"></a>00097             <span class="keywordtype">bool</span> delay = my_partition.decide_whether_to_delay();
+<a name="l00098"></a>00098             start_for&amp; b = *<span class="keyword">new</span>( c.allocate_child() ) start_for(*<span class="keyword">this</span>,split());
+<a name="l00099"></a>00099             my_partition.spawn_or_delay(delay,b);
+<a name="l00100"></a>00100             <span class="keywordflow">return</span> <span class="keyword">this</span>;
+<a name="l00101"></a>00101         }
+<a name="l00102"></a>00102     } 
+<a name="l00103"></a>00103 } <span class="comment">// namespace internal</span>
+<a name="l00105"></a>00105 <span class="comment"></span>
+<a name="l00106"></a>00106 
+<a name="l00107"></a>00107 <span class="comment">// Requirements on Range concept are documented in blocked_range.h</span>
+<a name="l00108"></a>00108 
+<a name="l00119"></a>00119 
+<a name="l00121"></a>00121 
+<a name="l00122"></a>00122 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body&gt;
+<a name="l00123"></a><a class="code" href="a00280.html#g68cc046ef72c42ce205fccbc435a0d81">00123</a> <span class="keywordtype">void</span> <a class="code" href="a00280.html#g68cc046ef72c42ce205fccbc435a0d81">parallel_for</a>( <span class="keyword">const</span> Range&amp; range, <span class="keyword">const</span> Body&amp; body ) {
+<a name="l00124"></a>00124     internal::start_for&lt;Range,Body,__TBB_DEFAULT_PARTITIONER&gt;::run(range,body,__TBB_DEFAULT_PARTITIONER());
+<a name="l00125"></a>00125 }
+<a name="l00126"></a>00126 
+<a name="l00128"></a>00128 
+<a name="l00129"></a>00129 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body&gt;
+<a name="l00130"></a><a class="code" href="a00280.html#g13cac5dd55c7533bccea43a51c33d0e5">00130</a> <span class="keywordtype">void</span> <a class="code" href="a00280.html#g68cc046ef72c42ce205fccbc435a0d81">parallel_for</a>( <span class="keyword">const</span> Range&amp; range, <span class="keyword">const</span> Body&amp; body, <span class="keyword">const</span> <a class="code" href="a00198.html">simple_partitioner</a>&amp; partitioner ) {
+<a name="l00131"></a>00131     internal::start_for&lt;Range,Body,simple_partitioner&gt;::run(range,body,partitioner);
+<a name="l00132"></a>00132 }
+<a name="l00133"></a>00133 
+<a name="l00135"></a>00135 
+<a name="l00136"></a>00136 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body&gt;
+<a name="l00137"></a><a class="code" href="a00280.html#ga7ac75d532389b55b9247f3fdb0b00d1">00137</a> <span class="keywordtype">void</span> <a class="code" href="a00280.html#g68cc046ef72c42ce205fccbc435a0d81">parallel_for</a>( <span class="keyword">const</span> Range&amp; range, <span class="keyword">const</span> Body&amp; body, <span class="keyword">const</span> <a class="code" href="a00150.html">auto_partitioner</a>&amp; partitioner ) {
+<a name="l00138"></a>00138     internal::start_for&lt;Range,Body,auto_partitioner&gt;::run(range,body,partitioner);
+<a name="l00139"></a>00139 }
+<a name="l00140"></a>00140 
+<a name="l00142"></a>00142 
+<a name="l00143"></a>00143 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body&gt;
+<a name="l00144"></a><a class="code" href="a00280.html#g9cd1b210ceb1c040f30e390b4a21bde8">00144</a> <span class="keywordtype">void</span> <a class="code" href="a00280.html#g68cc046ef72c42ce205fccbc435a0d81">parallel_for</a>( <span class="keyword">const</span> Range&amp; range, <span class="keyword">const</span> Body&amp; body, <a class="code" href="a00145.html">affinity_partitioner</a>&amp; partitioner ) {
+<a name="l00145"></a>00145     internal::start_for&lt;Range,Body,affinity_partitioner&gt;::run(range,body,partitioner);
+<a name="l00146"></a>00146 }
+<a name="l00147"></a>00147 
+<a name="l00148"></a>00148 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00150"></a>00150 <span class="preprocessor"></span>
+<a name="l00151"></a>00151 <span class="preprocessor">template&lt;typename Range, typename Body&gt;</span>
+<a name="l00152"></a><a class="code" href="a00280.html#g2d317a5e0078cd193125439fed60dfdc">00152</a> <span class="preprocessor"></span><span class="keywordtype">void</span> <a class="code" href="a00280.html#g68cc046ef72c42ce205fccbc435a0d81">parallel_for</a>( <span class="keyword">const</span> Range&amp; range, <span class="keyword">const</span> Body&amp; body, <span class="keyword">const</span> <a class="code" href="a00198.html">simple_partitioner</a>&amp; partitioner, <a class="code" href="a00206.html">task_group_context</a>&amp; context ) {
+<a name="l00153"></a>00153     internal::start_for&lt;Range,Body,simple_partitioner&gt;::run(range, body, partitioner, context);
+<a name="l00154"></a>00154 }
+<a name="l00155"></a>00155 
+<a name="l00157"></a>00157 
+<a name="l00158"></a>00158 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body&gt;
+<a name="l00159"></a><a class="code" href="a00280.html#g1c0700e3f85e83a788ff3ede88ebb7e9">00159</a> <span class="keywordtype">void</span> <a class="code" href="a00280.html#g68cc046ef72c42ce205fccbc435a0d81">parallel_for</a>( <span class="keyword">const</span> Range&amp; range, <span class="keyword">const</span> Body&amp; body, <span class="keyword">const</span> <a class="code" href="a00150.html">auto_partitioner</a>&amp; partitioner, <a class="code" href="a00206.html">task_group_context</a>&amp; context ) {
+<a name="l00160"></a>00160     internal::start_for&lt;Range,Body,auto_partitioner&gt;::run(range, body, partitioner, context);
+<a name="l00161"></a>00161 }
+<a name="l00162"></a>00162 
+<a name="l00164"></a>00164 
+<a name="l00165"></a>00165 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body&gt;
+<a name="l00166"></a><a class="code" href="a00280.html#g04b4696b67370c01353ff5974c8f1196">00166</a> <span class="keywordtype">void</span> <a class="code" href="a00280.html#g68cc046ef72c42ce205fccbc435a0d81">parallel_for</a>( <span class="keyword">const</span> Range&amp; range, <span class="keyword">const</span> Body&amp; body, <a class="code" href="a00145.html">affinity_partitioner</a>&amp; partitioner, <a class="code" href="a00206.html">task_group_context</a>&amp; context ) {
+<a name="l00167"></a>00167     internal::start_for&lt;Range,Body,affinity_partitioner&gt;::run(range,body,partitioner, context);
+<a name="l00168"></a>00168 }
+<a name="l00169"></a>00169 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_TASK_GROUP_CONTEXT */</span>
+<a name="l00170"></a>00170 
+<a name="l00171"></a>00171 
+<a name="l00173"></a>00173 <span class="keyword">namespace </span>internal {
+<a name="l00175"></a>00175 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Function, <span class="keyword">typename</span> Index&gt;
+<a name="l00176"></a>00176 <span class="keyword">class </span>parallel_for_body : internal::no_assign {
+<a name="l00177"></a>00177     <span class="keyword">const</span> Function &amp;my_func;
+<a name="l00178"></a>00178     <span class="keyword">const</span> Index my_begin;
+<a name="l00179"></a>00179     <span class="keyword">const</span> Index my_step; 
+<a name="l00180"></a>00180 <span class="keyword">public</span>:
+<a name="l00181"></a>00181     parallel_for_body( <span class="keyword">const</span> Function&amp; _func, Index&amp; _begin, Index&amp; _step) 
+<a name="l00182"></a>00182         : my_func(_func), my_begin(_begin), my_step(_step) {}
+<a name="l00183"></a>00183     
+<a name="l00184"></a>00184     <span class="keywordtype">void</span> operator()( <a class="code" href="a00152.html">tbb::blocked_range&lt;Index&gt;</a>&amp; r )<span class="keyword"> const </span>{
+<a name="l00185"></a>00185         <span class="keywordflow">for</span>( Index i = r.<a class="code" href="a00152.html#18d2258400756ac1446dac7676b18df3">begin</a>(),  k = my_begin + i * my_step; i &lt; r.<a class="code" href="a00152.html#8b929d93ddc13f148b11bceef3a3bdf8">end</a>(); i++, k = k + my_step)
+<a name="l00186"></a>00186             my_func( k );
+<a name="l00187"></a>00187     }
+<a name="l00188"></a>00188 };
+<a name="l00189"></a>00189 } <span class="comment">// namespace internal</span>
+<a name="l00191"></a>00191 <span class="comment"></span>
+<a name="l00192"></a>00192 <span class="keyword">namespace </span>strict_ppl {
+<a name="l00193"></a>00193 
+<a name="l00195"></a>00195 
+<a name="l00196"></a>00196 <span class="keyword">template</span> &lt;<span class="keyword">typename</span> Index, <span class="keyword">typename</span> Function&gt;
+<a name="l00197"></a><a class="code" href="a00279.html#15c764c70c8a32e7a4b8c291d0cc8dde">00197</a> <span class="keywordtype">void</span> <a class="code" href="a00280.html#g68cc046ef72c42ce205fccbc435a0d81">parallel_for</a>(Index first, Index last, Index step, <span class="keyword">const</span> Function&amp; f) {
+<a name="l00198"></a>00198     <a class="code" href="a00206.html">tbb::task_group_context</a> context;
+<a name="l00199"></a>00199     <a class="code" href="a00280.html#g68cc046ef72c42ce205fccbc435a0d81">parallel_for</a>(first, last, step, f, context);
+<a name="l00200"></a>00200 }
+<a name="l00201"></a>00201 <span class="keyword">template</span> &lt;<span class="keyword">typename</span> Index, <span class="keyword">typename</span> Function&gt;
+<a name="l00202"></a>00202 <span class="keywordtype">void</span> <a class="code" href="a00280.html#g68cc046ef72c42ce205fccbc435a0d81">parallel_for</a>(Index first, Index last, Index step, <span class="keyword">const</span> Function&amp; f, <a class="code" href="a00206.html">tbb::task_group_context</a> &amp;context) {
+<a name="l00203"></a>00203     <span class="keywordflow">if</span> (step &lt;= 0 )
+<a name="l00204"></a>00204         internal::throw_exception(internal::eid_nonpositive_step); <span class="comment">// throws std::invalid_argument</span>
+<a name="l00205"></a>00205     <span class="keywordflow">else</span> <span class="keywordflow">if</span> (last &gt; first) {
+<a name="l00206"></a>00206         <span class="comment">// Above "else" avoids "potential divide by zero" warning on some platforms</span>
+<a name="l00207"></a>00207         Index end = (last - first - Index(1)) / step + Index(1);
+<a name="l00208"></a>00208         <a class="code" href="a00152.html">tbb::blocked_range&lt;Index&gt;</a> range(static_cast&lt;Index&gt;(0), end);
+<a name="l00209"></a>00209         internal::parallel_for_body&lt;Function, Index&gt; body(f, first, step);
+<a name="l00210"></a>00210         <a class="code" href="a00280.html#g68cc046ef72c42ce205fccbc435a0d81">tbb::parallel_for</a>(range, body, <a class="code" href="a00150.html">tbb::auto_partitioner</a>(), context);
+<a name="l00211"></a>00211     }
+<a name="l00212"></a>00212 }
+<a name="l00214"></a>00214 <span class="keyword">template</span> &lt;<span class="keyword">typename</span> Index, <span class="keyword">typename</span> Function&gt;
+<a name="l00215"></a><a class="code" href="a00279.html#490399525b1e690ec31d6db964c6b272">00215</a> <span class="keywordtype">void</span> <a class="code" href="a00280.html#g68cc046ef72c42ce205fccbc435a0d81">parallel_for</a>(Index first, Index last, <span class="keyword">const</span> Function&amp; f) {
+<a name="l00216"></a>00216     <a class="code" href="a00206.html">tbb::task_group_context</a> context;
+<a name="l00217"></a>00217     <a class="code" href="a00280.html#g68cc046ef72c42ce205fccbc435a0d81">parallel_for</a>(first, last, static_cast&lt;Index&gt;(1), f, context);
+<a name="l00218"></a>00218 }
+<a name="l00219"></a>00219 <span class="keyword">template</span> &lt;<span class="keyword">typename</span> Index, <span class="keyword">typename</span> Function&gt;
+<a name="l00220"></a>00220 <span class="keywordtype">void</span> <a class="code" href="a00280.html#g68cc046ef72c42ce205fccbc435a0d81">parallel_for</a>(Index first, Index last, <span class="keyword">const</span> Function&amp; f, <a class="code" href="a00206.html">tbb::task_group_context</a> &amp;context) {
+<a name="l00221"></a>00221     <a class="code" href="a00280.html#g68cc046ef72c42ce205fccbc435a0d81">parallel_for</a>(first, last, static_cast&lt;Index&gt;(1), f, context);
+<a name="l00222"></a>00222 }
+<a name="l00223"></a>00223 
+<a name="l00225"></a>00225 
+<a name="l00226"></a>00226 } <span class="comment">// namespace strict_ppl</span>
+<a name="l00227"></a>00227 
+<a name="l00228"></a>00228 <span class="keyword">using</span> <a class="code" href="a00280.html#g68cc046ef72c42ce205fccbc435a0d81">strict_ppl::parallel_for</a>;
+<a name="l00229"></a>00229 
+<a name="l00230"></a>00230 } <span class="comment">// namespace tbb</span>
+<a name="l00231"></a>00231 
+<a name="l00232"></a>00232 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_parallel_for_H */</span>
+<a name="l00233"></a>00233 
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
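For orientation, a minimal usage sketch of the compact index-based tbb::parallel_for overload rendered above (the container, the Scale functor, and the loop bounds are illustrative assumptions, not part of the committed files):

#include "tbb/parallel_for.h"
#include <vector>

// Functor invoked once per index by the compact overload.
struct Scale {
    std::vector<double>& v;
    explicit Scale(std::vector<double>& v_) : v(v_) {}
    void operator()(int i) const { v[i] *= 2.0; }
};

int main() {
    std::vector<double> data(1000, 1.0);
    Scale body(data);
    // Equivalent to calling the step overload with step = 1 and an
    // internally created task_group_context; a step <= 0 would throw
    // std::invalid_argument, per the check near the top of the file.
    tbb::parallel_for(0, static_cast<int>(data.size()), body);
    return 0;
}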
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00389.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00389.html
new file mode 100644 (file)
index 0000000..d57acb8
--- /dev/null
@@ -0,0 +1,92 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>parallel_for_each.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>parallel_for_each.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_parallel_for_each_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_parallel_for_each_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "parallel_do.h"</span>
+<a name="l00025"></a>00025 
+<a name="l00026"></a>00026 <span class="keyword">namespace </span>tbb {
+<a name="l00027"></a>00027 
+<a name="l00029"></a>00029 <span class="keyword">namespace </span>internal {
+<a name="l00030"></a>00030     <span class="comment">// The class calls user function in operator()</span>
+<a name="l00031"></a>00031     <span class="keyword">template</span> &lt;<span class="keyword">typename</span> Function, <span class="keyword">typename</span> Iterator&gt;
+<a name="l00032"></a>00032     <span class="keyword">class </span>parallel_for_each_body : internal::no_assign {
+<a name="l00033"></a>00033         <span class="keyword">const</span> Function &amp;my_func;
+<a name="l00034"></a>00034     <span class="keyword">public</span>:
+<a name="l00035"></a>00035         parallel_for_each_body(<span class="keyword">const</span> Function &amp;_func) : my_func(_func) {}
+<a name="l00036"></a>00036         parallel_for_each_body(<span class="keyword">const</span> parallel_for_each_body&lt;Function, Iterator&gt; &amp;_caller) : my_func(_caller.my_func) {}
+<a name="l00037"></a>00037 
+<a name="l00038"></a>00038         <span class="keywordtype">void</span> operator() ( <span class="keyword">typename</span> std::iterator_traits&lt;Iterator&gt;::value_type&amp; value )<span class="keyword"> const </span>{
+<a name="l00039"></a>00039             my_func(value);
+<a name="l00040"></a>00040         }
+<a name="l00041"></a>00041     };
+<a name="l00042"></a>00042 } <span class="comment">// namespace internal</span>
+<a name="l00044"></a>00044 <span class="comment"></span>
+<a name="l00048"></a>00048 
+<a name="l00049"></a>00049 
+<a name="l00050"></a>00050 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> InputIterator, <span class="keyword">typename</span> Function&gt;
+<a name="l00051"></a><a class="code" href="a00280.html#gcd40c32f319747e61a8f73fcfc452001">00051</a> <span class="keywordtype">void</span> <a class="code" href="a00280.html#gcd40c32f319747e61a8f73fcfc452001">parallel_for_each</a>(InputIterator first, InputIterator last, <span class="keyword">const</span> Function&amp; f, <a class="code" href="a00206.html">task_group_context</a> &amp;context) {
+<a name="l00052"></a>00052     internal::parallel_for_each_body&lt;Function, InputIterator&gt; body(f);
+<a name="l00053"></a>00053 
+<a name="l00054"></a>00054     <a class="code" href="a00280.html#g3383e2703977012b6f384d673410f1f7">tbb::parallel_do</a> (first, last, body, context);
+<a name="l00055"></a>00055 }
+<a name="l00056"></a>00056 
+<a name="l00058"></a>00058 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> InputIterator, <span class="keyword">typename</span> Function&gt;
+<a name="l00059"></a><a class="code" href="a00280.html#gc2d710ca573f0a9bd94379cba3772def">00059</a> <span class="keywordtype">void</span> <a class="code" href="a00280.html#gcd40c32f319747e61a8f73fcfc452001">parallel_for_each</a>(InputIterator first, InputIterator last, <span class="keyword">const</span> Function&amp; f) {
+<a name="l00060"></a>00060     internal::parallel_for_each_body&lt;Function, InputIterator&gt; body(f);
+<a name="l00061"></a>00061 
+<a name="l00062"></a>00062     <a class="code" href="a00280.html#g3383e2703977012b6f384d673410f1f7">tbb::parallel_do</a> (first, last, body);
+<a name="l00063"></a>00063 }
+<a name="l00064"></a>00064 
+<a name="l00066"></a>00066 
+<a name="l00067"></a>00067 } <span class="comment">// namespace</span>
+<a name="l00068"></a>00068 
+<a name="l00069"></a>00069 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_parallel_for_each_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
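Likewise, a minimal usage sketch of tbb::parallel_for_each as rendered above (the vector and the Negate functor are illustrative assumptions, not part of the committed files):

#include "tbb/parallel_for_each.h"
#include <vector>

// Applied to each element; the parameter type must match
// std::iterator_traits<Iterator>::value_type&.
struct Negate {
    void operator()(double& x) const { x = -x; }
};

int main() {
    std::vector<double> v(100, 3.0);
    // Forwards to tbb::parallel_do through the internal body wrapper
    // shown in the header above.
    tbb::parallel_for_each(v.begin(), v.end(), Negate());
    return 0;
}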
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00391.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00391.html
new file mode 100644 (file)
index 0000000..3685727
--- /dev/null
@@ -0,0 +1,374 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>parallel_invoke.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>parallel_invoke.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_parallel_invoke_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_parallel_invoke_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "task.h"</span>
+<a name="l00025"></a>00025 
+<a name="l00026"></a>00026 <span class="keyword">namespace </span>tbb {
+<a name="l00027"></a>00027 
+<a name="l00029"></a>00029 <span class="keyword">namespace </span>internal {
+<a name="l00030"></a>00030     <span class="comment">// Simple task object, executing user method</span>
+<a name="l00031"></a>00031     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> function&gt;
+<a name="l00032"></a>00032     <span class="keyword">class </span>function_invoker : <span class="keyword">public</span> task{
+<a name="l00033"></a>00033     <span class="keyword">public</span>:
+<a name="l00034"></a>00034         function_invoker(<span class="keyword">const</span> function&amp; _function) : my_function(_function) {}
+<a name="l00035"></a>00035     <span class="keyword">private</span>:
+<a name="l00036"></a>00036         <span class="keyword">const</span> function &amp;my_function;
+<a name="l00037"></a>00037         <span class="comment">/*override*/</span>
+<a name="l00038"></a>00038         task* execute()
+<a name="l00039"></a>00039         {
+<a name="l00040"></a>00040             my_function();
+<a name="l00041"></a>00041             <span class="keywordflow">return</span> NULL;
+<a name="l00042"></a>00042         }
+<a name="l00043"></a>00043     };
+<a name="l00044"></a>00044 
+<a name="l00045"></a>00045     <span class="comment">// The class spawns two or three child tasks</span>
+<a name="l00046"></a>00046     <span class="keyword">template</span> &lt;size_t N, <span class="keyword">typename</span> function1, <span class="keyword">typename</span> function2, <span class="keyword">typename</span> function3&gt;
+<a name="l00047"></a>00047     <span class="keyword">class </span>spawner : <span class="keyword">public</span> task {
+<a name="l00048"></a>00048     <span class="keyword">private</span>:
+<a name="l00049"></a>00049         <span class="keyword">const</span> function1&amp; my_func1;
+<a name="l00050"></a>00050         <span class="keyword">const</span> function2&amp; my_func2;
+<a name="l00051"></a>00051         <span class="keyword">const</span> function3&amp; my_func3;
+<a name="l00052"></a>00052         <span class="keywordtype">bool</span> is_recycled;
+<a name="l00053"></a>00053 
+<a name="l00054"></a>00054         task* execute (){
+<a name="l00055"></a>00055             <span class="keywordflow">if</span>(is_recycled){
+<a name="l00056"></a>00056                 <span class="keywordflow">return</span> NULL;
+<a name="l00057"></a>00057             }<span class="keywordflow">else</span>{
+<a name="l00058"></a>00058                 __TBB_ASSERT(N==2 || N==3, <span class="stringliteral">"Number of arguments passed to spawner is wrong"</span>);
+<a name="l00059"></a>00059                 set_ref_count(N);
+<a name="l00060"></a>00060                 recycle_as_safe_continuation();
+<a name="l00061"></a>00061                 internal::function_invoker&lt;function2&gt;* invoker2 = <span class="keyword">new</span> (allocate_child()) internal::function_invoker&lt;function2&gt;(my_func2);
+<a name="l00062"></a>00062                 __TBB_ASSERT(invoker2, <span class="stringliteral">"Child task allocation failed"</span>);
+<a name="l00063"></a>00063                 spawn(*invoker2);
+<a name="l00064"></a>00064                 size_t n = N; <span class="comment">// To prevent compiler warnings</span>
+<a name="l00065"></a>00065                 <span class="keywordflow">if</span> (n&gt;2) {
+<a name="l00066"></a>00066                     internal::function_invoker&lt;function3&gt;* invoker3 = <span class="keyword">new</span> (allocate_child()) internal::function_invoker&lt;function3&gt;(my_func3);
+<a name="l00067"></a>00067                     __TBB_ASSERT(invoker3, <span class="stringliteral">"Child task allocation failed"</span>);
+<a name="l00068"></a>00068                     spawn(*invoker3);
+<a name="l00069"></a>00069                 }
+<a name="l00070"></a>00070                 my_func1();
+<a name="l00071"></a>00071                 is_recycled = <span class="keyword">true</span>;
+<a name="l00072"></a>00072                 <span class="keywordflow">return</span> NULL;
+<a name="l00073"></a>00073             }
+<a name="l00074"></a>00074         } <span class="comment">// execute</span>
+<a name="l00075"></a>00075 
+<a name="l00076"></a>00076     <span class="keyword">public</span>:
+<a name="l00077"></a>00077         spawner(<span class="keyword">const</span> function1&amp; _func1, <span class="keyword">const</span> function2&amp; _func2, <span class="keyword">const</span> function3&amp; _func3) : my_func1(_func1), my_func2(_func2), my_func3(_func3), is_recycled(false) {}
+<a name="l00078"></a>00078     };
+<a name="l00079"></a>00079 
+<a name="l00080"></a>00080     <span class="comment">// Creates and spawns child tasks</span>
+<a name="l00081"></a>00081     <span class="keyword">class </span>parallel_invoke_helper : <span class="keyword">public</span> empty_task {
+<a name="l00082"></a>00082     <span class="keyword">public</span>:
+<a name="l00083"></a>00083         <span class="comment">// Dummy functor class</span>
+<a name="l00084"></a>00084         <span class="keyword">class </span>parallel_invoke_noop {
+<a name="l00085"></a>00085         <span class="keyword">public</span>:
+<a name="l00086"></a>00086             <span class="keywordtype">void</span> operator() ()<span class="keyword"> const </span>{}
+<a name="l00087"></a>00087         };
+<a name="l00088"></a>00088         <span class="comment">// Creates a helper object with user-defined number of children expected</span>
+<a name="l00089"></a>00089         parallel_invoke_helper(<span class="keywordtype">int</span> number_of_children)
+<a name="l00090"></a>00090         {
+<a name="l00091"></a>00091             set_ref_count(number_of_children + 1);
+<a name="l00092"></a>00092         }
+<a name="l00093"></a>00093         <span class="comment">// Adds child task and spawns it</span>
+<a name="l00094"></a>00094         <span class="keyword">template</span> &lt;<span class="keyword">typename</span> function&gt;
+<a name="l00095"></a>00095         <span class="keywordtype">void</span> add_child (<span class="keyword">const</span> function &amp;_func)
+<a name="l00096"></a>00096         {
+<a name="l00097"></a>00097             internal::function_invoker&lt;function&gt;* invoker = <span class="keyword">new</span> (allocate_child()) internal::function_invoker&lt;function&gt;(_func);
+<a name="l00098"></a>00098             __TBB_ASSERT(invoker, <span class="stringliteral">"Child task allocation failed"</span>);
+<a name="l00099"></a>00099             spawn(*invoker);
+<a name="l00100"></a>00100         }
+<a name="l00101"></a>00101 
+<a name="l00102"></a>00102         <span class="comment">// Adds a task with multiple child tasks and spawns it</span>
+<a name="l00103"></a>00103         <span class="comment">// two arguments</span>
+<a name="l00104"></a>00104         <span class="keyword">template</span> &lt;<span class="keyword">typename</span> function1, <span class="keyword">typename</span> function2&gt;
+<a name="l00105"></a>00105         <span class="keywordtype">void</span> add_children (<span class="keyword">const</span> function1&amp; _func1, <span class="keyword">const</span> function2&amp; _func2)
+<a name="l00106"></a>00106         {
+<a name="l00107"></a>00107             <span class="comment">// The third argument is dummy, it is ignored actually.</span>
+<a name="l00108"></a>00108             parallel_invoke_noop noop;
+<a name="l00109"></a>00109             internal::spawner&lt;2, function1, function2, parallel_invoke_noop&gt;&amp; sub_root = *<span class="keyword">new</span>(allocate_child())internal::spawner&lt;2, function1, function2, parallel_invoke_noop&gt;(_func1, _func2, noop);
+<a name="l00110"></a>00110             spawn(sub_root);
+<a name="l00111"></a>00111         }
+<a name="l00112"></a>00112         <span class="comment">// three arguments</span>
+<a name="l00113"></a>00113         <span class="keyword">template</span> &lt;<span class="keyword">typename</span> function1, <span class="keyword">typename</span> function2, <span class="keyword">typename</span> function3&gt;
+<a name="l00114"></a>00114         <span class="keywordtype">void</span> add_children (<span class="keyword">const</span> function1&amp; _func1, <span class="keyword">const</span> function2&amp; _func2, <span class="keyword">const</span> function3&amp; _func3)
+<a name="l00115"></a>00115         {
+<a name="l00116"></a>00116             internal::spawner&lt;3, function1, function2, function3&gt;&amp; sub_root = *<span class="keyword">new</span>(allocate_child())internal::spawner&lt;3, function1, function2, function3&gt;(_func1, _func2, _func3);
+<a name="l00117"></a>00117             spawn(sub_root);
+<a name="l00118"></a>00118         }
+<a name="l00119"></a>00119 
+<a name="l00120"></a>00120         <span class="comment">// Waits for all child tasks</span>
+<a name="l00121"></a>00121         <span class="keyword">template</span> &lt;<span class="keyword">typename</span> F0&gt;
+<a name="l00122"></a>00122         <span class="keywordtype">void</span> run_and_finish(<span class="keyword">const</span> F0&amp; f0)
+<a name="l00123"></a>00123         {
+<a name="l00124"></a>00124             internal::function_invoker&lt;F0&gt;* invoker = <span class="keyword">new</span> (allocate_child()) internal::function_invoker&lt;F0&gt;(f0);
+<a name="l00125"></a>00125             __TBB_ASSERT(invoker, <span class="stringliteral">"Child task allocation failed"</span>);
+<a name="l00126"></a>00126             spawn_and_wait_for_all(*invoker);
+<a name="l00127"></a>00127         }
+<a name="l00128"></a>00128     };
+<a name="l00129"></a>00129     <span class="comment">// The class destroys root if exception occured as well as in normal case</span>
+<a name="l00130"></a>00130     <span class="keyword">class </span>parallel_invoke_cleaner: internal::no_copy { 
+<a name="l00131"></a>00131     <span class="keyword">public</span>:
+<a name="l00132"></a>00132         parallel_invoke_cleaner(<span class="keywordtype">int</span> number_of_children, <a class="code" href="a00206.html">tbb::task_group_context</a>&amp; context) : root(*new(task::allocate_root(context)) internal::parallel_invoke_helper(number_of_children))
+<a name="l00133"></a>00133         {}
+<a name="l00134"></a>00134         ~parallel_invoke_cleaner(){
+<a name="l00135"></a>00135             root.destroy(root);
+<a name="l00136"></a>00136         }
+<a name="l00137"></a>00137         internal::parallel_invoke_helper&amp; root;
+<a name="l00138"></a>00138     };
+<a name="l00139"></a>00139 } <span class="comment">// namespace internal</span>
+<a name="l00141"></a>00141 <span class="comment"></span>
+<a name="l00145"></a>00145 
+<a name="l00146"></a>00146 
+<a name="l00148"></a>00148 <span class="comment">// parallel_invoke with user-defined context</span>
+<a name="l00149"></a>00149 <span class="comment">// two arguments</span>
+<a name="l00150"></a>00150 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F0, <span class="keyword">typename</span> F1 &gt;
+<a name="l00151"></a><a class="code" href="a00280.html#gd3e2998f171494f94c2103f4eb924084">00151</a> <span class="keywordtype">void</span> <a class="code" href="a00280.html#gd3e2998f171494f94c2103f4eb924084">parallel_invoke</a>(<span class="keyword">const</span> F0&amp; f0, <span class="keyword">const</span> F1&amp; f1, <a class="code" href="a00206.html">tbb::task_group_context</a>&amp; context) {
+<a name="l00152"></a>00152     internal::parallel_invoke_cleaner cleaner(2, context);
+<a name="l00153"></a>00153     internal::parallel_invoke_helper&amp; root = cleaner.root;
+<a name="l00154"></a>00154 
+<a name="l00155"></a>00155     root.add_child(f1);
+<a name="l00156"></a>00156 
+<a name="l00157"></a>00157     root.run_and_finish(f0);
+<a name="l00158"></a>00158 }
+<a name="l00159"></a>00159 
+<a name="l00160"></a>00160 <span class="comment">// three arguments</span>
+<a name="l00161"></a>00161 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F0, <span class="keyword">typename</span> F1, <span class="keyword">typename</span> F2 &gt;
+<a name="l00162"></a>00162 <span class="keywordtype">void</span> <a class="code" href="a00280.html#gd3e2998f171494f94c2103f4eb924084">parallel_invoke</a>(<span class="keyword">const</span> F0&amp; f0, <span class="keyword">const</span> F1&amp; f1, <span class="keyword">const</span> F2&amp; f2, <a class="code" href="a00206.html">tbb::task_group_context</a>&amp; context) {
+<a name="l00163"></a>00163     internal::parallel_invoke_cleaner cleaner(3, context);
+<a name="l00164"></a>00164     internal::parallel_invoke_helper&amp; root = cleaner.root;
+<a name="l00165"></a>00165 
+<a name="l00166"></a>00166     root.add_child(f2);
+<a name="l00167"></a>00167     root.add_child(f1);
+<a name="l00168"></a>00168 
+<a name="l00169"></a>00169     root.run_and_finish(f0);
+<a name="l00170"></a>00170 }
+<a name="l00171"></a>00171 
+<a name="l00172"></a>00172 <span class="comment">// four arguments</span>
+<a name="l00173"></a>00173 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F0, <span class="keyword">typename</span> F1, <span class="keyword">typename</span> F2, <span class="keyword">typename</span> F3&gt;
+<a name="l00174"></a>00174 <span class="keywordtype">void</span> <a class="code" href="a00280.html#gd3e2998f171494f94c2103f4eb924084">parallel_invoke</a>(<span class="keyword">const</span> F0&amp; f0, <span class="keyword">const</span> F1&amp; f1, <span class="keyword">const</span> F2&amp; f2, <span class="keyword">const</span> F3&amp; f3,
+<a name="l00175"></a>00175                      <a class="code" href="a00206.html">tbb::task_group_context</a>&amp; context)
+<a name="l00176"></a>00176 {
+<a name="l00177"></a>00177     internal::parallel_invoke_cleaner cleaner(4, context);
+<a name="l00178"></a>00178     internal::parallel_invoke_helper&amp; root = cleaner.root;
+<a name="l00179"></a>00179 
+<a name="l00180"></a>00180     root.add_child(f3);
+<a name="l00181"></a>00181     root.add_child(f2);
+<a name="l00182"></a>00182     root.add_child(f1);
+<a name="l00183"></a>00183 
+<a name="l00184"></a>00184     root.run_and_finish(f0);
+<a name="l00185"></a>00185 }
+<a name="l00186"></a>00186 
+<a name="l00187"></a>00187 <span class="comment">// five arguments</span>
+<a name="l00188"></a>00188 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F0, <span class="keyword">typename</span> F1, <span class="keyword">typename</span> F2, <span class="keyword">typename</span> F3, <span class="keyword">typename</span> F4 &gt;
+<a name="l00189"></a>00189 <span class="keywordtype">void</span> <a class="code" href="a00280.html#gd3e2998f171494f94c2103f4eb924084">parallel_invoke</a>(<span class="keyword">const</span> F0&amp; f0, <span class="keyword">const</span> F1&amp; f1, <span class="keyword">const</span> F2&amp; f2, <span class="keyword">const</span> F3&amp; f3, <span class="keyword">const</span> F4&amp; f4,
+<a name="l00190"></a>00190                      <a class="code" href="a00206.html">tbb::task_group_context</a>&amp; context)
+<a name="l00191"></a>00191 {
+<a name="l00192"></a>00192     internal::parallel_invoke_cleaner cleaner(3, context);
+<a name="l00193"></a>00193     internal::parallel_invoke_helper&amp; root = cleaner.root;
+<a name="l00194"></a>00194 
+<a name="l00195"></a>00195     root.add_children(f4, f3);
+<a name="l00196"></a>00196     root.add_children(f2, f1);
+<a name="l00197"></a>00197 
+<a name="l00198"></a>00198     root.run_and_finish(f0);
+<a name="l00199"></a>00199 }
+<a name="l00200"></a>00200 
+<a name="l00201"></a>00201 <span class="comment">// six arguments</span>
+<a name="l00202"></a>00202 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F0, <span class="keyword">typename</span> F1, <span class="keyword">typename</span> F2, <span class="keyword">typename</span> F3, <span class="keyword">typename</span> F4, <span class="keyword">typename</span> F5&gt;
+<a name="l00203"></a>00203 <span class="keywordtype">void</span> <a class="code" href="a00280.html#gd3e2998f171494f94c2103f4eb924084">parallel_invoke</a>(<span class="keyword">const</span> F0&amp; f0, <span class="keyword">const</span> F1&amp; f1, <span class="keyword">const</span> F2&amp; f2, <span class="keyword">const</span> F3&amp; f3, <span class="keyword">const</span> F4&amp; f4, <span class="keyword">const</span> F5&amp; f5,
+<a name="l00204"></a>00204                      <a class="code" href="a00206.html">tbb::task_group_context</a>&amp; context)
+<a name="l00205"></a>00205 {
+<a name="l00206"></a>00206     internal::parallel_invoke_cleaner cleaner(3, context);
+<a name="l00207"></a>00207     internal::parallel_invoke_helper&amp; root = cleaner.root;
+<a name="l00208"></a>00208 
+<a name="l00209"></a>00209     root.add_children(f5, f4, f3);
+<a name="l00210"></a>00210     root.add_children(f2, f1);
+<a name="l00211"></a>00211 
+<a name="l00212"></a>00212     root.run_and_finish(f0);
+<a name="l00213"></a>00213 }
+<a name="l00214"></a>00214 
+<a name="l00215"></a>00215 <span class="comment">// seven arguments</span>
+<a name="l00216"></a>00216 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F0, <span class="keyword">typename</span> F1, <span class="keyword">typename</span> F2, <span class="keyword">typename</span> F3, <span class="keyword">typename</span> F4, <span class="keyword">typename</span> F5, <span class="keyword">typename</span> F6&gt;
+<a name="l00217"></a>00217 <span class="keywordtype">void</span> <a class="code" href="a00280.html#gd3e2998f171494f94c2103f4eb924084">parallel_invoke</a>(<span class="keyword">const</span> F0&amp; f0, <span class="keyword">const</span> F1&amp; f1, <span class="keyword">const</span> F2&amp; f2, <span class="keyword">const</span> F3&amp; f3, <span class="keyword">const</span> F4&amp; f4,
+<a name="l00218"></a>00218                      <span class="keyword">const</span> F5&amp; f5, <span class="keyword">const</span> F6&amp; f6,
+<a name="l00219"></a>00219                      <a class="code" href="a00206.html">tbb::task_group_context</a>&amp; context)
+<a name="l00220"></a>00220 {
+<a name="l00221"></a>00221     internal::parallel_invoke_cleaner cleaner(3, context);
+<a name="l00222"></a>00222     internal::parallel_invoke_helper&amp; root = cleaner.root;
+<a name="l00223"></a>00223 
+<a name="l00224"></a>00224     root.add_children(f6, f5, f4);
+<a name="l00225"></a>00225     root.add_children(f3, f2, f1);
+<a name="l00226"></a>00226 
+<a name="l00227"></a>00227     root.run_and_finish(f0);
+<a name="l00228"></a>00228 }
+<a name="l00229"></a>00229 
+<a name="l00230"></a>00230 <span class="comment">// eight arguments</span>
+<a name="l00231"></a>00231 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F0, <span class="keyword">typename</span> F1, <span class="keyword">typename</span> F2, <span class="keyword">typename</span> F3, <span class="keyword">typename</span> F4,
+<a name="l00232"></a>00232          <span class="keyword">typename</span> F5, <span class="keyword">typename</span> F6, <span class="keyword">typename</span> F7&gt;
+<a name="l00233"></a>00233 <span class="keywordtype">void</span> <a class="code" href="a00280.html#gd3e2998f171494f94c2103f4eb924084">parallel_invoke</a>(<span class="keyword">const</span> F0&amp; f0, <span class="keyword">const</span> F1&amp; f1, <span class="keyword">const</span> F2&amp; f2, <span class="keyword">const</span> F3&amp; f3, <span class="keyword">const</span> F4&amp; f4,
+<a name="l00234"></a>00234                      <span class="keyword">const</span> F5&amp; f5, <span class="keyword">const</span> F6&amp; f6, <span class="keyword">const</span> F7&amp; f7,
+<a name="l00235"></a>00235                      <a class="code" href="a00206.html">tbb::task_group_context</a>&amp; context)
+<a name="l00236"></a>00236 {
+<a name="l00237"></a>00237     internal::parallel_invoke_cleaner cleaner(4, context);
+<a name="l00238"></a>00238     internal::parallel_invoke_helper&amp; root = cleaner.root;
+<a name="l00239"></a>00239 
+<a name="l00240"></a>00240     root.add_children(f7, f6, f5);
+<a name="l00241"></a>00241     root.add_children(f4, f3);
+<a name="l00242"></a>00242     root.add_children(f2, f1);
+<a name="l00243"></a>00243 
+<a name="l00244"></a>00244     root.run_and_finish(f0);
+<a name="l00245"></a>00245 }
+<a name="l00246"></a>00246 
+<a name="l00247"></a>00247 <span class="comment">// nine arguments</span>
+<a name="l00248"></a>00248 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F0, <span class="keyword">typename</span> F1, <span class="keyword">typename</span> F2, <span class="keyword">typename</span> F3, <span class="keyword">typename</span> F4,
+<a name="l00249"></a>00249          <span class="keyword">typename</span> F5, <span class="keyword">typename</span> F6, <span class="keyword">typename</span> F7, <span class="keyword">typename</span> F8&gt;
+<a name="l00250"></a>00250 <span class="keywordtype">void</span> <a class="code" href="a00280.html#gd3e2998f171494f94c2103f4eb924084">parallel_invoke</a>(<span class="keyword">const</span> F0&amp; f0, <span class="keyword">const</span> F1&amp; f1, <span class="keyword">const</span> F2&amp; f2, <span class="keyword">const</span> F3&amp; f3, <span class="keyword">const</span> F4&amp; f4,
+<a name="l00251"></a>00251                      <span class="keyword">const</span> F5&amp; f5, <span class="keyword">const</span> F6&amp; f6, <span class="keyword">const</span> F7&amp; f7, <span class="keyword">const</span> F8&amp; f8,
+<a name="l00252"></a>00252                      <a class="code" href="a00206.html">tbb::task_group_context</a>&amp; context)
+<a name="l00253"></a>00253 {
+<a name="l00254"></a>00254     internal::parallel_invoke_cleaner cleaner(4, context);
+<a name="l00255"></a>00255     internal::parallel_invoke_helper&amp; root = cleaner.root;
+<a name="l00256"></a>00256 
+<a name="l00257"></a>00257     root.add_children(f8, f7, f6);
+<a name="l00258"></a>00258     root.add_children(f5, f4, f3);
+<a name="l00259"></a>00259     root.add_children(f2, f1);
+<a name="l00260"></a>00260 
+<a name="l00261"></a>00261     root.run_and_finish(f0);
+<a name="l00262"></a>00262 }
+<a name="l00263"></a>00263 
+<a name="l00264"></a>00264 <span class="comment">// ten arguments</span>
+<a name="l00265"></a>00265 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F0, <span class="keyword">typename</span> F1, <span class="keyword">typename</span> F2, <span class="keyword">typename</span> F3, <span class="keyword">typename</span> F4,
+<a name="l00266"></a>00266          <span class="keyword">typename</span> F5, <span class="keyword">typename</span> F6, <span class="keyword">typename</span> F7, <span class="keyword">typename</span> F8, <span class="keyword">typename</span> F9&gt;
+<a name="l00267"></a>00267 <span class="keywordtype">void</span> <a class="code" href="a00280.html#gd3e2998f171494f94c2103f4eb924084">parallel_invoke</a>(<span class="keyword">const</span> F0&amp; f0, <span class="keyword">const</span> F1&amp; f1, <span class="keyword">const</span> F2&amp; f2, <span class="keyword">const</span> F3&amp; f3, <span class="keyword">const</span> F4&amp; f4,
+<a name="l00268"></a>00268                      <span class="keyword">const</span> F5&amp; f5, <span class="keyword">const</span> F6&amp; f6, <span class="keyword">const</span> F7&amp; f7, <span class="keyword">const</span> F8&amp; f8, <span class="keyword">const</span> F9&amp; f9,
+<a name="l00269"></a>00269                      <a class="code" href="a00206.html">tbb::task_group_context</a>&amp; context)
+<a name="l00270"></a>00270 {
+<a name="l00271"></a>00271     internal::parallel_invoke_cleaner cleaner(4, context);
+<a name="l00272"></a>00272     internal::parallel_invoke_helper&amp; root = cleaner.root;
+<a name="l00273"></a>00273 
+<a name="l00274"></a>00274     root.add_children(f9, f8, f7);
+<a name="l00275"></a>00275     root.add_children(f6, f5, f4);
+<a name="l00276"></a>00276     root.add_children(f3, f2, f1);
+<a name="l00277"></a>00277 
+<a name="l00278"></a>00278     root.run_and_finish(f0);
+<a name="l00279"></a>00279 }
+<a name="l00280"></a>00280 
+<a name="l00281"></a>00281 <span class="comment">// two arguments</span>
+<a name="l00282"></a>00282 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F0, <span class="keyword">typename</span> F1&gt;
+<a name="l00283"></a>00283 <span class="keywordtype">void</span> <a class="code" href="a00280.html#gd3e2998f171494f94c2103f4eb924084">parallel_invoke</a>(<span class="keyword">const</span> F0&amp; f0, <span class="keyword">const</span> F1&amp; f1) {
+<a name="l00284"></a>00284     task_group_context context;
+<a name="l00285"></a>00285     parallel_invoke&lt;F0, F1&gt;(f0, f1, context);
+<a name="l00286"></a>00286 }
+<a name="l00287"></a>00287 <span class="comment">// three arguments</span>
+<a name="l00288"></a>00288 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F0, <span class="keyword">typename</span> F1, <span class="keyword">typename</span> F2&gt;
+<a name="l00289"></a>00289 <span class="keywordtype">void</span> <a class="code" href="a00280.html#gd3e2998f171494f94c2103f4eb924084">parallel_invoke</a>(<span class="keyword">const</span> F0&amp; f0, <span class="keyword">const</span> F1&amp; f1, <span class="keyword">const</span> F2&amp; f2) {
+<a name="l00290"></a>00290     task_group_context context;
+<a name="l00291"></a>00291     parallel_invoke&lt;F0, F1, F2&gt;(f0, f1, f2, context);
+<a name="l00292"></a>00292 }
+<a name="l00293"></a>00293 <span class="comment">// four arguments</span>
+<a name="l00294"></a>00294 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F0, <span class="keyword">typename</span> F1, <span class="keyword">typename</span> F2, <span class="keyword">typename</span> F3 &gt;
+<a name="l00295"></a>00295 <span class="keywordtype">void</span> <a class="code" href="a00280.html#gd3e2998f171494f94c2103f4eb924084">parallel_invoke</a>(<span class="keyword">const</span> F0&amp; f0, <span class="keyword">const</span> F1&amp; f1, <span class="keyword">const</span> F2&amp; f2, <span class="keyword">const</span> F3&amp; f3) {
+<a name="l00296"></a>00296     task_group_context context;
+<a name="l00297"></a>00297     parallel_invoke&lt;F0, F1, F2, F3&gt;(f0, f1, f2, f3, context);
+<a name="l00298"></a>00298 }
+<a name="l00299"></a>00299 <span class="comment">// five arguments</span>
+<a name="l00300"></a>00300 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F0, <span class="keyword">typename</span> F1, <span class="keyword">typename</span> F2, <span class="keyword">typename</span> F3, <span class="keyword">typename</span> F4&gt;
+<a name="l00301"></a>00301 <span class="keywordtype">void</span> <a class="code" href="a00280.html#gd3e2998f171494f94c2103f4eb924084">parallel_invoke</a>(<span class="keyword">const</span> F0&amp; f0, <span class="keyword">const</span> F1&amp; f1, <span class="keyword">const</span> F2&amp; f2, <span class="keyword">const</span> F3&amp; f3, <span class="keyword">const</span> F4&amp; f4) {
+<a name="l00302"></a>00302     task_group_context context;
+<a name="l00303"></a>00303     parallel_invoke&lt;F0, F1, F2, F3, F4&gt;(f0, f1, f2, f3, f4, context);
+<a name="l00304"></a>00304 }
+<a name="l00305"></a>00305 <span class="comment">// six arguments</span>
+<a name="l00306"></a>00306 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F0, <span class="keyword">typename</span> F1, <span class="keyword">typename</span> F2, <span class="keyword">typename</span> F3, <span class="keyword">typename</span> F4, <span class="keyword">typename</span> F5&gt;
+<a name="l00307"></a>00307 <span class="keywordtype">void</span> <a class="code" href="a00280.html#gd3e2998f171494f94c2103f4eb924084">parallel_invoke</a>(<span class="keyword">const</span> F0&amp; f0, <span class="keyword">const</span> F1&amp; f1, <span class="keyword">const</span> F2&amp; f2, <span class="keyword">const</span> F3&amp; f3, <span class="keyword">const</span> F4&amp; f4, <span class="keyword">const</span> F5&amp; f5) {
+<a name="l00308"></a>00308     task_group_context context;
+<a name="l00309"></a>00309     parallel_invoke&lt;F0, F1, F2, F3, F4, F5&gt;(f0, f1, f2, f3, f4, f5, context);
+<a name="l00310"></a>00310 }
+<a name="l00311"></a>00311 <span class="comment">// seven arguments</span>
+<a name="l00312"></a>00312 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F0, <span class="keyword">typename</span> F1, <span class="keyword">typename</span> F2, <span class="keyword">typename</span> F3, <span class="keyword">typename</span> F4, <span class="keyword">typename</span> F5, <span class="keyword">typename</span> F6&gt;
+<a name="l00313"></a>00313 <span class="keywordtype">void</span> <a class="code" href="a00280.html#gd3e2998f171494f94c2103f4eb924084">parallel_invoke</a>(<span class="keyword">const</span> F0&amp; f0, <span class="keyword">const</span> F1&amp; f1, <span class="keyword">const</span> F2&amp; f2, <span class="keyword">const</span> F3&amp; f3, <span class="keyword">const</span> F4&amp; f4,
+<a name="l00314"></a>00314                      <span class="keyword">const</span> F5&amp; f5, <span class="keyword">const</span> F6&amp; f6)
+<a name="l00315"></a>00315 {
+<a name="l00316"></a>00316     task_group_context context;
+<a name="l00317"></a>00317     parallel_invoke&lt;F0, F1, F2, F3, F4, F5, F6&gt;(f0, f1, f2, f3, f4, f5, f6, context);
+<a name="l00318"></a>00318 }
+<a name="l00319"></a>00319 <span class="comment">// eigth arguments</span>
+<a name="l00320"></a>00320 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F0, <span class="keyword">typename</span> F1, <span class="keyword">typename</span> F2, <span class="keyword">typename</span> F3, <span class="keyword">typename</span> F4, 
+<a name="l00321"></a>00321          <span class="keyword">typename</span> F5, <span class="keyword">typename</span> F6, <span class="keyword">typename</span> F7&gt;
+<a name="l00322"></a>00322 <span class="keywordtype">void</span> <a class="code" href="a00280.html#gd3e2998f171494f94c2103f4eb924084">parallel_invoke</a>(<span class="keyword">const</span> F0&amp; f0, <span class="keyword">const</span> F1&amp; f1, <span class="keyword">const</span> F2&amp; f2, <span class="keyword">const</span> F3&amp; f3, <span class="keyword">const</span> F4&amp; f4,
+<a name="l00323"></a>00323                      <span class="keyword">const</span> F5&amp; f5, <span class="keyword">const</span> F6&amp; f6, <span class="keyword">const</span> F7&amp; f7)
+<a name="l00324"></a>00324 {
+<a name="l00325"></a>00325     task_group_context context;
+<a name="l00326"></a>00326     parallel_invoke&lt;F0, F1, F2, F3, F4, F5, F6, F7&gt;(f0, f1, f2, f3, f4, f5, f6, f7, context);
+<a name="l00327"></a>00327 }
+<a name="l00328"></a>00328 <span class="comment">// nine arguments</span>
+<a name="l00329"></a>00329 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F0, <span class="keyword">typename</span> F1, <span class="keyword">typename</span> F2, <span class="keyword">typename</span> F3, <span class="keyword">typename</span> F4,
+<a name="l00330"></a>00330          <span class="keyword">typename</span> F5, <span class="keyword">typename</span> F6, <span class="keyword">typename</span> F7, <span class="keyword">typename</span> F8&gt;
+<a name="l00331"></a>00331 <span class="keywordtype">void</span> <a class="code" href="a00280.html#gd3e2998f171494f94c2103f4eb924084">parallel_invoke</a>(<span class="keyword">const</span> F0&amp; f0, <span class="keyword">const</span> F1&amp; f1, <span class="keyword">const</span> F2&amp; f2, <span class="keyword">const</span> F3&amp; f3, <span class="keyword">const</span> F4&amp; f4,
+<a name="l00332"></a>00332                      <span class="keyword">const</span> F5&amp; f5, <span class="keyword">const</span> F6&amp; f6, <span class="keyword">const</span> F7&amp; f7, <span class="keyword">const</span> F8&amp; f8)
+<a name="l00333"></a>00333 {
+<a name="l00334"></a>00334     task_group_context context;
+<a name="l00335"></a>00335     parallel_invoke&lt;F0, F1, F2, F3, F4, F5, F6, F7, F8&gt;(f0, f1, f2, f3, f4, f5, f6, f7, f8, context);
+<a name="l00336"></a>00336 }
+<a name="l00337"></a>00337 <span class="comment">// ten arguments</span>
+<a name="l00338"></a>00338 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F0, <span class="keyword">typename</span> F1, <span class="keyword">typename</span> F2, <span class="keyword">typename</span> F3, <span class="keyword">typename</span> F4,
+<a name="l00339"></a>00339          <span class="keyword">typename</span> F5, <span class="keyword">typename</span> F6, <span class="keyword">typename</span> F7, <span class="keyword">typename</span> F8, <span class="keyword">typename</span> F9&gt;
+<a name="l00340"></a>00340 <span class="keywordtype">void</span> <a class="code" href="a00280.html#gd3e2998f171494f94c2103f4eb924084">parallel_invoke</a>(<span class="keyword">const</span> F0&amp; f0, <span class="keyword">const</span> F1&amp; f1, <span class="keyword">const</span> F2&amp; f2, <span class="keyword">const</span> F3&amp; f3, <span class="keyword">const</span> F4&amp; f4,
+<a name="l00341"></a>00341                      <span class="keyword">const</span> F5&amp; f5, <span class="keyword">const</span> F6&amp; f6, <span class="keyword">const</span> F7&amp; f7, <span class="keyword">const</span> F8&amp; f8, <span class="keyword">const</span> F9&amp; f9)
+<a name="l00342"></a>00342 {
+<a name="l00343"></a>00343     task_group_context context;
+<a name="l00344"></a>00344     parallel_invoke&lt;F0, F1, F2, F3, F4, F5, F6, F7, F8, F9&gt;(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, context);
+<a name="l00345"></a>00345 }
+<a name="l00346"></a>00346 
+<a name="l00348"></a>00348 
+<a name="l00349"></a>00349 } <span class="comment">// namespace</span>
+<a name="l00350"></a>00350 
+<a name="l00351"></a>00351 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_parallel_invoke_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
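And a minimal usage sketch of the tbb::parallel_invoke overloads rendered above (the Print* functors are illustrative assumptions, not part of the committed files):

#include "tbb/parallel_invoke.h"
#include <cstdio>

struct PrintA { void operator()() const { std::printf("a\n"); } };
struct PrintB { void operator()() const { std::printf("b\n"); } };
struct PrintC { void operator()() const { std::printf("c\n"); } };

int main() {
    // Two-argument form: a task_group_context is created internally
    // and the call returns only after both functors have run.
    tbb::parallel_invoke(PrintA(), PrintB());
    // Three-argument form: f1 and f2 are spawned as children and f0 is
    // run and waited for via run_and_finish.
    tbb::parallel_invoke(PrintA(), PrintB(), PrintC());
    return 0;
}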
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00397.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00397.html
new file mode 100644 (file)
index 0000000..e6b86f8
--- /dev/null
@@ -0,0 +1,360 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>parallel_reduce.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>parallel_reduce.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_parallel_reduce_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_parallel_reduce_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "task.h"</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#include "aligned_space.h"</span>
+<a name="l00026"></a>00026 <span class="preprocessor">#include "partitioner.h"</span>
+<a name="l00027"></a>00027 <span class="preprocessor">#include &lt;new&gt;</span>
+<a name="l00028"></a>00028 
+<a name="l00029"></a>00029 <span class="keyword">namespace </span>tbb {
+<a name="l00030"></a>00030 
+<a name="l00032"></a>00032 <span class="keyword">namespace </span>internal {
+<a name="l00033"></a>00033 
+<a name="l00035"></a>00035     <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC itt_store_pointer_with_release_v3( <span class="keywordtype">void</span>* dst, <span class="keywordtype">void</span>* src );
+<a name="l00036"></a>00036 
+<a name="l00038"></a>00038     <span class="keywordtype">void</span>* __TBB_EXPORTED_FUNC itt_load_pointer_with_acquire_v3( <span class="keyword">const</span> <span class="keywordtype">void</span>* src );
+<a name="l00039"></a>00039 
+<a name="l00040"></a>00040     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt; <span class="keyword">inline</span> <span class="keywordtype">void</span> parallel_reduce_store_body( T*&amp; dst, T* src ) {
+<a name="l00041"></a>00041 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS</span>
+<a name="l00042"></a>00042 <span class="preprocessor"></span>        itt_store_pointer_with_release_v3(&amp;dst,src);
+<a name="l00043"></a>00043 <span class="preprocessor">#else</span>
+<a name="l00044"></a>00044 <span class="preprocessor"></span>        __TBB_store_with_release(dst,src);
+<a name="l00045"></a>00045 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_THREADING_TOOLS */</span>
+<a name="l00046"></a>00046     }
+<a name="l00047"></a>00047 
+<a name="l00048"></a>00048     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt; <span class="keyword">inline</span> T* parallel_reduce_load_body( T*&amp; src ) {
+<a name="l00049"></a>00049 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS</span>
+<a name="l00050"></a>00050 <span class="preprocessor"></span>        <span class="keywordflow">return</span> static_cast&lt;T*&gt;(itt_load_pointer_with_acquire_v3(&amp;src));
+<a name="l00051"></a>00051 <span class="preprocessor">#else</span>
+<a name="l00052"></a>00052 <span class="preprocessor"></span>        <span class="keywordflow">return</span> __TBB_load_with_acquire(src);
+<a name="l00053"></a>00053 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_THREADING_TOOLS */</span>
+<a name="l00054"></a>00054     }
+<a name="l00055"></a>00055 
+<a name="l00057"></a>00057 
+<a name="l00058"></a>00058     <span class="keyword">typedef</span> <span class="keywordtype">char</span> reduction_context;
+<a name="l00059"></a>00059 
+<a name="l00061"></a>00061 
+<a name="l00062"></a>00062     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Body&gt;
+<a name="l00063"></a>00063     <span class="keyword">class </span>finish_reduce: <span class="keyword">public</span> task {
+<a name="l00065"></a>00065         Body* my_body;
+<a name="l00066"></a>00066         <span class="keywordtype">bool</span> has_right_zombie;
+<a name="l00067"></a>00067         <span class="keyword">const</span> reduction_context my_context;
+<a name="l00068"></a>00068         aligned_space&lt;Body,1&gt; zombie_space;
+<a name="l00069"></a>00069         finish_reduce( <span class="keywordtype">char</span> context_ ) : 
+<a name="l00070"></a>00070             my_body(NULL),
+<a name="l00071"></a>00071             has_right_zombie(false),
+<a name="l00072"></a>00072             my_context(context_)
+<a name="l00073"></a>00073         {
+<a name="l00074"></a>00074         }
+<a name="l00075"></a>00075         task* execute() {
+<a name="l00076"></a>00076             <span class="keywordflow">if</span>( has_right_zombie ) {
+<a name="l00077"></a>00077                 <span class="comment">// Right child was stolen.</span>
+<a name="l00078"></a>00078                 Body* s = zombie_space.begin();
+<a name="l00079"></a>00079                 my_body-&gt;join( *s );
+<a name="l00080"></a>00080                 s-&gt;~Body();
+<a name="l00081"></a>00081             }
+<a name="l00082"></a>00082             <span class="keywordflow">if</span>( my_context==1 ) 
+<a name="l00083"></a>00083                 parallel_reduce_store_body( static_cast&lt;finish_reduce*&gt;(parent())-&gt;my_body, my_body );
+<a name="l00084"></a>00084             <span class="keywordflow">return</span> NULL;
+<a name="l00085"></a>00085         }       
+<a name="l00086"></a>00086         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range,<span class="keyword">typename</span> Body_, <span class="keyword">typename</span> Partitioner&gt;
+<a name="l00087"></a>00087         <span class="keyword">friend</span> <span class="keyword">class </span>start_reduce;
+<a name="l00088"></a>00088     };
+<a name="l00089"></a>00089 
+<a name="l00091"></a>00091 
+<a name="l00092"></a>00092     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Partitioner&gt;
+<a name="l00093"></a>00093     <span class="keyword">class </span>start_reduce: <span class="keyword">public</span> task {
+<a name="l00094"></a>00094         <span class="keyword">typedef</span> finish_reduce&lt;Body&gt; finish_type;
+<a name="l00095"></a>00095         Body* my_body;
+<a name="l00096"></a>00096         Range my_range;
+<a name="l00097"></a>00097         <span class="keyword">typename</span> Partitioner::partition_type my_partition;
+<a name="l00098"></a>00098         reduction_context my_context;
+<a name="l00099"></a>00099         <span class="comment">/*override*/</span> task* execute();
+<a name="l00100"></a>00100         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Body_&gt;
+<a name="l00101"></a>00101         <span class="keyword">friend</span> <span class="keyword">class </span>finish_reduce;
+<a name="l00102"></a>00102     
+<a name="l00104"></a>00104         start_reduce( <span class="keyword">const</span> Range&amp; range, Body* body, Partitioner&amp; partitioner ) :
+<a name="l00105"></a>00105             my_body(body),
+<a name="l00106"></a>00106             my_range(range),
+<a name="l00107"></a>00107             my_partition(partitioner),
+<a name="l00108"></a>00108             my_context(0)
+<a name="l00109"></a>00109         {
+<a name="l00110"></a>00110         }
+<a name="l00112"></a>00112 
+<a name="l00113"></a>00113         start_reduce( start_reduce&amp; parent_, split ) :
+<a name="l00114"></a>00114             my_body(parent_.my_body),
+<a name="l00115"></a>00115             my_range(parent_.my_range,split()),
+<a name="l00116"></a>00116             my_partition(parent_.my_partition,split()),
+<a name="l00117"></a>00117             my_context(2)
+<a name="l00118"></a>00118         {
+<a name="l00119"></a>00119             my_partition.set_affinity(*<span class="keyword">this</span>);
+<a name="l00120"></a>00120             parent_.my_context = 1;
+<a name="l00121"></a>00121         }
+<a name="l00123"></a>00123         <span class="comment">/*override*/</span> <span class="keywordtype">void</span> note_affinity( affinity_id <span class="keywordtype">id</span> ) {
+<a name="l00124"></a>00124             my_partition.note_affinity( <span class="keywordtype">id</span> );
+<a name="l00125"></a>00125         }
+<a name="l00126"></a>00126 
+<a name="l00127"></a>00127 <span class="keyword">public</span>:
+<a name="l00128"></a>00128         <span class="keyword">static</span> <span class="keywordtype">void</span> run( <span class="keyword">const</span> Range&amp; range, Body&amp; body, Partitioner&amp; partitioner ) {
+<a name="l00129"></a>00129             <span class="keywordflow">if</span>( !range.empty() ) {
+<a name="l00130"></a>00130 #<span class="keywordflow">if</span> !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP
+<a name="l00131"></a>00131                 <a class="code" href="a00204.html#ce8ce689c26a4ddf343829bc3c73290a">task::spawn_root_and_wait</a>( *<span class="keyword">new</span>(<a class="code" href="a00204.html#23acb0da0afd690da797f9f882027d34">task::allocate_root</a>()) start_reduce(range,&amp;body,partitioner) );
+<a name="l00132"></a>00132 <span class="preprocessor">#else</span>
+<a name="l00133"></a>00133 <span class="preprocessor"></span>                <span class="comment">// Bound context prevents exceptions from body to affect nesting or sibling algorithms,</span>
+<a name="l00134"></a>00134                 <span class="comment">// and allows users to handle exceptions safely by wrapping parallel_for in the try-block.</span>
+<a name="l00135"></a>00135                 task_group_context context;
+<a name="l00136"></a>00136                 <a class="code" href="a00204.html#ce8ce689c26a4ddf343829bc3c73290a">task::spawn_root_and_wait</a>( *<span class="keyword">new</span>(task::allocate_root(context)) start_reduce(range,&amp;body,partitioner) );
+<a name="l00137"></a>00137 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_TASK_GROUP_CONTEXT &amp;&amp; !TBB_JOIN_OUTER_TASK_GROUP */</span>
+<a name="l00138"></a>00138             }
+<a name="l00139"></a>00139         }
+<a name="l00140"></a>00140 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00141"></a>00141 <span class="preprocessor"></span>        <span class="keyword">static</span> <span class="keywordtype">void</span> run( <span class="keyword">const</span> Range&amp; range, Body&amp; body, Partitioner&amp; partitioner, task_group_context&amp; context ) {
+<a name="l00142"></a>00142             <span class="keywordflow">if</span>( !range.empty() ) 
+<a name="l00143"></a>00143                 <a class="code" href="a00204.html#ce8ce689c26a4ddf343829bc3c73290a">task::spawn_root_and_wait</a>( *<span class="keyword">new</span>(task::allocate_root(context)) start_reduce(range,&amp;body,partitioner) );
+<a name="l00144"></a>00144         }
+<a name="l00145"></a>00145 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_TASK_GROUP_CONTEXT */</span>
+<a name="l00146"></a>00146     };
+<a name="l00147"></a>00147 
+<a name="l00148"></a>00148     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Partitioner&gt;
+<a name="l00149"></a>00149     task* start_reduce&lt;Range,Body,Partitioner&gt;::execute() {
+<a name="l00150"></a>00150         <span class="keywordflow">if</span>( my_context==2 ) {
+<a name="l00151"></a>00151             finish_type* p = static_cast&lt;finish_type*&gt;(parent() );
+<a name="l00152"></a>00152             <span class="keywordflow">if</span>( !parallel_reduce_load_body(p-&gt;my_body) ) {
+<a name="l00153"></a>00153                 my_body = <span class="keyword">new</span>( p-&gt;zombie_space.begin() ) Body(*my_body,split());
+<a name="l00154"></a>00154                 p-&gt;has_right_zombie = <span class="keyword">true</span>;
+<a name="l00155"></a>00155             } 
+<a name="l00156"></a>00156         }
+<a name="l00157"></a>00157         <span class="keywordflow">if</span>( !my_range.is_divisible() || my_partition.should_execute_range(*<span class="keyword">this</span>) ) {
+<a name="l00158"></a>00158             (*my_body)( my_range );
+<a name="l00159"></a>00159             <span class="keywordflow">if</span>( my_context==1 ) 
+<a name="l00160"></a>00160                 parallel_reduce_store_body(static_cast&lt;finish_type*&gt;(parent())-&gt;my_body, my_body );
+<a name="l00161"></a>00161             <span class="keywordflow">return</span> my_partition.continue_after_execute_range();
+<a name="l00162"></a>00162         } <span class="keywordflow">else</span> {
+<a name="l00163"></a>00163             finish_type&amp; c = *<span class="keyword">new</span>( allocate_continuation()) finish_type(my_context);
+<a name="l00164"></a>00164             recycle_as_child_of(c);
+<a name="l00165"></a>00165             c.set_ref_count(2);    
+<a name="l00166"></a>00166             <span class="keywordtype">bool</span> delay = my_partition.decide_whether_to_delay();
+<a name="l00167"></a>00167             start_reduce&amp; b = *<span class="keyword">new</span>( c.allocate_child() ) start_reduce(*<span class="keyword">this</span>,split());
+<a name="l00168"></a>00168             my_partition.spawn_or_delay(delay,b);
+<a name="l00169"></a>00169             <span class="keywordflow">return</span> <span class="keyword">this</span>;
+<a name="l00170"></a>00170         }
+<a name="l00171"></a>00171     } 
+<a name="l00172"></a>00172 
+<a name="l00174"></a>00174 
+<a name="l00178"></a>00178     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Value, <span class="keyword">typename</span> RealBody, <span class="keyword">typename</span> Reduction&gt;
+<a name="l00179"></a>00179     <span class="keyword">class </span>lambda_reduce_body {
+<a name="l00180"></a>00180 
+<a name="l00181"></a>00181 <span class="comment">//FIXME: decide if my_real_body, my_reduction, and identity_element should be copied or referenced</span>
+<a name="l00182"></a>00182 <span class="comment">//       (might require some performance measurements)</span>
+<a name="l00183"></a>00183 
+<a name="l00184"></a>00184         <span class="keyword">const</span> Value&amp;     identity_element;
+<a name="l00185"></a>00185         <span class="keyword">const</span> RealBody&amp;  my_real_body;
+<a name="l00186"></a>00186         <span class="keyword">const</span> Reduction&amp; my_reduction;
+<a name="l00187"></a>00187         Value            my_value;
+<a name="l00188"></a>00188         lambda_reduce_body&amp; operator= ( <span class="keyword">const</span> lambda_reduce_body&amp; other );
+<a name="l00189"></a>00189     <span class="keyword">public</span>:
+<a name="l00190"></a>00190         lambda_reduce_body( <span class="keyword">const</span> Value&amp; identity, <span class="keyword">const</span> RealBody&amp; body, <span class="keyword">const</span> Reduction&amp; reduction )
+<a name="l00191"></a>00191             : identity_element(identity)
+<a name="l00192"></a>00192             , my_real_body(body)
+<a name="l00193"></a>00193             , my_reduction(reduction)
+<a name="l00194"></a>00194             , my_value(identity)
+<a name="l00195"></a>00195         { }
+<a name="l00196"></a>00196         lambda_reduce_body( <span class="keyword">const</span> lambda_reduce_body&amp; other )
+<a name="l00197"></a>00197             : identity_element(other.identity_element)
+<a name="l00198"></a>00198             , my_real_body(other.my_real_body)
+<a name="l00199"></a>00199             , my_reduction(other.my_reduction)
+<a name="l00200"></a>00200             , my_value(other.my_value)
+<a name="l00201"></a>00201         { }
+<a name="l00202"></a>00202         lambda_reduce_body( lambda_reduce_body&amp; other, <a class="code" href="a00203.html">tbb::split</a> )
+<a name="l00203"></a>00203             : identity_element(other.identity_element)
+<a name="l00204"></a>00204             , my_real_body(other.my_real_body)
+<a name="l00205"></a>00205             , my_reduction(other.my_reduction)
+<a name="l00206"></a>00206             , my_value(other.identity_element)
+<a name="l00207"></a>00207         { }
+<a name="l00208"></a>00208         <span class="keywordtype">void</span> operator()(Range&amp; range) {
+<a name="l00209"></a>00209             my_value = my_real_body(range, const_cast&lt;const Value&amp;&gt;(my_value));
+<a name="l00210"></a>00210         }
+<a name="l00211"></a>00211         <span class="keywordtype">void</span> join( lambda_reduce_body&amp; rhs ) {
+<a name="l00212"></a>00212             my_value = my_reduction(const_cast&lt;const Value&amp;&gt;(my_value), const_cast&lt;const Value&amp;&gt;(rhs.my_value));
+<a name="l00213"></a>00213         }
+<a name="l00214"></a>00214         Value result()<span class="keyword"> const </span>{
+<a name="l00215"></a>00215             <span class="keywordflow">return</span> my_value;
+<a name="l00216"></a>00216         }
+<a name="l00217"></a>00217     };
+<a name="l00218"></a>00218 
+<a name="l00219"></a>00219 } <span class="comment">// namespace internal</span>
+<a name="l00221"></a>00221 <span class="comment"></span>
+<a name="l00222"></a>00222 <span class="comment">// Requirements on Range concept are documented in blocked_range.h</span>
+<a name="l00223"></a>00223 
+<a name="l00242"></a>00242 
+<a name="l00244"></a>00244 
+<a name="l00245"></a>00245 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body&gt;
+<a name="l00246"></a><a class="code" href="a00280.html#g1b3d59c5eb62683c5754db6970392fa3">00246</a> <span class="keywordtype">void</span> <a class="code" href="a00280.html#g1b3d59c5eb62683c5754db6970392fa3">parallel_reduce</a>( <span class="keyword">const</span> Range&amp; range, Body&amp; body ) {
+<a name="l00247"></a>00247     internal::start_reduce&lt;Range,Body, const __TBB_DEFAULT_PARTITIONER&gt;::run( range, body, __TBB_DEFAULT_PARTITIONER() );
+<a name="l00248"></a>00248 }
+<a name="l00249"></a>00249 
+<a name="l00251"></a>00251 
+<a name="l00252"></a>00252 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body&gt;
+<a name="l00253"></a><a class="code" href="a00280.html#gec1b7c03f9da909bef5db12e3d41bed3">00253</a> <span class="keywordtype">void</span> <a class="code" href="a00280.html#g1b3d59c5eb62683c5754db6970392fa3">parallel_reduce</a>( <span class="keyword">const</span> Range&amp; range, Body&amp; body, <span class="keyword">const</span> <a class="code" href="a00198.html">simple_partitioner</a>&amp; partitioner ) {
+<a name="l00254"></a>00254     internal::start_reduce&lt;Range,Body,const simple_partitioner&gt;::run( range, body, partitioner );
+<a name="l00255"></a>00255 }
+<a name="l00256"></a>00256 
+<a name="l00258"></a>00258 
+<a name="l00259"></a>00259 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body&gt;
+<a name="l00260"></a><a class="code" href="a00280.html#g18a19157e6245992fc00ca0adeb7dd37">00260</a> <span class="keywordtype">void</span> <a class="code" href="a00280.html#g1b3d59c5eb62683c5754db6970392fa3">parallel_reduce</a>( <span class="keyword">const</span> Range&amp; range, Body&amp; body, <span class="keyword">const</span> <a class="code" href="a00150.html">auto_partitioner</a>&amp; partitioner ) {
+<a name="l00261"></a>00261     internal::start_reduce&lt;Range,Body,const auto_partitioner&gt;::run( range, body, partitioner );
+<a name="l00262"></a>00262 }
+<a name="l00263"></a>00263 
+<a name="l00265"></a>00265 
+<a name="l00266"></a>00266 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body&gt;
+<a name="l00267"></a><a class="code" href="a00280.html#gc61e73fcc36c92d79a217fc355ff4a6b">00267</a> <span class="keywordtype">void</span> <a class="code" href="a00280.html#g1b3d59c5eb62683c5754db6970392fa3">parallel_reduce</a>( <span class="keyword">const</span> Range&amp; range, Body&amp; body, <a class="code" href="a00145.html">affinity_partitioner</a>&amp; partitioner ) {
+<a name="l00268"></a>00268     internal::start_reduce&lt;Range,Body,affinity_partitioner&gt;::run( range, body, partitioner );
+<a name="l00269"></a>00269 }
+<a name="l00270"></a>00270 
+<a name="l00271"></a>00271 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00273"></a>00273 <span class="preprocessor"></span>
+<a name="l00274"></a>00274 <span class="preprocessor">template&lt;typename Range, typename Body&gt;</span>
+<a name="l00275"></a><a class="code" href="a00280.html#g45cb00c42a18e334bbde8b7535afe460">00275</a> <span class="preprocessor"></span><span class="keywordtype">void</span> <a class="code" href="a00280.html#g1b3d59c5eb62683c5754db6970392fa3">parallel_reduce</a>( <span class="keyword">const</span> Range&amp; range, Body&amp; body, <span class="keyword">const</span> <a class="code" href="a00198.html">simple_partitioner</a>&amp; partitioner, <a class="code" href="a00206.html">task_group_context</a>&amp; context ) {
+<a name="l00276"></a>00276     internal::start_reduce&lt;Range,Body,const simple_partitioner&gt;::run( range, body, partitioner, context );
+<a name="l00277"></a>00277 }
+<a name="l00278"></a>00278 
+<a name="l00280"></a>00280 
+<a name="l00281"></a>00281 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body&gt;
+<a name="l00282"></a><a class="code" href="a00280.html#g1c1ea1d7c61b3c225e92c70d669a53a5">00282</a> <span class="keywordtype">void</span> <a class="code" href="a00280.html#g1b3d59c5eb62683c5754db6970392fa3">parallel_reduce</a>( <span class="keyword">const</span> Range&amp; range, Body&amp; body, <span class="keyword">const</span> <a class="code" href="a00150.html">auto_partitioner</a>&amp; partitioner, <a class="code" href="a00206.html">task_group_context</a>&amp; context ) {
+<a name="l00283"></a>00283     internal::start_reduce&lt;Range,Body,const auto_partitioner&gt;::run( range, body, partitioner, context );
+<a name="l00284"></a>00284 }
+<a name="l00285"></a>00285 
+<a name="l00287"></a>00287 
+<a name="l00288"></a>00288 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body&gt;
+<a name="l00289"></a><a class="code" href="a00280.html#gd9ac3a3811060314695f33b703c6e11b">00289</a> <span class="keywordtype">void</span> <a class="code" href="a00280.html#g1b3d59c5eb62683c5754db6970392fa3">parallel_reduce</a>( <span class="keyword">const</span> Range&amp; range, Body&amp; body, <a class="code" href="a00145.html">affinity_partitioner</a>&amp; partitioner, <a class="code" href="a00206.html">task_group_context</a>&amp; context ) {
+<a name="l00290"></a>00290     internal::start_reduce&lt;Range,Body,affinity_partitioner&gt;::run( range, body, partitioner, context );
+<a name="l00291"></a>00291 }
+<a name="l00292"></a>00292 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_TASK_GROUP_CONTEXT */</span>
+<a name="l00293"></a>00293 
+<a name="l00297"></a>00297 
+<a name="l00298"></a>00298 
+<a name="l00299"></a>00299 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Value, <span class="keyword">typename</span> RealBody, <span class="keyword">typename</span> Reduction&gt;
+<a name="l00300"></a><a class="code" href="a00280.html#gc9412e09fb01fcad8c018ea9cffb28ef">00300</a> Value <a class="code" href="a00280.html#g1b3d59c5eb62683c5754db6970392fa3">parallel_reduce</a>( <span class="keyword">const</span> Range&amp; range, <span class="keyword">const</span> Value&amp; identity, <span class="keyword">const</span> RealBody&amp; real_body, <span class="keyword">const</span> Reduction&amp; reduction ) {
+<a name="l00301"></a>00301     internal::lambda_reduce_body&lt;Range,Value,RealBody,Reduction&gt; body(identity, real_body, reduction);
+<a name="l00302"></a>00302     internal::start_reduce&lt;Range,internal::lambda_reduce_body&lt;Range,Value,RealBody,Reduction&gt;,<span class="keyword">const</span> __TBB_DEFAULT_PARTITIONER&gt;
+<a name="l00303"></a>00303                           ::run(range, body, __TBB_DEFAULT_PARTITIONER() );
+<a name="l00304"></a>00304     <span class="keywordflow">return</span> body.result();
+<a name="l00305"></a>00305 }
+<a name="l00306"></a>00306 
+<a name="l00308"></a>00308 
+<a name="l00309"></a>00309 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Value, <span class="keyword">typename</span> RealBody, <span class="keyword">typename</span> Reduction&gt;
+<a name="l00310"></a><a class="code" href="a00280.html#gaddffeec0e892ac3d6fc7fc2053e1eca">00310</a> Value <a class="code" href="a00280.html#g1b3d59c5eb62683c5754db6970392fa3">parallel_reduce</a>( <span class="keyword">const</span> Range&amp; range, <span class="keyword">const</span> Value&amp; identity, <span class="keyword">const</span> RealBody&amp; real_body, <span class="keyword">const</span> Reduction&amp; reduction,
+<a name="l00311"></a>00311                        <span class="keyword">const</span> <a class="code" href="a00198.html">simple_partitioner</a>&amp; partitioner ) {
+<a name="l00312"></a>00312     internal::lambda_reduce_body&lt;Range,Value,RealBody,Reduction&gt; body(identity, real_body, reduction);
+<a name="l00313"></a>00313     internal::start_reduce&lt;Range,internal::lambda_reduce_body&lt;Range,Value,RealBody,Reduction&gt;,<span class="keyword">const</span> <a class="code" href="a00198.html">simple_partitioner</a>&gt;
+<a name="l00314"></a>00314                           ::run(range, body, partitioner );
+<a name="l00315"></a>00315     <span class="keywordflow">return</span> body.result();
+<a name="l00316"></a>00316 }
+<a name="l00317"></a>00317 
+<a name="l00319"></a>00319 
+<a name="l00320"></a>00320 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Value, <span class="keyword">typename</span> RealBody, <span class="keyword">typename</span> Reduction&gt;
+<a name="l00321"></a><a class="code" href="a00280.html#gb175401f0729e40dd2c5860a17c14385">00321</a> Value <a class="code" href="a00280.html#g1b3d59c5eb62683c5754db6970392fa3">parallel_reduce</a>( <span class="keyword">const</span> Range&amp; range, <span class="keyword">const</span> Value&amp; identity, <span class="keyword">const</span> RealBody&amp; real_body, <span class="keyword">const</span> Reduction&amp; reduction,
+<a name="l00322"></a>00322                        <span class="keyword">const</span> <a class="code" href="a00150.html">auto_partitioner</a>&amp; partitioner ) {
+<a name="l00323"></a>00323     internal::lambda_reduce_body&lt;Range,Value,RealBody,Reduction&gt; body(identity, real_body, reduction);
+<a name="l00324"></a>00324     internal::start_reduce&lt;Range,internal::lambda_reduce_body&lt;Range,Value,RealBody,Reduction&gt;,<span class="keyword">const</span> <a class="code" href="a00150.html">auto_partitioner</a>&gt;
+<a name="l00325"></a>00325                           ::run( range, body, partitioner );
+<a name="l00326"></a>00326     <span class="keywordflow">return</span> body.result();
+<a name="l00327"></a>00327 }
+<a name="l00328"></a>00328 
+<a name="l00330"></a>00330 
+<a name="l00331"></a>00331 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Value, <span class="keyword">typename</span> RealBody, <span class="keyword">typename</span> Reduction&gt;
+<a name="l00332"></a><a class="code" href="a00280.html#gb7f1f1828ae2b330ce05b8513a495154">00332</a> Value <a class="code" href="a00280.html#g1b3d59c5eb62683c5754db6970392fa3">parallel_reduce</a>( <span class="keyword">const</span> Range&amp; range, <span class="keyword">const</span> Value&amp; identity, <span class="keyword">const</span> RealBody&amp; real_body, <span class="keyword">const</span> Reduction&amp; reduction,
+<a name="l00333"></a>00333                        <a class="code" href="a00145.html">affinity_partitioner</a>&amp; partitioner ) {
+<a name="l00334"></a>00334     internal::lambda_reduce_body&lt;Range,Value,RealBody,Reduction&gt; body(identity, real_body, reduction);
+<a name="l00335"></a>00335     internal::start_reduce&lt;Range,internal::lambda_reduce_body&lt;Range,Value,RealBody,Reduction&gt;,<a class="code" href="a00145.html">affinity_partitioner</a>&gt;
+<a name="l00336"></a>00336                                         ::run( range, body, partitioner );
+<a name="l00337"></a>00337     <span class="keywordflow">return</span> body.result();
+<a name="l00338"></a>00338 }
+<a name="l00339"></a>00339 
+<a name="l00340"></a>00340 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00342"></a>00342 <span class="preprocessor"></span>
+<a name="l00343"></a>00343 <span class="preprocessor">template&lt;typename Range, typename Value, typename RealBody, typename Reduction&gt;</span>
+<a name="l00344"></a><a class="code" href="a00280.html#gfbc0cc2026d87f11a96bcd62788f5bb5">00344</a> <span class="preprocessor"></span>Value <a class="code" href="a00280.html#g1b3d59c5eb62683c5754db6970392fa3">parallel_reduce</a>( <span class="keyword">const</span> Range&amp; range, <span class="keyword">const</span> Value&amp; identity, <span class="keyword">const</span> RealBody&amp; real_body, <span class="keyword">const</span> Reduction&amp; reduction,
+<a name="l00345"></a>00345                        <span class="keyword">const</span> <a class="code" href="a00198.html">simple_partitioner</a>&amp; partitioner, <a class="code" href="a00206.html">task_group_context</a>&amp; context ) {
+<a name="l00346"></a>00346     internal::lambda_reduce_body&lt;Range,Value,RealBody,Reduction&gt; body(identity, real_body, reduction);
+<a name="l00347"></a>00347     internal::start_reduce&lt;Range,internal::lambda_reduce_body&lt;Range,Value,RealBody,Reduction&gt;,<span class="keyword">const</span> <a class="code" href="a00198.html">simple_partitioner</a>&gt;
+<a name="l00348"></a>00348                           ::run( range, body, partitioner, context );
+<a name="l00349"></a>00349     <span class="keywordflow">return</span> body.result();
+<a name="l00350"></a>00350 }
+<a name="l00351"></a>00351 
+<a name="l00353"></a>00353 
+<a name="l00354"></a>00354 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Value, <span class="keyword">typename</span> RealBody, <span class="keyword">typename</span> Reduction&gt;
+<a name="l00355"></a><a class="code" href="a00280.html#g630c90a399937d9d4ae70ff883186dfd">00355</a> Value <a class="code" href="a00280.html#g1b3d59c5eb62683c5754db6970392fa3">parallel_reduce</a>( <span class="keyword">const</span> Range&amp; range, <span class="keyword">const</span> Value&amp; identity, <span class="keyword">const</span> RealBody&amp; real_body, <span class="keyword">const</span> Reduction&amp; reduction,
+<a name="l00356"></a>00356                        <span class="keyword">const</span> <a class="code" href="a00150.html">auto_partitioner</a>&amp; partitioner, <a class="code" href="a00206.html">task_group_context</a>&amp; context ) {
+<a name="l00357"></a>00357     internal::lambda_reduce_body&lt;Range,Value,RealBody,Reduction&gt; body(identity, real_body, reduction);
+<a name="l00358"></a>00358     internal::start_reduce&lt;Range,internal::lambda_reduce_body&lt;Range,Value,RealBody,Reduction&gt;,<span class="keyword">const</span> <a class="code" href="a00150.html">auto_partitioner</a>&gt;
+<a name="l00359"></a>00359                           ::run( range, body, partitioner, context );
+<a name="l00360"></a>00360     <span class="keywordflow">return</span> body.result();
+<a name="l00361"></a>00361 }
+<a name="l00362"></a>00362 
+<a name="l00364"></a>00364 
+<a name="l00365"></a>00365 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Value, <span class="keyword">typename</span> RealBody, <span class="keyword">typename</span> Reduction&gt;
+<a name="l00366"></a><a class="code" href="a00280.html#g496bd7eadb3b97495ccb5655ef90319e">00366</a> Value <a class="code" href="a00280.html#g1b3d59c5eb62683c5754db6970392fa3">parallel_reduce</a>( <span class="keyword">const</span> Range&amp; range, <span class="keyword">const</span> Value&amp; identity, <span class="keyword">const</span> RealBody&amp; real_body, <span class="keyword">const</span> Reduction&amp; reduction,
+<a name="l00367"></a>00367                        <a class="code" href="a00145.html">affinity_partitioner</a>&amp; partitioner, <a class="code" href="a00206.html">task_group_context</a>&amp; context ) {
+<a name="l00368"></a>00368     internal::lambda_reduce_body&lt;Range,Value,RealBody,Reduction&gt; body(identity, real_body, reduction);
+<a name="l00369"></a>00369     internal::start_reduce&lt;Range,internal::lambda_reduce_body&lt;Range,Value,RealBody,Reduction&gt;,<a class="code" href="a00145.html">affinity_partitioner</a>&gt;
+<a name="l00370"></a>00370                                         ::run( range, body, partitioner, context );
+<a name="l00371"></a>00371     <span class="keywordflow">return</span> body.result();
+<a name="l00372"></a>00372 }
+<a name="l00373"></a>00373 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_TASK_GROUP_CONTEXT */</span>
+<a name="l00374"></a>00374 
+<a name="l00375"></a>00375 
+<a name="l00376"></a>00376 } <span class="comment">// namespace tbb</span>
+<a name="l00377"></a>00377 
+<a name="l00378"></a>00378 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_parallel_reduce_H */</span>
+<a name="l00379"></a>00379 
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
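
For orientation, the entry points rendered above come in two families: the imperative form, tbb::parallel_reduce(range, body), which drives a user-supplied Body through operator(), a splitting constructor, and join() and leaves the final result in that Body; and the functional form, which takes an identity value, a range functor, and a reduction functor (wrapped internally by lambda_reduce_body) and returns the result. Below is a minimal usage sketch summing a vector with both forms; the names SumBody, v, and total are illustrative only, and the lambda in the functional call assumes a C++0x-capable compiler (the header itself needs only C++03). Each overload family also accepts an optional partitioner and, when __TBB_TASK_GROUP_CONTEXT is enabled, a task_group_context, exactly as in the signatures above.

#include <vector>
#include <functional>
#include <cstdio>
#include "tbb/blocked_range.h"
#include "tbb/parallel_reduce.h"

// Imperative form: Body supplies operator(), a splitting constructor, and join().
struct SumBody {
    const std::vector<float>* data;
    float sum;
    SumBody( const std::vector<float>& d ) : data(&d), sum(0) {}
    SumBody( SumBody& other, tbb::split ) : data(other.data), sum(0) {}   // splitting constructor
    void operator()( const tbb::blocked_range<size_t>& r ) {
        for( size_t i=r.begin(); i!=r.end(); ++i )
            sum += (*data)[i];                        // accumulate this subrange into the running sum
    }
    void join( const SumBody& rhs ) { sum += rhs.sum; }   // merge the right child's partial sum
};

int main() {
    std::vector<float> v( 1000, 1.0f );

    SumBody body( v );
    tbb::parallel_reduce( tbb::blocked_range<size_t>(0, v.size()), body );
    std::printf( "imperative form: %g\n", body.sum );

    // Functional form: identity, range functor, reduction; lambda_reduce_body does the plumbing.
    float total = tbb::parallel_reduce(
        tbb::blocked_range<size_t>(0, v.size()),
        0.0f,
        [&v]( const tbb::blocked_range<size_t>& r, float running ) -> float {
            for( size_t i=r.begin(); i!=r.end(); ++i )
                running += v[i];
            return running;
        },
        std::plus<float>() );
    std::printf( "functional form: %g\n", total );
    return 0;
}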
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00401.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00401.html
new file mode 100644 (file)
index 0000000..67a4900
--- /dev/null
@@ -0,0 +1,341 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>parallel_scan.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>parallel_scan.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_parallel_scan_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_parallel_scan_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "task.h"</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#include "aligned_space.h"</span>
+<a name="l00026"></a>00026 <span class="preprocessor">#include &lt;new&gt;</span>
+<a name="l00027"></a>00027 <span class="preprocessor">#include "partitioner.h"</span>
+<a name="l00028"></a>00028 
+<a name="l00029"></a>00029 <span class="keyword">namespace </span>tbb {
+<a name="l00030"></a>00030 
+<a name="l00032"></a>00032 
+<a name="l00033"></a><a class="code" href="a00186.html">00033</a> <span class="keyword">struct </span><a class="code" href="a00186.html">pre_scan_tag</a> {
+<a name="l00034"></a>00034     <span class="keyword">static</span> <span class="keywordtype">bool</span> is_final_scan() {<span class="keywordflow">return</span> <span class="keyword">false</span>;}
+<a name="l00035"></a>00035 };
+<a name="l00036"></a>00036 
+<a name="l00038"></a>00038 
+<a name="l00039"></a><a class="code" href="a00171.html">00039</a> <span class="keyword">struct </span><a class="code" href="a00171.html">final_scan_tag</a> {
+<a name="l00040"></a>00040     <span class="keyword">static</span> <span class="keywordtype">bool</span> is_final_scan() {<span class="keywordflow">return</span> <span class="keyword">true</span>;}
+<a name="l00041"></a>00041 };
+<a name="l00042"></a>00042 
+<a name="l00044"></a>00044 <span class="keyword">namespace </span>internal {
+<a name="l00045"></a>00045 
+<a name="l00047"></a>00047 
+<a name="l00048"></a>00048     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body&gt;
+<a name="l00049"></a>00049     <span class="keyword">class </span>final_sum: <span class="keyword">public</span> <a class="code" href="a00204.html">task</a> {
+<a name="l00050"></a>00050     <span class="keyword">public</span>:
+<a name="l00051"></a>00051         Body body;
+<a name="l00052"></a>00052     <span class="keyword">private</span>:
+<a name="l00053"></a>00053         <a class="code" href="a00146.html">aligned_space&lt;Range,1&gt;</a> range;
+<a name="l00055"></a>00055         Body* stuff_last;
+<a name="l00056"></a>00056     <span class="keyword">public</span>:
+<a name="l00057"></a>00057         final_sum( Body&amp; body_ ) :
+<a name="l00058"></a>00058             body(body_,<a class="code" href="a00203.html">split</a>())
+<a name="l00059"></a>00059         {
+<a name="l00060"></a>00060             poison_pointer(stuff_last);
+<a name="l00061"></a>00061         }
+<a name="l00062"></a>00062         ~final_sum() {
+<a name="l00063"></a>00063             range.begin()-&gt;~Range();
+<a name="l00064"></a>00064         }     
+<a name="l00065"></a>00065         <span class="keywordtype">void</span> finish_construction( <span class="keyword">const</span> Range&amp; range_, Body* stuff_last_ ) {
+<a name="l00066"></a>00066             <span class="keyword">new</span>( range.begin() ) Range(range_);
+<a name="l00067"></a>00067             stuff_last = stuff_last_;
+<a name="l00068"></a>00068         }
+<a name="l00069"></a>00069     <span class="keyword">private</span>:
+<a name="l00070"></a>00070         <span class="comment">/*override*/</span> task* execute() {
+<a name="l00071"></a>00071             body( *range.begin(), final_scan_tag() );
+<a name="l00072"></a>00072             <span class="keywordflow">if</span>( stuff_last )
+<a name="l00073"></a>00073                 stuff_last-&gt;assign(body);
+<a name="l00074"></a>00074             <span class="keywordflow">return</span> NULL;
+<a name="l00075"></a>00075         }
+<a name="l00076"></a>00076     };       
+<a name="l00077"></a>00077 
+<a name="l00079"></a>00079 
+<a name="l00080"></a>00080     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body&gt;
+<a name="l00081"></a>00081     <span class="keyword">class </span>sum_node: <span class="keyword">public</span> task {
+<a name="l00082"></a>00082         <span class="keyword">typedef</span> final_sum&lt;Range,Body&gt; final_sum_type;
+<a name="l00083"></a>00083     <span class="keyword">public</span>:
+<a name="l00084"></a>00084         final_sum_type *incoming; 
+<a name="l00085"></a>00085         final_sum_type *body;
+<a name="l00086"></a>00086         Body *stuff_last;
+<a name="l00087"></a>00087     <span class="keyword">private</span>:
+<a name="l00088"></a>00088         final_sum_type *left_sum;
+<a name="l00089"></a>00089         sum_node *left;
+<a name="l00090"></a>00090         sum_node *right;     
+<a name="l00091"></a>00091         <span class="keywordtype">bool</span> left_is_final;
+<a name="l00092"></a>00092         Range range;
+<a name="l00093"></a>00093         sum_node( <span class="keyword">const</span> Range range_, <span class="keywordtype">bool</span> left_is_final_ ) : 
+<a name="l00094"></a>00094             left_sum(NULL), 
+<a name="l00095"></a>00095             left(NULL), 
+<a name="l00096"></a>00096             right(NULL), 
+<a name="l00097"></a>00097             left_is_final(left_is_final_), 
+<a name="l00098"></a>00098             range(range_)
+<a name="l00099"></a>00099         {
+<a name="l00100"></a>00100             <span class="comment">// Poison fields that will be set by second pass.</span>
+<a name="l00101"></a>00101             poison_pointer(body);
+<a name="l00102"></a>00102             poison_pointer(incoming);
+<a name="l00103"></a>00103         }
+<a name="l00104"></a>00104         task* create_child( <span class="keyword">const</span> Range&amp; range_, final_sum_type&amp; f, sum_node* n, final_sum_type* incoming_, Body* stuff_last_ ) {
+<a name="l00105"></a>00105             <span class="keywordflow">if</span>( !n ) {
+<a name="l00106"></a>00106                 f.recycle_as_child_of( *<span class="keyword">this</span> );
+<a name="l00107"></a>00107                 f.finish_construction( range_, stuff_last_ );
+<a name="l00108"></a>00108                 <span class="keywordflow">return</span> &amp;f;
+<a name="l00109"></a>00109             } <span class="keywordflow">else</span> {
+<a name="l00110"></a>00110                 n-&gt;body = &amp;f;
+<a name="l00111"></a>00111                 n-&gt;incoming = incoming_;
+<a name="l00112"></a>00112                 n-&gt;stuff_last = stuff_last_;
+<a name="l00113"></a>00113                 <span class="keywordflow">return</span> n;
+<a name="l00114"></a>00114             }
+<a name="l00115"></a>00115         }
+<a name="l00116"></a>00116         <span class="comment">/*override*/</span> task* execute() {
+<a name="l00117"></a>00117             <span class="keywordflow">if</span>( body ) {
+<a name="l00118"></a>00118                 <span class="keywordflow">if</span>( incoming )
+<a name="l00119"></a>00119                     left_sum-&gt;body.reverse_join( incoming-&gt;body );
+<a name="l00120"></a>00120                 recycle_as_continuation();
+<a name="l00121"></a>00121                 sum_node&amp; c = *<span class="keyword">this</span>;
+<a name="l00122"></a>00122                 task* b = c.create_child(Range(range,split()),*left_sum,right,left_sum,stuff_last);
+<a name="l00123"></a>00123                 task* a = left_is_final ? NULL : c.create_child(range,*body,left,incoming,NULL);
+<a name="l00124"></a>00124                 set_ref_count( (a!=NULL)+(b!=NULL) );
+<a name="l00125"></a>00125                 body = NULL; 
+<a name="l00126"></a>00126                 <span class="keywordflow">if</span>( a ) spawn(*b);
+<a name="l00127"></a>00127                 <span class="keywordflow">else</span> a = b;
+<a name="l00128"></a>00128                 <span class="keywordflow">return</span> a;
+<a name="l00129"></a>00129             } <span class="keywordflow">else</span> {
+<a name="l00130"></a>00130                 <span class="keywordflow">return</span> NULL;
+<a name="l00131"></a>00131             }
+<a name="l00132"></a>00132         }
+<a name="l00133"></a>00133         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range_,<span class="keyword">typename</span> Body_,<span class="keyword">typename</span> Partitioner_&gt;
+<a name="l00134"></a>00134         <span class="keyword">friend</span> <span class="keyword">class </span>start_scan;
+<a name="l00135"></a>00135 
+<a name="l00136"></a>00136         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range_,<span class="keyword">typename</span> Body_&gt;
+<a name="l00137"></a>00137         <span class="keyword">friend</span> <span class="keyword">class </span>finish_scan;
+<a name="l00138"></a>00138     };
+<a name="l00139"></a>00139 
+<a name="l00141"></a>00141 
+<a name="l00142"></a>00142     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body&gt;
+<a name="l00143"></a>00143     <span class="keyword">class </span>finish_scan: <span class="keyword">public</span> task {
+<a name="l00144"></a>00144         <span class="keyword">typedef</span> sum_node&lt;Range,Body&gt; sum_node_type;
+<a name="l00145"></a>00145         <span class="keyword">typedef</span> final_sum&lt;Range,Body&gt; final_sum_type;
+<a name="l00146"></a>00146         final_sum_type** <span class="keyword">const</span> sum;
+<a name="l00147"></a>00147         sum_node_type*&amp; return_slot;
+<a name="l00148"></a>00148     <span class="keyword">public</span>:
+<a name="l00149"></a>00149         final_sum_type* right_zombie;
+<a name="l00150"></a>00150         sum_node_type&amp; result;
+<a name="l00151"></a>00151 
+<a name="l00152"></a>00152         <span class="comment">/*override*/</span> task* execute() {
+<a name="l00153"></a>00153             __TBB_ASSERT( result.ref_count()==(result.left!=NULL)+(result.right!=NULL), NULL );
+<a name="l00154"></a>00154             <span class="keywordflow">if</span>( result.left )
+<a name="l00155"></a>00155                 result.left_is_final = <span class="keyword">false</span>;
+<a name="l00156"></a>00156             <span class="keywordflow">if</span>( right_zombie &amp;&amp; sum ) 
+<a name="l00157"></a>00157                 ((*sum)-&gt;body).reverse_join(result.left_sum-&gt;body);
+<a name="l00158"></a>00158             __TBB_ASSERT( !return_slot, NULL );
+<a name="l00159"></a>00159             <span class="keywordflow">if</span>( right_zombie || result.right ) {
+<a name="l00160"></a>00160                 return_slot = &amp;result;
+<a name="l00161"></a>00161             } <span class="keywordflow">else</span> {
+<a name="l00162"></a>00162                 destroy( result );
+<a name="l00163"></a>00163             }
+<a name="l00164"></a>00164             <span class="keywordflow">if</span>( right_zombie &amp;&amp; !sum &amp;&amp; !result.right ) destroy(*right_zombie);
+<a name="l00165"></a>00165             <span class="keywordflow">return</span> NULL;
+<a name="l00166"></a>00166         }
+<a name="l00167"></a>00167 
+<a name="l00168"></a>00168         finish_scan( sum_node_type*&amp; return_slot_, final_sum_type** sum_, sum_node_type&amp; result_ ) : 
+<a name="l00169"></a>00169             sum(sum_),
+<a name="l00170"></a>00170             return_slot(return_slot_), 
+<a name="l00171"></a>00171             right_zombie(NULL),
+<a name="l00172"></a>00172             result(result_)
+<a name="l00173"></a>00173         {
+<a name="l00174"></a>00174             __TBB_ASSERT( !return_slot, NULL );
+<a name="l00175"></a>00175         }
+<a name="l00176"></a>00176     };
+<a name="l00177"></a>00177 
+<a name="l00179"></a>00179 
+<a name="l00180"></a>00180     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Partitioner=simple_partitioner&gt;
+<a name="l00181"></a>00181     <span class="keyword">class </span>start_scan: <span class="keyword">public</span> task {
+<a name="l00182"></a>00182         <span class="keyword">typedef</span> sum_node&lt;Range,Body&gt; sum_node_type;
+<a name="l00183"></a>00183         <span class="keyword">typedef</span> final_sum&lt;Range,Body&gt; final_sum_type;
+<a name="l00184"></a>00184         final_sum_type* body;
+<a name="l00186"></a>00186         final_sum_type** sum; 
+<a name="l00187"></a>00187         sum_node_type** return_slot;
+<a name="l00189"></a>00189         sum_node_type* parent_sum;
+<a name="l00190"></a>00190         <span class="keywordtype">bool</span> is_final;
+<a name="l00191"></a>00191         <span class="keywordtype">bool</span> is_right_child;
+<a name="l00192"></a>00192         Range range;
+<a name="l00193"></a>00193         <span class="keyword">typename</span> Partitioner::partition_type partition;
+<a name="l00194"></a>00194         <span class="comment">/*override*/</span> task* execute();
+<a name="l00195"></a>00195     <span class="keyword">public</span>:
+<a name="l00196"></a>00196         start_scan( sum_node_type*&amp; return_slot_, start_scan&amp; parent_, sum_node_type* parent_sum_ ) :
+<a name="l00197"></a>00197             body(parent_.body),
+<a name="l00198"></a>00198             sum(parent_.sum),
+<a name="l00199"></a>00199             return_slot(&amp;return_slot_),
+<a name="l00200"></a>00200             parent_sum(parent_sum_),
+<a name="l00201"></a>00201             is_final(parent_.is_final),
+<a name="l00202"></a>00202             is_right_child(false),
+<a name="l00203"></a>00203             range(parent_.range,split()),
+<a name="l00204"></a>00204             partition(parent_.partition,split())
+<a name="l00205"></a>00205         {
+<a name="l00206"></a>00206             __TBB_ASSERT( !*return_slot, NULL );
+<a name="l00207"></a>00207         }
+<a name="l00208"></a>00208 
+<a name="l00209"></a>00209         start_scan( sum_node_type*&amp; return_slot_, <span class="keyword">const</span> Range&amp; range_, final_sum_type&amp; body_, <span class="keyword">const</span> Partitioner&amp; partitioner_) :
+<a name="l00210"></a>00210             body(&amp;body_),
+<a name="l00211"></a>00211             sum(NULL),
+<a name="l00212"></a>00212             return_slot(&amp;return_slot_),
+<a name="l00213"></a>00213             parent_sum(NULL),
+<a name="l00214"></a>00214             is_final(true),
+<a name="l00215"></a>00215             is_right_child(false),
+<a name="l00216"></a>00216             range(range_),
+<a name="l00217"></a>00217             partition(partitioner_)
+<a name="l00218"></a>00218         {
+<a name="l00219"></a>00219             __TBB_ASSERT( !*return_slot, NULL );
+<a name="l00220"></a>00220         }
+<a name="l00221"></a>00221 
+<a name="l00222"></a>00222         <span class="keyword">static</span> <span class="keywordtype">void</span> run(  <span class="keyword">const</span> Range&amp; range, Body&amp; body, <span class="keyword">const</span> Partitioner&amp; partitioner ) {
+<a name="l00223"></a>00223             <span class="keywordflow">if</span>( !range.empty() ) {
+<a name="l00224"></a>00224                 <span class="keyword">typedef</span> internal::start_scan&lt;Range,Body,Partitioner&gt; start_pass1_type;
+<a name="l00225"></a>00225                 internal::sum_node&lt;Range,Body&gt;* root = NULL;
+<a name="l00226"></a>00226                 <span class="keyword">typedef</span> internal::final_sum&lt;Range,Body&gt; final_sum_type;
+<a name="l00227"></a>00227                 final_sum_type* temp_body = <span class="keyword">new</span>(<a class="code" href="a00204.html#23acb0da0afd690da797f9f882027d34">task::allocate_root</a>()) final_sum_type( body );
+<a name="l00228"></a>00228                 start_pass1_type&amp; pass1 = *<span class="keyword">new</span>(<a class="code" href="a00204.html#23acb0da0afd690da797f9f882027d34">task::allocate_root</a>()) start_pass1_type(
+<a name="l00229"></a>00229                     <span class="comment">/*return_slot=*/</span>root,
+<a name="l00230"></a>00230                     range,
+<a name="l00231"></a>00231                     *temp_body,
+<a name="l00232"></a>00232                     partitioner );
+<a name="l00233"></a>00233                 <a class="code" href="a00204.html#ce8ce689c26a4ddf343829bc3c73290a">task::spawn_root_and_wait</a>( pass1 );
+<a name="l00234"></a>00234                 <span class="keywordflow">if</span>( root ) {
+<a name="l00235"></a>00235                     root-&gt;body = temp_body;
+<a name="l00236"></a>00236                     root-&gt;incoming = NULL;
+<a name="l00237"></a>00237                     root-&gt;stuff_last = &amp;body;
+<a name="l00238"></a>00238                     <a class="code" href="a00204.html#ce8ce689c26a4ddf343829bc3c73290a">task::spawn_root_and_wait</a>( *root );
+<a name="l00239"></a>00239                 } <span class="keywordflow">else</span> {
+<a name="l00240"></a>00240                     body.assign(temp_body-&gt;body);
+<a name="l00241"></a>00241                     temp_body-&gt;finish_construction( range, NULL );
+<a name="l00242"></a>00242                     temp_body-&gt;destroy(*temp_body);
+<a name="l00243"></a>00243                 }
+<a name="l00244"></a>00244             }
+<a name="l00245"></a>00245         }
+<a name="l00246"></a>00246     };
+<a name="l00247"></a>00247 
+<a name="l00248"></a>00248     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Partitioner&gt;
+<a name="l00249"></a>00249     task* start_scan&lt;Range,Body,Partitioner&gt;::execute() {
+<a name="l00250"></a>00250         <span class="keyword">typedef</span> internal::finish_scan&lt;Range,Body&gt; finish_pass1_type;
+<a name="l00251"></a>00251         finish_pass1_type* p = parent_sum ? static_cast&lt;finish_pass1_type*&gt;( parent() ) : NULL;
+<a name="l00252"></a>00252         <span class="comment">// Inspecting p-&gt;result.left_sum would ordinarily be a race condition.</span>
+<a name="l00253"></a>00253         <span class="comment">// But we inspect it only if we are not a stolen task, in which case we</span>
+<a name="l00254"></a>00254         <span class="comment">// know that task assigning to p-&gt;result.left_sum has completed.</span>
+<a name="l00255"></a>00255         <span class="keywordtype">bool</span> treat_as_stolen = is_right_child &amp;&amp; (is_stolen_task() || body!=p-&gt;result.left_sum);
+<a name="l00256"></a>00256         <span class="keywordflow">if</span>( treat_as_stolen ) {
+<a name="l00257"></a>00257             <span class="comment">// Invocation is for right child that has been really stolen or needs to be virtually stolen</span>
+<a name="l00258"></a>00258             p-&gt;right_zombie = body = <span class="keyword">new</span>( allocate_root() ) final_sum_type(body-&gt;body);
+<a name="l00259"></a>00259             is_final = <span class="keyword">false</span>;
+<a name="l00260"></a>00260         }
+<a name="l00261"></a>00261         task* next_task = NULL;
+<a name="l00262"></a>00262         <span class="keywordflow">if</span>( (is_right_child &amp;&amp; !treat_as_stolen) || !range.is_divisible() || partition.should_execute_range(*<span class="keyword">this</span>) ) {
+<a name="l00263"></a>00263             <span class="keywordflow">if</span>( is_final )
+<a name="l00264"></a>00264                 (body-&gt;body)( range, final_scan_tag() );
+<a name="l00265"></a>00265             <span class="keywordflow">else</span> <span class="keywordflow">if</span>( sum )
+<a name="l00266"></a>00266                 (body-&gt;body)( range, pre_scan_tag() );
+<a name="l00267"></a>00267             <span class="keywordflow">if</span>( sum ) 
+<a name="l00268"></a>00268                 *sum = body;
+<a name="l00269"></a>00269             __TBB_ASSERT( !*return_slot, NULL );
+<a name="l00270"></a>00270         } <span class="keywordflow">else</span> {
+<a name="l00271"></a>00271             sum_node_type* result;
+<a name="l00272"></a>00272             <span class="keywordflow">if</span>( parent_sum ) 
+<a name="l00273"></a>00273                 result = <span class="keyword">new</span>(allocate_additional_child_of(*parent_sum)) sum_node_type(range,<span class="comment">/*left_is_final=*/</span>is_final);
+<a name="l00274"></a>00274             <span class="keywordflow">else</span>
+<a name="l00275"></a>00275                 result = <span class="keyword">new</span>(<a class="code" href="a00204.html#23acb0da0afd690da797f9f882027d34">task::allocate_root</a>()) sum_node_type(range,<span class="comment">/*left_is_final=*/</span>is_final);
+<a name="l00276"></a>00276             finish_pass1_type&amp; c = *<span class="keyword">new</span>( allocate_continuation()) finish_pass1_type(*return_slot,sum,*result);
+<a name="l00277"></a>00277             <span class="comment">// Split off right child</span>
+<a name="l00278"></a>00278             start_scan&amp; b = *<span class="keyword">new</span>( c.allocate_child() ) start_scan( <span class="comment">/*return_slot=*/</span>result-&gt;right, *<span class="keyword">this</span>, result );
+<a name="l00279"></a>00279             b.is_right_child = <span class="keyword">true</span>;    
+<a name="l00280"></a>00280             <span class="comment">// Left child is recycling of *this.  Must recycle this before spawning b, </span>
+<a name="l00281"></a>00281             <span class="comment">// otherwise b might complete and decrement c.ref_count() to zero, which</span>
+<a name="l00282"></a>00282             <span class="comment">// would cause c.execute() to run prematurely.</span>
+<a name="l00283"></a>00283             recycle_as_child_of(c);
+<a name="l00284"></a>00284             c.set_ref_count(2);
+<a name="l00285"></a>00285             c.spawn(b);
+<a name="l00286"></a>00286             sum = &amp;result-&gt;left_sum;
+<a name="l00287"></a>00287             return_slot = &amp;result-&gt;left;
+<a name="l00288"></a>00288             is_right_child = <span class="keyword">false</span>;
+<a name="l00289"></a>00289             next_task = <span class="keyword">this</span>;
+<a name="l00290"></a>00290             parent_sum = result; 
+<a name="l00291"></a>00291             __TBB_ASSERT( !*return_slot, NULL );
+<a name="l00292"></a>00292         }
+<a name="l00293"></a>00293         <span class="keywordflow">return</span> next_task;
+<a name="l00294"></a>00294     } 
+<a name="l00295"></a>00295 } <span class="comment">// namespace internal</span>
+<a name="l00297"></a>00297 <span class="comment"></span>
+<a name="l00298"></a>00298 <span class="comment">// Requirements on Range concept are documented in blocked_range.h</span>
+<a name="l00299"></a>00299 
+<a name="l00317"></a>00317 
+<a name="l00319"></a>00319 
+<a name="l00320"></a>00320 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body&gt;
+<a name="l00321"></a><a class="code" href="a00280.html#ged143f31dd3d96ded02ab3db915b91c7">00321</a> <span class="keywordtype">void</span> <a class="code" href="a00280.html#ged143f31dd3d96ded02ab3db915b91c7">parallel_scan</a>( <span class="keyword">const</span> Range&amp; range, Body&amp; body ) {
+<a name="l00322"></a>00322     internal::start_scan&lt;Range,Body,__TBB_DEFAULT_PARTITIONER&gt;::run(range,body,__TBB_DEFAULT_PARTITIONER());
+<a name="l00323"></a>00323 }
+<a name="l00324"></a>00324 
+<a name="l00326"></a>00326 
+<a name="l00327"></a>00327 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body&gt;
+<a name="l00328"></a><a class="code" href="a00280.html#gc9fac8870b2e6365fb337014404529df">00328</a> <span class="keywordtype">void</span> <a class="code" href="a00280.html#ged143f31dd3d96ded02ab3db915b91c7">parallel_scan</a>( <span class="keyword">const</span> Range&amp; range, Body&amp; body, <span class="keyword">const</span> <a class="code" href="a00198.html">simple_partitioner</a>&amp; partitioner ) {
+<a name="l00329"></a>00329     internal::start_scan&lt;Range,Body,simple_partitioner&gt;::run(range,body,partitioner);
+<a name="l00330"></a>00330 }
+<a name="l00331"></a>00331 
+<a name="l00333"></a>00333 
+<a name="l00334"></a>00334 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body&gt;
+<a name="l00335"></a><a class="code" href="a00280.html#g62fde400a37bbca1a2fddc8e3d22f556">00335</a> <span class="keywordtype">void</span> <a class="code" href="a00280.html#ged143f31dd3d96ded02ab3db915b91c7">parallel_scan</a>( <span class="keyword">const</span> Range&amp; range, Body&amp; body, <span class="keyword">const</span> <a class="code" href="a00150.html">auto_partitioner</a>&amp; partitioner ) {
+<a name="l00336"></a>00336     internal::start_scan&lt;Range,Body,auto_partitioner&gt;::run(range,body,partitioner);
+<a name="l00337"></a>00337 }
+<a name="l00339"></a>00339 
+<a name="l00340"></a>00340 } <span class="comment">// namespace tbb</span>
+<a name="l00341"></a>00341 
+<a name="l00342"></a>00342 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_parallel_scan_H */</span>
+<a name="l00343"></a>00343 
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
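
For orientation, parallel_scan computes a parallel prefix operation over the range, possibly in two passes: a pre-scan pass that only accumulates partial summaries (pre_scan_tag) and a final pass that also emits results (final_scan_tag), which is what the tag dispatch above distinguishes; reverse_join folds a left sibling's summary into the current one, and assign copies the grand total back into the user's Body. Below is a minimal running-sum sketch; the names PrefixSumBody, in, and out are illustrative only and not part of the committed header. A simple_partitioner or auto_partitioner may be passed as a third argument, matching the overloads above.

#include <vector>
#include "tbb/blocked_range.h"
#include "tbb/parallel_scan.h"

// Running (prefix) sum: out[i] = in[0] + ... + in[i].
struct PrefixSumBody {
    const std::vector<float>& in;
    std::vector<float>&       out;
    float sum;                                     // sum of everything left of the current subrange

    PrefixSumBody( const std::vector<float>& in_, std::vector<float>& out_ )
        : in(in_), out(out_), sum(0) {}
    PrefixSumBody( PrefixSumBody& other, tbb::split )
        : in(other.in), out(other.out), sum(0) {}            // splitting constructor

    template<typename Tag>
    void operator()( const tbb::blocked_range<size_t>& r, Tag ) {
        float temp = sum;
        for( size_t i=r.begin(); i!=r.end(); ++i ) {
            temp += in[i];
            if( Tag::is_final_scan() )
                out[i] = temp;                               // emit results only on the final pass
        }
        sum = temp;
    }
    void reverse_join( PrefixSumBody& left ) { sum = left.sum + sum; }  // fold in the left sibling's total
    void assign( PrefixSumBody& other )      { sum = other.sum; }       // copy the final summary back
};

int main() {
    std::vector<float> in( 1000, 1.0f ), out( 1000 );
    PrefixSumBody body( in, out );
    tbb::parallel_scan( tbb::blocked_range<size_t>(0, in.size()), body );
    // out[i] is now i+1 for this illustrative input.
    return 0;
}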
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00406.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00406.html
new file mode 100644 (file)
index 0000000..42c81a0
--- /dev/null
@@ -0,0 +1,225 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>parallel_sort.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>parallel_sort.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_parallel_sort_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_parallel_sort_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "parallel_for.h"</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#include "blocked_range.h"</span>
+<a name="l00026"></a>00026 <span class="preprocessor">#include &lt;algorithm&gt;</span>
+<a name="l00027"></a>00027 <span class="preprocessor">#include &lt;iterator&gt;</span>
+<a name="l00028"></a>00028 <span class="preprocessor">#include &lt;functional&gt;</span>
+<a name="l00029"></a>00029 
+<a name="l00030"></a>00030 <span class="keyword">namespace </span>tbb {
+<a name="l00031"></a>00031 
+<a name="l00033"></a>00033 <span class="keyword">namespace </span>internal {
+<a name="l00034"></a>00034 
+<a name="l00036"></a>00036 
+<a name="l00039"></a>00039 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> RandomAccessIterator, <span class="keyword">typename</span> Compare&gt;
+<a name="l00040"></a>00040 <span class="keyword">class </span>quick_sort_range: <span class="keyword">private</span> no_assign {
+<a name="l00041"></a>00041 
+<a name="l00042"></a>00042     <span class="keyword">inline</span> size_t median_of_three(<span class="keyword">const</span> RandomAccessIterator &amp;array, size_t l, size_t m, size_t r)<span class="keyword"> const </span>{
+<a name="l00043"></a>00043         <span class="keywordflow">return</span> comp(array[l], array[m]) ? ( comp(array[m], array[r]) ? m : ( comp( array[l], array[r]) ? r : l ) ) 
+<a name="l00044"></a>00044                                         : ( comp(array[r], array[m]) ? m : ( comp( array[r], array[l] ) ? r : l ) );
+<a name="l00045"></a>00045     }
+<a name="l00046"></a>00046 
+<a name="l00047"></a>00047     <span class="keyword">inline</span> size_t pseudo_median_of_nine( <span class="keyword">const</span> RandomAccessIterator &amp;array, <span class="keyword">const</span> quick_sort_range &amp;range )<span class="keyword"> const </span>{
+<a name="l00048"></a>00048         size_t offset = range.size/8u;
+<a name="l00049"></a>00049         <span class="keywordflow">return</span> median_of_three(array, 
+<a name="l00050"></a>00050                                median_of_three(array, 0, offset, offset*2),
+<a name="l00051"></a>00051                                median_of_three(array, offset*3, offset*4, offset*5),
+<a name="l00052"></a>00052                                median_of_three(array, offset*6, offset*7, range.size - 1) );
+<a name="l00053"></a>00053 
+<a name="l00054"></a>00054     }
+<a name="l00055"></a>00055 
+<a name="l00056"></a>00056 <span class="keyword">public</span>:
+<a name="l00057"></a>00057 
+<a name="l00058"></a>00058     <span class="keyword">static</span> <span class="keyword">const</span> size_t grainsize = 500;
+<a name="l00059"></a>00059     <span class="keyword">const</span> Compare &amp;comp;
+<a name="l00060"></a>00060     RandomAccessIterator begin;
+<a name="l00061"></a>00061     size_t size;
+<a name="l00062"></a>00062 
+<a name="l00063"></a>00063     quick_sort_range( RandomAccessIterator begin_, size_t size_, <span class="keyword">const</span> Compare &amp;comp_ ) :
+<a name="l00064"></a>00064         comp(comp_), begin(begin_), size(size_) {}
+<a name="l00065"></a>00065 
+<a name="l00066"></a>00066     <span class="keywordtype">bool</span> empty()<span class="keyword"> const </span>{<span class="keywordflow">return</span> size==0;}
+<a name="l00067"></a>00067     <span class="keywordtype">bool</span> is_divisible()<span class="keyword"> const </span>{<span class="keywordflow">return</span> size&gt;=grainsize;}
+<a name="l00068"></a>00068 
+<a name="l00069"></a>00069     quick_sort_range( quick_sort_range&amp; range, split ) : comp(range.comp) {
+<a name="l00070"></a>00070         RandomAccessIterator array = range.begin;
+<a name="l00071"></a>00071         RandomAccessIterator key0 = range.begin; 
+<a name="l00072"></a>00072         size_t m = pseudo_median_of_nine(array, range);
+<a name="l00073"></a>00073         <span class="keywordflow">if</span> (m) std::swap ( array[0], array[m] );
+<a name="l00074"></a>00074 
+<a name="l00075"></a>00075         size_t i=0;
+<a name="l00076"></a>00076         size_t j=range.size;
+<a name="l00077"></a>00077         <span class="comment">// Partition interval [i+1,j-1] with key *key0.</span>
+<a name="l00078"></a>00078         <span class="keywordflow">for</span>(;;) {
+<a name="l00079"></a>00079             __TBB_ASSERT( i&lt;j, NULL );
+<a name="l00080"></a>00080             <span class="comment">// Loop must terminate since array[l]==*key0.</span>
+<a name="l00081"></a>00081             <span class="keywordflow">do</span> {
+<a name="l00082"></a>00082                 --j;
+<a name="l00083"></a>00083                 __TBB_ASSERT( i&lt;=j, <span class="stringliteral">"bad ordering relation?"</span> );
+<a name="l00084"></a>00084             } <span class="keywordflow">while</span>( comp( *key0, array[j] ));
+<a name="l00085"></a>00085             <span class="keywordflow">do</span> {
+<a name="l00086"></a>00086                 __TBB_ASSERT( i&lt;=j, NULL );
+<a name="l00087"></a>00087                 <span class="keywordflow">if</span>( i==j ) <span class="keywordflow">goto</span> partition;
+<a name="l00088"></a>00088                 ++i;
+<a name="l00089"></a>00089             } <span class="keywordflow">while</span>( comp( array[i],*key0 ));
+<a name="l00090"></a>00090             <span class="keywordflow">if</span>( i==j ) <span class="keywordflow">goto</span> partition;
+<a name="l00091"></a>00091             std::swap( array[i], array[j] );
+<a name="l00092"></a>00092         }
+<a name="l00093"></a>00093 partition:
+<a name="l00094"></a>00094         <span class="comment">// Put the partition key were it belongs</span>
+<a name="l00095"></a>00095         std::swap( array[j], *key0 );
+<a name="l00096"></a>00096         <span class="comment">// array[l..j) is less or equal to key.</span>
+<a name="l00097"></a>00097         <span class="comment">// array(j..r) is greater or equal to key.</span>
+<a name="l00098"></a>00098         <span class="comment">// array[j] is equal to key</span>
+<a name="l00099"></a>00099         i=j+1;
+<a name="l00100"></a>00100         begin = array+i;
+<a name="l00101"></a>00101         size = range.size-i;
+<a name="l00102"></a>00102         range.size = j;
+<a name="l00103"></a>00103     }
+<a name="l00104"></a>00104 };
+<a name="l00105"></a>00105 
+<a name="l00107"></a>00107 
+<a name="l00108"></a>00108 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> RandomAccessIterator, <span class="keyword">typename</span> Compare&gt;
+<a name="l00109"></a>00109 <span class="keyword">class </span>quick_sort_pretest_body : internal::no_assign {
+<a name="l00110"></a>00110     <span class="keyword">const</span> Compare &amp;comp;
+<a name="l00111"></a>00111 
+<a name="l00112"></a>00112 <span class="keyword">public</span>:
+<a name="l00113"></a>00113     quick_sort_pretest_body(<span class="keyword">const</span> Compare &amp;_comp) : comp(_comp) {}
+<a name="l00114"></a>00114 
+<a name="l00115"></a>00115     <span class="keywordtype">void</span> operator()( <span class="keyword">const</span> blocked_range&lt;RandomAccessIterator&gt;&amp; range )<span class="keyword"> const </span>{
+<a name="l00116"></a>00116         task &amp;my_task = <a class="code" href="a00204.html#bd43e8d6249738efafd12d6a4c72c5e3">task::self</a>();
+<a name="l00117"></a>00117         RandomAccessIterator my_end = range.end();
+<a name="l00118"></a>00118 
+<a name="l00119"></a>00119         <span class="keywordtype">int</span> i = 0;
+<a name="l00120"></a>00120         <span class="keywordflow">for</span> (RandomAccessIterator k = range.begin(); k != my_end; ++k, ++i) {
+<a name="l00121"></a>00121             <span class="keywordflow">if</span> ( i%64 == 0 &amp;&amp; my_task.is_cancelled() ) <span class="keywordflow">break</span>;
+<a name="l00122"></a>00122           
+<a name="l00123"></a>00123             <span class="comment">// The k-1 is never out-of-range because the first chunk starts at begin+serial_cutoff+1</span>
+<a name="l00124"></a>00124             <span class="keywordflow">if</span> ( comp( *(k), *(k-1) ) ) {
+<a name="l00125"></a>00125                 my_task.cancel_group_execution();
+<a name="l00126"></a>00126                 <span class="keywordflow">break</span>;
+<a name="l00127"></a>00127             }
+<a name="l00128"></a>00128         }
+<a name="l00129"></a>00129     }
+<a name="l00130"></a>00130 
+<a name="l00131"></a>00131 };
+<a name="l00132"></a>00132 
+<a name="l00134"></a>00134 
+<a name="l00135"></a>00135 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> RandomAccessIterator, <span class="keyword">typename</span> Compare&gt;
+<a name="l00136"></a>00136 <span class="keyword">struct </span>quick_sort_body {
+<a name="l00137"></a>00137     <span class="keywordtype">void</span> operator()( <span class="keyword">const</span> quick_sort_range&lt;RandomAccessIterator,Compare&gt;&amp; range )<span class="keyword"> const </span>{
+<a name="l00138"></a>00138         <span class="comment">//SerialQuickSort( range.begin, range.size, range.comp );</span>
+<a name="l00139"></a>00139         std::sort( range.begin, range.begin + range.size, range.comp );
+<a name="l00140"></a>00140     }
+<a name="l00141"></a>00141 };
+<a name="l00142"></a>00142 
+<a name="l00144"></a>00144 
+<a name="l00145"></a>00145 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> RandomAccessIterator, <span class="keyword">typename</span> Compare&gt;
+<a name="l00146"></a>00146 <span class="keywordtype">void</span> parallel_quick_sort( RandomAccessIterator begin, RandomAccessIterator end, <span class="keyword">const</span> Compare&amp; comp ) {
+<a name="l00147"></a>00147     task_group_context my_context;
+<a name="l00148"></a>00148     <span class="keyword">const</span> <span class="keywordtype">int</span> serial_cutoff = 9;
+<a name="l00149"></a>00149 
+<a name="l00150"></a>00150     __TBB_ASSERT( begin + serial_cutoff &lt; end, <span class="stringliteral">"min_parallel_size is smaller than serial cutoff?"</span> );
+<a name="l00151"></a>00151     RandomAccessIterator k;
+<a name="l00152"></a>00152     <span class="keywordflow">for</span> ( k = begin ; k != begin + serial_cutoff; ++k ) {
+<a name="l00153"></a>00153         <span class="keywordflow">if</span> ( comp( *(k+1), *k ) ) {
+<a name="l00154"></a>00154             <span class="keywordflow">goto</span> do_parallel_quick_sort;
+<a name="l00155"></a>00155         }
+<a name="l00156"></a>00156     }
+<a name="l00157"></a>00157 
+<a name="l00158"></a>00158     <a class="code" href="a00280.html#g68cc046ef72c42ce205fccbc435a0d81">parallel_for</a>( blocked_range&lt;RandomAccessIterator&gt;(k+1, end),
+<a name="l00159"></a>00159                   quick_sort_pretest_body&lt;RandomAccessIterator,Compare&gt;(comp),
+<a name="l00160"></a>00160                   auto_partitioner(),
+<a name="l00161"></a>00161                   my_context);
+<a name="l00162"></a>00162 
+<a name="l00163"></a>00163     <span class="keywordflow">if</span> (my_context.is_group_execution_cancelled())
+<a name="l00164"></a>00164 do_parallel_quick_sort:
+<a name="l00165"></a>00165         <a class="code" href="a00280.html#g68cc046ef72c42ce205fccbc435a0d81">parallel_for</a>( quick_sort_range&lt;RandomAccessIterator,Compare&gt;(begin, end-begin, comp ), 
+<a name="l00166"></a>00166                       quick_sort_body&lt;RandomAccessIterator,Compare&gt;(),
+<a name="l00167"></a>00167                       auto_partitioner() );
+<a name="l00168"></a>00168 }
+<a name="l00169"></a>00169 
+<a name="l00170"></a>00170 } <span class="comment">// namespace internal</span>
+<a name="l00172"></a>00172 <span class="comment"></span>
+<a name="l00183"></a>00183 
+<a name="l00185"></a>00185 
+<a name="l00188"></a>00188 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> RandomAccessIterator, <span class="keyword">typename</span> Compare&gt;
+<a name="l00189"></a><a class="code" href="a00280.html#g49edcf9447cd91a9527a3f8e8512b7aa">00189</a> <span class="keywordtype">void</span> <a class="code" href="a00280.html#g49edcf9447cd91a9527a3f8e8512b7aa">parallel_sort</a>( RandomAccessIterator begin, RandomAccessIterator end, <span class="keyword">const</span> Compare&amp; comp) { 
+<a name="l00190"></a>00190     <span class="keyword">const</span> <span class="keywordtype">int</span> min_parallel_size = 500; 
+<a name="l00191"></a>00191     <span class="keywordflow">if</span>( end &gt; begin ) {
+<a name="l00192"></a>00192         <span class="keywordflow">if</span> (end - begin &lt; min_parallel_size) { 
+<a name="l00193"></a>00193             std::sort(begin, end, comp);
+<a name="l00194"></a>00194         } <span class="keywordflow">else</span> {
+<a name="l00195"></a>00195             internal::parallel_quick_sort(begin, end, comp);
+<a name="l00196"></a>00196         }
+<a name="l00197"></a>00197     }
+<a name="l00198"></a>00198 }
+<a name="l00199"></a>00199 
+<a name="l00201"></a>00201 
+<a name="l00202"></a>00202 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> RandomAccessIterator&gt;
+<a name="l00203"></a><a class="code" href="a00280.html#g16c3eb77d0e530834c51ce3857f01012">00203</a> <span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="a00280.html#g49edcf9447cd91a9527a3f8e8512b7aa">parallel_sort</a>( RandomAccessIterator begin, RandomAccessIterator end ) { 
+<a name="l00204"></a>00204     <a class="code" href="a00280.html#g49edcf9447cd91a9527a3f8e8512b7aa">parallel_sort</a>( begin, end, std::less&lt; <span class="keyword">typename</span> std::iterator_traits&lt;RandomAccessIterator&gt;::value_type &gt;() );
+<a name="l00205"></a>00205 }
+<a name="l00206"></a>00206 
+<a name="l00208"></a>00208 
+<a name="l00209"></a>00209 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00210"></a><a class="code" href="a00280.html#gc7576f82fdedc8a701a6c17ad9415926">00210</a> <span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="a00280.html#g49edcf9447cd91a9527a3f8e8512b7aa">parallel_sort</a>( T * begin, T * end ) {
+<a name="l00211"></a>00211     <a class="code" href="a00280.html#g49edcf9447cd91a9527a3f8e8512b7aa">parallel_sort</a>( begin, end, std::less&lt; T &gt;() );
+<a name="l00212"></a>00212 }   
+<a name="l00214"></a>00214 
+<a name="l00215"></a>00215 
+<a name="l00216"></a>00216 } <span class="comment">// namespace tbb</span>
+<a name="l00217"></a>00217 
+<a name="l00218"></a>00218 <span class="preprocessor">#endif</span>
+<a name="l00219"></a>00219 <span class="preprocessor"></span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
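
parallel_sort.h above sorts serially below min_parallel_size (500 elements), runs quick_sort_pretest_body so an already-sorted range cancels the parallel pass early, and otherwise feeds quick_sort_range to parallel_for. A hedged usage sketch follows, assuming a TBB build with these headers on the include path; the function and container names are hypothetical and not part of the committed files.

// Illustrative sketch only (not part of this commit): calling the
// tbb::parallel_sort overloads declared above.
#include <vector>
#include <functional>
#include "tbb/parallel_sort.h"

void sort_examples( std::vector<int>& a, std::vector<double>& b ) {
    // Default comparison (std::less); ranges shorter than 500 elements
    // (min_parallel_size above) fall back to std::sort.
    tbb::parallel_sort( a.begin(), a.end() );

    // Custom comparator: descending order. The pretest body above cancels the
    // parallel pass early if the range is already sorted under this comparator.
    tbb::parallel_sort( b.begin(), b.end(), std::greater<double>() );
}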
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00410.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00410.html
new file mode 100644 (file)
index 0000000..2669e8c
--- /dev/null
@@ -0,0 +1,196 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>parallel_while.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>parallel_while.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_parallel_while</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_parallel_while</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "task.h"</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#include &lt;new&gt;</span>
+<a name="l00026"></a>00026 
+<a name="l00027"></a>00027 <span class="keyword">namespace </span>tbb {
+<a name="l00028"></a>00028 
+<a name="l00029"></a>00029 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Body&gt;
+<a name="l00030"></a>00030 <span class="keyword">class </span>parallel_while;
+<a name="l00031"></a>00031 
+<a name="l00033"></a>00033 <span class="keyword">namespace </span>internal {
+<a name="l00034"></a>00034 
+<a name="l00035"></a>00035     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Stream, <span class="keyword">typename</span> Body&gt; <span class="keyword">class </span>while_task;
+<a name="l00036"></a>00036 
+<a name="l00038"></a>00038 
+<a name="l00040"></a>00040     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Body&gt;
+<a name="l00041"></a>00041     <span class="keyword">class </span>while_iteration_task: <span class="keyword">public</span> task {
+<a name="l00042"></a>00042         <span class="keyword">const</span> Body&amp; my_body;
+<a name="l00043"></a>00043         <span class="keyword">typename</span> Body::argument_type my_value;
+<a name="l00044"></a>00044         <span class="comment">/*override*/</span> task* execute() {
+<a name="l00045"></a>00045             my_body(my_value); 
+<a name="l00046"></a>00046             <span class="keywordflow">return</span> NULL;
+<a name="l00047"></a>00047         }
+<a name="l00048"></a>00048         while_iteration_task( <span class="keyword">const</span> <span class="keyword">typename</span> Body::argument_type&amp; value, <span class="keyword">const</span> Body&amp; body ) : 
+<a name="l00049"></a>00049             my_body(body), my_value(value)
+<a name="l00050"></a>00050         {}
+<a name="l00051"></a>00051         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Body_&gt; <span class="keyword">friend</span> <span class="keyword">class </span>while_group_task;
+<a name="l00052"></a>00052         <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00184.html">tbb::parallel_while</a>&lt;Body&gt;;
+<a name="l00053"></a>00053     };
+<a name="l00054"></a>00054 
+<a name="l00056"></a>00056 
+<a name="l00058"></a>00058     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Body&gt;
+<a name="l00059"></a>00059     <span class="keyword">class </span>while_group_task: <span class="keyword">public</span> task {
+<a name="l00060"></a>00060         <span class="keyword">static</span> <span class="keyword">const</span> size_t max_arg_size = 4;         
+<a name="l00061"></a>00061         <span class="keyword">const</span> Body&amp; my_body;
+<a name="l00062"></a>00062         size_t size;
+<a name="l00063"></a>00063         <span class="keyword">typename</span> Body::argument_type my_arg[max_arg_size];
+<a name="l00064"></a>00064         while_group_task( <span class="keyword">const</span> Body&amp; body ) : my_body(body), size(0) {} 
+<a name="l00065"></a>00065         <span class="comment">/*override*/</span> task* execute() {
+<a name="l00066"></a>00066             <span class="keyword">typedef</span> while_iteration_task&lt;Body&gt; iteration_type;
+<a name="l00067"></a>00067             __TBB_ASSERT( size&gt;0, NULL );
+<a name="l00068"></a>00068             task_list list;
+<a name="l00069"></a>00069             task* t; 
+<a name="l00070"></a>00070             size_t k=0; 
+<a name="l00071"></a>00071             <span class="keywordflow">for</span>(;;) {
+<a name="l00072"></a>00072                 t = <span class="keyword">new</span>( allocate_child() ) iteration_type(my_arg[k],my_body); 
+<a name="l00073"></a>00073                 <span class="keywordflow">if</span>( ++k==size ) <span class="keywordflow">break</span>;
+<a name="l00074"></a>00074                 list.push_back(*t);
+<a name="l00075"></a>00075             }
+<a name="l00076"></a>00076             set_ref_count(<span class="keywordtype">int</span>(k+1));
+<a name="l00077"></a>00077             spawn(list);
+<a name="l00078"></a>00078             spawn_and_wait_for_all(*t);
+<a name="l00079"></a>00079             <span class="keywordflow">return</span> NULL;
+<a name="l00080"></a>00080         }
+<a name="l00081"></a>00081         <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Stream, <span class="keyword">typename</span> Body_&gt; <span class="keyword">friend</span> <span class="keyword">class </span>while_task;
+<a name="l00082"></a>00082     };
+<a name="l00083"></a>00083     
+<a name="l00085"></a>00085 
+<a name="l00087"></a>00087     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Stream, <span class="keyword">typename</span> Body&gt;
+<a name="l00088"></a>00088     <span class="keyword">class </span>while_task: <span class="keyword">public</span> task {
+<a name="l00089"></a>00089         Stream&amp; my_stream;
+<a name="l00090"></a>00090         <span class="keyword">const</span> Body&amp; my_body;
+<a name="l00091"></a>00091         empty_task&amp; my_barrier;
+<a name="l00092"></a>00092         <span class="comment">/*override*/</span> task* execute() {
+<a name="l00093"></a>00093             <span class="keyword">typedef</span> while_group_task&lt;Body&gt; block_type;
+<a name="l00094"></a>00094             block_type&amp; t = *<span class="keyword">new</span>( allocate_additional_child_of(my_barrier) ) block_type(my_body);
+<a name="l00095"></a>00095             size_t k=0; 
+<a name="l00096"></a>00096             <span class="keywordflow">while</span>( my_stream.pop_if_present(t.my_arg[k]) ) {
+<a name="l00097"></a>00097                 <span class="keywordflow">if</span>( ++k==block_type::max_arg_size ) {
+<a name="l00098"></a>00098                     <span class="comment">// There might be more iterations.</span>
+<a name="l00099"></a>00099                     recycle_to_reexecute();
+<a name="l00100"></a>00100                     <span class="keywordflow">break</span>;
+<a name="l00101"></a>00101                 }
+<a name="l00102"></a>00102             }
+<a name="l00103"></a>00103             <span class="keywordflow">if</span>( k==0 ) {
+<a name="l00104"></a>00104                 destroy(t);
+<a name="l00105"></a>00105                 <span class="keywordflow">return</span> NULL;
+<a name="l00106"></a>00106             } <span class="keywordflow">else</span> {
+<a name="l00107"></a>00107                 t.size = k;
+<a name="l00108"></a>00108                 <span class="keywordflow">return</span> &amp;t;
+<a name="l00109"></a>00109             }
+<a name="l00110"></a>00110         }
+<a name="l00111"></a>00111         while_task( Stream&amp; stream, <span class="keyword">const</span> Body&amp; body, empty_task&amp; barrier ) : 
+<a name="l00112"></a>00112             my_stream(stream),
+<a name="l00113"></a>00113             my_body(body),
+<a name="l00114"></a>00114             my_barrier(barrier)
+<a name="l00115"></a>00115         {} 
+<a name="l00116"></a>00116         <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00184.html">tbb::parallel_while</a>&lt;Body&gt;;
+<a name="l00117"></a>00117     };
+<a name="l00118"></a>00118 
+<a name="l00119"></a>00119 } <span class="comment">// namespace internal</span>
+<a name="l00121"></a>00121 <span class="comment"></span>
+<a name="l00123"></a>00123 
+<a name="l00128"></a>00128 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Body&gt;
+<a name="l00129"></a><a class="code" href="a00184.html">00129</a> <span class="keyword">class </span><a class="code" href="a00184.html">parallel_while</a>: internal::no_copy {
+<a name="l00130"></a>00130 <span class="keyword">public</span>:
+<a name="l00132"></a><a class="code" href="a00184.html#36e26ba3880c7bcf804a97ba0cbe133f">00132</a>     <a class="code" href="a00184.html#36e26ba3880c7bcf804a97ba0cbe133f">parallel_while</a>() : my_body(NULL), my_barrier(NULL) {}
+<a name="l00133"></a>00133 
+<a name="l00135"></a><a class="code" href="a00184.html#6fcfc973cc56b79c6d0fbb8a31be7e84">00135</a>     <a class="code" href="a00184.html#6fcfc973cc56b79c6d0fbb8a31be7e84">~parallel_while</a>() {
+<a name="l00136"></a>00136         <span class="keywordflow">if</span>( my_barrier ) {
+<a name="l00137"></a>00137             my_barrier-&gt;<a class="code" href="a00204.html#dfaacf92685e5f86393bf657b2853bf8">destroy</a>(*my_barrier);    
+<a name="l00138"></a>00138             my_barrier = NULL;
+<a name="l00139"></a>00139         }
+<a name="l00140"></a>00140     }
+<a name="l00141"></a>00141 
+<a name="l00143"></a><a class="code" href="a00184.html#fa297e53d3af2a101e712bc200233e9c">00143</a>     <span class="keyword">typedef</span> <span class="keyword">typename</span> Body::argument_type <a class="code" href="a00184.html#fa297e53d3af2a101e712bc200233e9c">value_type</a>;
+<a name="l00144"></a>00144 
+<a name="l00146"></a>00146 
+<a name="l00149"></a>00149     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Stream&gt;
+<a name="l00150"></a>00150     <span class="keywordtype">void</span> <a class="code" href="a00184.html#b32a0a6e5e09ebb7fad3e6652c19afe5">run</a>( Stream&amp; stream, <span class="keyword">const</span> Body&amp; body );
+<a name="l00151"></a>00151 
+<a name="l00153"></a>00153 
+<a name="l00154"></a>00154     <span class="keywordtype">void</span> <a class="code" href="a00184.html#e131c560057a58229992b61eb8dba4c6">add</a>( <span class="keyword">const</span> <a class="code" href="a00184.html#fa297e53d3af2a101e712bc200233e9c">value_type</a>&amp; item );
+<a name="l00155"></a>00155 
+<a name="l00156"></a>00156 <span class="keyword">private</span>:
+<a name="l00157"></a>00157     <span class="keyword">const</span> Body* my_body;
+<a name="l00158"></a>00158     <a class="code" href="a00167.html">empty_task</a>* my_barrier;
+<a name="l00159"></a>00159 };
+<a name="l00160"></a>00160 
+<a name="l00161"></a>00161 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Body&gt;
+<a name="l00162"></a>00162 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Stream&gt;
+<a name="l00163"></a><a class="code" href="a00184.html#b32a0a6e5e09ebb7fad3e6652c19afe5">00163</a> <span class="keywordtype">void</span> <a class="code" href="a00184.html">parallel_while&lt;Body&gt;::run</a>( Stream&amp; stream, <span class="keyword">const</span> Body&amp; body ) {
+<a name="l00164"></a>00164     <span class="keyword">using namespace </span>internal;
+<a name="l00165"></a>00165     <a class="code" href="a00167.html">empty_task</a>&amp; barrier = *<span class="keyword">new</span>( <a class="code" href="a00204.html#23acb0da0afd690da797f9f882027d34">task::allocate_root</a>() ) <a class="code" href="a00167.html">empty_task</a>();
+<a name="l00166"></a>00166     my_body = &amp;body;
+<a name="l00167"></a>00167     my_barrier = &amp;barrier;
+<a name="l00168"></a>00168     my_barrier-&gt;<a class="code" href="a00204.html#06a4206a57e8e12a439b14d6d41cfd92">set_ref_count</a>(2);
+<a name="l00169"></a>00169     while_task&lt;Stream,Body&gt;&amp; w = *<span class="keyword">new</span>( my_barrier-&gt;<a class="code" href="a00204.html#1ff794f7053cd9148d5f280fbf07377f">allocate_child</a>() ) while_task&lt;Stream,Body&gt;( stream, body, barrier );
+<a name="l00170"></a>00170     my_barrier-&gt;<a class="code" href="a00204.html#3ce28ca9baa771cfc37ecd72e69c4f3c">spawn_and_wait_for_all</a>(w);
+<a name="l00171"></a>00171     my_barrier-&gt;<a class="code" href="a00204.html#dfaacf92685e5f86393bf657b2853bf8">destroy</a>(*my_barrier);
+<a name="l00172"></a>00172     my_barrier = NULL;
+<a name="l00173"></a>00173     my_body = NULL;
+<a name="l00174"></a>00174 }
+<a name="l00175"></a>00175 
+<a name="l00176"></a>00176 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Body&gt;
+<a name="l00177"></a><a class="code" href="a00184.html#e131c560057a58229992b61eb8dba4c6">00177</a> <span class="keywordtype">void</span> <a class="code" href="a00184.html">parallel_while&lt;Body&gt;::add</a>( <span class="keyword">const</span> value_type&amp; item ) {
+<a name="l00178"></a>00178     __TBB_ASSERT(my_barrier,<span class="stringliteral">"attempt to add to parallel_while that is not running"</span>);
+<a name="l00179"></a>00179     <span class="keyword">typedef</span> internal::while_iteration_task&lt;Body&gt; iteration_type;
+<a name="l00180"></a>00180     iteration_type&amp; i = *<span class="keyword">new</span>( task::allocate_additional_child_of(*my_barrier) ) iteration_type(item,*my_body);
+<a name="l00181"></a>00181     <a class="code" href="a00204.html#bd43e8d6249738efafd12d6a4c72c5e3">task::self</a>().spawn( i );
+<a name="l00182"></a>00182 }
+<a name="l00183"></a>00183 
+<a name="l00184"></a>00184 } <span class="comment">// namespace </span>
+<a name="l00185"></a>00185 
+<a name="l00186"></a>00186 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_parallel_while */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
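
parallel_while.h above expects a Stream type with pop_if_present and a Body exposing an argument_type typedef and a const operator(). The following sketch is illustrative only and not part of this commit; Item, ItemStream, ApplyWork and process_list are made-up names.

// Illustrative sketch only (not part of this commit): a Stream/Body pair for
// tbb::parallel_while.
#include "tbb/parallel_while.h"
#include "tbb/spin_mutex.h"

struct Item { Item* next; int value; };

// Stream concept: pop_if_present fetches the next item under a lock.
class ItemStream {
    Item* my_head;
    tbb::spin_mutex my_mutex;
public:
    ItemStream( Item* head ) : my_head(head) {}
    bool pop_if_present( Item*& item ) {
        tbb::spin_mutex::scoped_lock lock(my_mutex);
        if( !my_head ) return false;
        item = my_head;
        my_head = my_head->next;
        return true;
    }
};

// Body concept: argument_type plus a const operator().
class ApplyWork {
public:
    typedef Item* argument_type;
    void operator()( Item* item ) const {
        item->value *= 2;            // stand-in for real per-item work
    }
};

void process_list( Item* head ) {
    ItemStream stream(head);
    ApplyWork body;
    tbb::parallel_while<ApplyWork> w;
    w.run( stream, body );
}

The add() method shown on this page can be called from inside Body::operator() to feed newly discovered items back into a running parallel_while, which is what the while_iteration_task/allocate_additional_child_of machinery above supports.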
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00414.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00414.html
new file mode 100644 (file)
index 0000000..f9b246f
--- /dev/null
@@ -0,0 +1,232 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>partitioner.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>partitioner.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_partitioner_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_partitioner_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "task.h"</span>
+<a name="l00025"></a>00025 
+<a name="l00026"></a>00026 <span class="keyword">namespace </span>tbb {
+<a name="l00027"></a>00027 <span class="keyword">class </span>affinity_partitioner;
+<a name="l00028"></a>00028 
+<a name="l00030"></a>00030 <span class="keyword">namespace </span>internal {
+<a name="l00031"></a>00031 size_t __TBB_EXPORTED_FUNC get_initial_auto_partitioner_divisor();
+<a name="l00032"></a>00032 
+<a name="l00034"></a>00034 
+<a name="l00035"></a>00035 <span class="keyword">class </span>affinity_partitioner_base_v3: no_copy {
+<a name="l00036"></a>00036     <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00145.html">tbb::affinity_partitioner</a>;
+<a name="l00038"></a>00038 
+<a name="l00039"></a>00039     affinity_id* my_array;
+<a name="l00041"></a>00041     size_t my_size;
+<a name="l00043"></a>00043     affinity_partitioner_base_v3() : my_array(NULL), my_size(0) {}
+<a name="l00045"></a>00045     ~affinity_partitioner_base_v3() {resize(0);}
+<a name="l00047"></a>00047 
+<a name="l00048"></a>00048     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD resize( <span class="keywordtype">unsigned</span> factor );
+<a name="l00049"></a>00049     <span class="keyword">friend</span> <span class="keyword">class </span>affinity_partition_type;
+<a name="l00050"></a>00050 };
+<a name="l00051"></a>00051 
+<a name="l00053"></a>00053 <span class="keyword">class </span>partition_type_base {
+<a name="l00054"></a>00054 <span class="keyword">public</span>:
+<a name="l00055"></a>00055     <span class="keywordtype">void</span> set_affinity( task &amp; ) {}
+<a name="l00056"></a>00056     <span class="keywordtype">void</span> note_affinity( <a class="code" href="a00204.html#d61bb32389d3857bf7511d69beaafb76">task::affinity_id</a> ) {}
+<a name="l00057"></a>00057     task* continue_after_execute_range() {<span class="keywordflow">return</span> NULL;}
+<a name="l00058"></a>00058     <span class="keywordtype">bool</span> decide_whether_to_delay() {<span class="keywordflow">return</span> <span class="keyword">false</span>;}
+<a name="l00059"></a>00059     <span class="keywordtype">void</span> spawn_or_delay( <span class="keywordtype">bool</span>, task&amp; b ) {
+<a name="l00060"></a>00060         task::spawn(b);
+<a name="l00061"></a>00061     }
+<a name="l00062"></a>00062 };
+<a name="l00063"></a>00063 
+<a name="l00064"></a>00064 <span class="keyword">class </span>affinity_partition_type;
+<a name="l00065"></a>00065 
+<a name="l00066"></a>00066 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Partitioner&gt; <span class="keyword">class </span>start_for;
+<a name="l00067"></a>00067 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Partitioner&gt; <span class="keyword">class </span>start_reduce;
+<a name="l00068"></a>00068 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body&gt; <span class="keyword">class </span>start_reduce_with_affinity;
+<a name="l00069"></a>00069 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Partitioner&gt; <span class="keyword">class </span>start_scan;
+<a name="l00070"></a>00070 
+<a name="l00071"></a>00071 } <span class="comment">// namespace internal</span>
+<a name="l00073"></a>00073 <span class="comment"></span>
+<a name="l00075"></a>00075 
+<a name="l00077"></a><a class="code" href="a00198.html">00077</a> <span class="keyword">class </span><a class="code" href="a00198.html">simple_partitioner</a> {
+<a name="l00078"></a>00078 <span class="keyword">public</span>:
+<a name="l00079"></a>00079     <a class="code" href="a00198.html">simple_partitioner</a>() {}
+<a name="l00080"></a>00080 <span class="keyword">private</span>:
+<a name="l00081"></a>00081     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Partitioner&gt; <span class="keyword">friend</span> <span class="keyword">class </span>internal::start_for;
+<a name="l00082"></a>00082     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Partitioner&gt; <span class="keyword">friend</span> <span class="keyword">class </span>internal::start_reduce;
+<a name="l00083"></a>00083     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Partitioner&gt; <span class="keyword">friend</span> <span class="keyword">class </span>internal::start_scan;
+<a name="l00084"></a>00084 
+<a name="l00085"></a>00085     <span class="keyword">class </span>partition_type: <span class="keyword">public</span> internal::partition_type_base {
+<a name="l00086"></a>00086     <span class="keyword">public</span>:
+<a name="l00087"></a>00087         <span class="keywordtype">bool</span> should_execute_range(<span class="keyword">const</span> <a class="code" href="a00204.html">task</a>&amp; ) {<span class="keywordflow">return</span> <span class="keyword">false</span>;}
+<a name="l00088"></a>00088         partition_type( <span class="keyword">const</span> <a class="code" href="a00198.html">simple_partitioner</a>&amp; ) {}
+<a name="l00089"></a>00089         partition_type( <span class="keyword">const</span> partition_type&amp;, <a class="code" href="a00203.html">split</a> ) {}
+<a name="l00090"></a>00090     };
+<a name="l00091"></a>00091 };
+<a name="l00092"></a>00092 
+<a name="l00094"></a>00094 
+<a name="l00097"></a><a class="code" href="a00150.html">00097</a> <span class="keyword">class </span><a class="code" href="a00150.html">auto_partitioner</a> {
+<a name="l00098"></a>00098 <span class="keyword">public</span>:
+<a name="l00099"></a>00099     <a class="code" href="a00150.html">auto_partitioner</a>() {}
+<a name="l00100"></a>00100 
+<a name="l00101"></a>00101 <span class="keyword">private</span>:
+<a name="l00102"></a>00102     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Partitioner&gt; <span class="keyword">friend</span> <span class="keyword">class </span>internal::start_for;
+<a name="l00103"></a>00103     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Partitioner&gt; <span class="keyword">friend</span> <span class="keyword">class </span>internal::start_reduce;
+<a name="l00104"></a>00104     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Partitioner&gt; <span class="keyword">friend</span> <span class="keyword">class </span>internal::start_scan;
+<a name="l00105"></a>00105 
+<a name="l00106"></a>00106     <span class="keyword">class </span>partition_type: <span class="keyword">public</span> internal::partition_type_base {
+<a name="l00107"></a>00107         size_t num_chunks;
+<a name="l00108"></a>00108         <span class="keyword">static</span> <span class="keyword">const</span> size_t VICTIM_CHUNKS = 4;
+<a name="l00109"></a>00109 <span class="keyword">public</span>:
+<a name="l00110"></a>00110         <span class="keywordtype">bool</span> should_execute_range(<span class="keyword">const</span> <a class="code" href="a00204.html">task</a> &amp;t) {
+<a name="l00111"></a>00111             <span class="keywordflow">if</span>( num_chunks&lt;VICTIM_CHUNKS &amp;&amp; t.<a class="code" href="a00204.html#f9169402702f56bf519448aaf34450aa">is_stolen_task</a>() )
+<a name="l00112"></a>00112                 num_chunks = VICTIM_CHUNKS;
+<a name="l00113"></a>00113             <span class="keywordflow">return</span> num_chunks==1;
+<a name="l00114"></a>00114         }
+<a name="l00115"></a>00115         partition_type( <span class="keyword">const</span> <a class="code" href="a00150.html">auto_partitioner</a>&amp; ) : num_chunks(internal::get_initial_auto_partitioner_divisor()) {}
+<a name="l00116"></a>00116         partition_type( partition_type&amp; pt, <a class="code" href="a00203.html">split</a> ) {
+<a name="l00117"></a>00117             num_chunks = pt.num_chunks /= 2u;
+<a name="l00118"></a>00118         }
+<a name="l00119"></a>00119     };
+<a name="l00120"></a>00120 };
+<a name="l00121"></a>00121 
+<a name="l00123"></a><a class="code" href="a00145.html">00123</a> <span class="keyword">class </span><a class="code" href="a00145.html">affinity_partitioner</a>: internal::affinity_partitioner_base_v3 {
+<a name="l00124"></a>00124 <span class="keyword">public</span>:
+<a name="l00125"></a>00125     <a class="code" href="a00145.html">affinity_partitioner</a>() {}
+<a name="l00126"></a>00126 
+<a name="l00127"></a>00127 <span class="keyword">private</span>:
+<a name="l00128"></a>00128     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Partitioner&gt; <span class="keyword">friend</span> <span class="keyword">class </span>internal::start_for;
+<a name="l00129"></a>00129     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Partitioner&gt; <span class="keyword">friend</span> <span class="keyword">class </span>internal::start_reduce;
+<a name="l00130"></a>00130     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body&gt; <span class="keyword">friend</span> <span class="keyword">class </span>internal::start_reduce_with_affinity;
+<a name="l00131"></a>00131     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Range, <span class="keyword">typename</span> Body, <span class="keyword">typename</span> Partitioner&gt; <span class="keyword">friend</span> <span class="keyword">class </span>internal::start_scan;
+<a name="l00132"></a>00132 
+<a name="l00133"></a>00133     <span class="keyword">typedef</span> internal::affinity_partition_type partition_type;
+<a name="l00134"></a>00134     <span class="keyword">friend</span> <span class="keyword">class </span>internal::affinity_partition_type;
+<a name="l00135"></a>00135 };
+<a name="l00136"></a>00136 
+<a name="l00138"></a>00138 <span class="keyword">namespace </span>internal {
+<a name="l00139"></a>00139 
+<a name="l00140"></a>00140 <span class="keyword">class </span>affinity_partition_type: <span class="keyword">public</span> no_copy {
+<a name="l00142"></a>00142     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">unsigned</span> factor = 16;
+<a name="l00143"></a>00143     <span class="keyword">static</span> <span class="keyword">const</span> size_t VICTIM_CHUNKS = 4;
+<a name="l00144"></a>00144 
+<a name="l00145"></a>00145     internal::affinity_id* my_array;
+<a name="l00146"></a>00146     <a class="code" href="a00207.html">task_list</a> delay_list;
+<a name="l00147"></a>00147     <span class="keywordtype">unsigned</span> map_begin, map_end;
+<a name="l00148"></a>00148     size_t num_chunks;
+<a name="l00149"></a>00149 <span class="keyword">public</span>:
+<a name="l00150"></a>00150     affinity_partition_type( <a class="code" href="a00145.html">affinity_partitioner</a>&amp; ap ) {
+<a name="l00151"></a>00151         __TBB_ASSERT( (factor&amp;(factor-1))==0, <span class="stringliteral">"factor must be power of two"</span> ); 
+<a name="l00152"></a>00152         ap.resize(factor);
+<a name="l00153"></a>00153         my_array = ap.my_array;
+<a name="l00154"></a>00154         map_begin = 0;
+<a name="l00155"></a>00155         map_end = unsigned(ap.my_size);
+<a name="l00156"></a>00156         num_chunks = internal::get_initial_auto_partitioner_divisor();
+<a name="l00157"></a>00157     }
+<a name="l00158"></a>00158     affinity_partition_type(affinity_partition_type&amp; p, split) : my_array(p.my_array) {
+<a name="l00159"></a>00159         __TBB_ASSERT( p.map_end-p.map_begin&lt;factor || (p.map_end-p.map_begin)%factor==0, NULL );
+<a name="l00160"></a>00160         num_chunks = p.num_chunks /= 2;
+<a name="l00161"></a>00161         <span class="keywordtype">unsigned</span> e = p.map_end;
+<a name="l00162"></a>00162         <span class="keywordtype">unsigned</span> d = (e - p.map_begin)/2;
+<a name="l00163"></a>00163         <span class="keywordflow">if</span>( d&gt;factor ) 
+<a name="l00164"></a>00164             d &amp;= 0u-factor;
+<a name="l00165"></a>00165         map_end = e;
+<a name="l00166"></a>00166         map_begin = p.map_end = e-d;
+<a name="l00167"></a>00167     }
+<a name="l00168"></a>00168 
+<a name="l00169"></a>00169     <span class="keywordtype">bool</span> should_execute_range(<span class="keyword">const</span> task &amp;t) {
+<a name="l00170"></a>00170         <span class="keywordflow">if</span>( num_chunks &lt; VICTIM_CHUNKS &amp;&amp; t.is_stolen_task() )
+<a name="l00171"></a>00171             num_chunks = VICTIM_CHUNKS;
+<a name="l00172"></a>00172         <span class="keywordflow">return</span> num_chunks == 1;
+<a name="l00173"></a>00173     }
+<a name="l00174"></a>00174 
+<a name="l00175"></a>00175     <span class="keywordtype">void</span> set_affinity( task &amp;t ) {
+<a name="l00176"></a>00176         <span class="keywordflow">if</span>( map_begin&lt;map_end )
+<a name="l00177"></a>00177             t.set_affinity( my_array[map_begin] );
+<a name="l00178"></a>00178     }
+<a name="l00179"></a>00179     <span class="keywordtype">void</span> note_affinity( <a class="code" href="a00204.html#d61bb32389d3857bf7511d69beaafb76">task::affinity_id</a> <span class="keywordtype">id</span> ) {
+<a name="l00180"></a>00180         <span class="keywordflow">if</span>( map_begin&lt;map_end ) 
+<a name="l00181"></a>00181             my_array[map_begin] = id;
+<a name="l00182"></a>00182     }
+<a name="l00183"></a>00183     task* continue_after_execute_range() {
+<a name="l00184"></a>00184         task* first = NULL;
+<a name="l00185"></a>00185         <span class="keywordflow">if</span>( !delay_list.empty() ) {
+<a name="l00186"></a>00186             first = &amp;delay_list.pop_front();
+<a name="l00187"></a>00187             <span class="keywordflow">while</span>( !delay_list.empty() ) {
+<a name="l00188"></a>00188                 task::spawn(*first);
+<a name="l00189"></a>00189                 first = &amp;delay_list.pop_front();
+<a name="l00190"></a>00190             }
+<a name="l00191"></a>00191         }
+<a name="l00192"></a>00192         <span class="keywordflow">return</span> first;
+<a name="l00193"></a>00193     }
+<a name="l00194"></a>00194     <span class="keywordtype">bool</span> decide_whether_to_delay() {
+<a name="l00195"></a>00195         <span class="comment">// The possible underflow caused by "-1u" is deliberate</span>
+<a name="l00196"></a>00196         <span class="keywordflow">return</span> (map_begin&amp;(factor-1))==0 &amp;&amp; map_end-map_begin-1u&lt;factor;
+<a name="l00197"></a>00197     }
+<a name="l00198"></a>00198     <span class="keywordtype">void</span> spawn_or_delay( <span class="keywordtype">bool</span> delay, task&amp; b ) {
+<a name="l00199"></a>00199         <span class="keywordflow">if</span>( delay )  
+<a name="l00200"></a>00200             delay_list.push_back(b);
+<a name="l00201"></a>00201         <span class="keywordflow">else</span> 
+<a name="l00202"></a>00202             task::spawn(b);
+<a name="l00203"></a>00203     }
+<a name="l00204"></a>00204 
+<a name="l00205"></a>00205     ~affinity_partition_type() {
+<a name="l00206"></a>00206         <span class="comment">// The delay_list can be non-empty if an exception is thrown.</span>
+<a name="l00207"></a>00207         <span class="keywordflow">while</span>( !delay_list.empty() ) {
+<a name="l00208"></a>00208             task&amp; t = delay_list.pop_front();
+<a name="l00209"></a>00209             t.destroy(t);
+<a name="l00210"></a>00210         } 
+<a name="l00211"></a>00211     }
+<a name="l00212"></a>00212 };
+<a name="l00213"></a>00213 
+<a name="l00214"></a>00214 } <span class="comment">// namespace internal</span>
+<a name="l00216"></a>00216 <span class="comment"></span>
+<a name="l00217"></a>00217 
+<a name="l00218"></a>00218 } <span class="comment">// namespace tbb</span>
+<a name="l00219"></a>00219 
+<a name="l00220"></a>00220 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_partitioner_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
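
The partitioner.h listing above ends with the internals of affinity_partition_type: set_affinity()/note_affinity() record which worker executed each chunk so that later loops over the same range can be mapped back to the same threads, and the delay list defers spawning to keep that mapping stable. A minimal usage sketch of the public side of this machinery, tbb::affinity_partitioner, follows; it is not part of the committed sources, and the names Scale and relax are illustrative only.

    #include <vector>
    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"
    #include "tbb/partitioner.h"

    struct Scale {
        std::vector<double>* v;
        void operator()( const tbb::blocked_range<size_t>& r ) const {
            for( size_t i = r.begin(); i != r.end(); ++i )
                (*v)[i] *= 0.5;                    // placeholder per-element work
        }
    };

    void relax( std::vector<double>& x, int sweeps ) {
        tbb::affinity_partitioner ap;              // must outlive every loop that shares it
        Scale body; body.v = &x;
        for( int s = 0; s < sweeps; ++s )
            // Passing the same partitioner on every sweep lets TBB replay the
            // thread-to-chunk mapping recorded via note_affinity() above.
            tbb::parallel_for( tbb::blocked_range<size_t>(0, x.size()), body, ap );
    }

Reusing one affinity_partitioner object across sweeps is what makes the recorded affinities pay off; a fresh object per call has nothing to replay and behaves like an ordinary partitioner on its first sweep.
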
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00419.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00419.html
new file mode 100644 (file)
index 0000000..89c2dbe
--- /dev/null
@@ -0,0 +1,511 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>pipeline.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>pipeline.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_pipeline_H </span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_pipeline_H </span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "atomic.h"</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#include "task.h"</span>
+<a name="l00026"></a>00026 <span class="preprocessor">#include "tbb_allocator.h"</span>
+<a name="l00027"></a>00027 <span class="preprocessor">#include &lt;cstddef&gt;</span>
+<a name="l00028"></a>00028 
+<a name="l00029"></a>00029 <span class="keyword">namespace </span>tbb {
+<a name="l00030"></a>00030 
+<a name="l00031"></a>00031 <span class="keyword">class </span>pipeline;
+<a name="l00032"></a>00032 <span class="keyword">class </span>filter;
+<a name="l00033"></a>00033 
+<a name="l00035"></a>00035 <span class="keyword">namespace </span>internal {
+<a name="l00036"></a>00036 
+<a name="l00037"></a>00037 <span class="comment">// The argument for PIPELINE_VERSION should be an integer between 2 and 9</span>
+<a name="l00038"></a>00038 <span class="preprocessor">#define __TBB_PIPELINE_VERSION(x) (unsigned char)(x-2)&lt;&lt;1</span>
+<a name="l00039"></a>00039 <span class="preprocessor"></span>
+<a name="l00040"></a>00040 <span class="keyword">typedef</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">long</span> Token;
+<a name="l00041"></a>00041 <span class="keyword">typedef</span> <span class="keywordtype">long</span> tokendiff_t;
+<a name="l00042"></a>00042 <span class="keyword">class </span>stage_task;
+<a name="l00043"></a>00043 <span class="keyword">class </span>input_buffer;
+<a name="l00044"></a>00044 <span class="keyword">class </span>pipeline_root_task;
+<a name="l00045"></a>00045 <span class="keyword">class </span>pipeline_cleaner;
+<a name="l00046"></a>00046 
+<a name="l00047"></a>00047 } <span class="comment">// namespace internal</span>
+<a name="l00048"></a>00048 
+<a name="l00049"></a>00049 <span class="keyword">namespace </span>interface5 {
+<a name="l00050"></a>00050     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt; <span class="keyword">class </span>filter_t;
+<a name="l00051"></a>00051 
+<a name="l00052"></a>00052     <span class="keyword">namespace </span>internal {
+<a name="l00053"></a>00053         <span class="keyword">class </span>pipeline_proxy;
+<a name="l00054"></a>00054     }
+<a name="l00055"></a>00055 }
+<a name="l00056"></a>00056 
+<a name="l00058"></a>00058 
+<a name="l00060"></a>00060 
+<a name="l00061"></a><a class="code" href="a00169.html">00061</a> <span class="keyword">class </span><a class="code" href="a00169.html">filter</a>: internal::no_copy {
+<a name="l00062"></a>00062 <span class="keyword">private</span>:
+<a name="l00064"></a>00064     <span class="keyword">static</span> <a class="code" href="a00169.html">filter</a>* not_in_pipeline() {<span class="keywordflow">return</span> reinterpret_cast&lt;filter*&gt;(intptr_t(-1));}
+<a name="l00065"></a>00065     
+<a name="l00067"></a>00067     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">char</span> filter_is_serial = 0x1; 
+<a name="l00068"></a>00068 
+<a name="l00070"></a>00070 
+<a name="l00072"></a>00072     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">char</span> filter_is_out_of_order = 0x1&lt;&lt;4;  
+<a name="l00073"></a>00073 
+<a name="l00075"></a>00075     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">char</span> filter_is_bound = 0x1&lt;&lt;5;  
+<a name="l00076"></a>00076 
+<a name="l00078"></a>00078     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">char</span> exact_exception_propagation =
+<a name="l00079"></a>00079 <span class="preprocessor">#if TBB_USE_CAPTURED_EXCEPTION</span>
+<a name="l00080"></a>00080 <span class="preprocessor"></span>            0x0;
+<a name="l00081"></a>00081 <span class="preprocessor">#else</span>
+<a name="l00082"></a>00082 <span class="preprocessor"></span>            0x1&lt;&lt;7;
+<a name="l00083"></a>00083 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_CAPTURED_EXCEPTION */</span>
+<a name="l00084"></a>00084 
+<a name="l00085"></a>00085     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">char</span> current_version = __TBB_PIPELINE_VERSION(5);
+<a name="l00086"></a>00086     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">char</span> version_mask = 0x7&lt;&lt;1; <span class="comment">// bits 1-3 are for version</span>
+<a name="l00087"></a>00087 <span class="keyword">public</span>:
+<a name="l00088"></a>00088     <span class="keyword">enum</span> mode {
+<a name="l00090"></a>00090         parallel = current_version | filter_is_out_of_order, 
+<a name="l00092"></a>00092         serial_in_order = current_version | filter_is_serial,
+<a name="l00094"></a>00094         serial_out_of_order = current_version | filter_is_serial | filter_is_out_of_order,
+<a name="l00096"></a>00096         serial = serial_in_order
+<a name="l00097"></a>00097     };
+<a name="l00098"></a>00098 <span class="keyword">protected</span>:
+<a name="l00099"></a>00099     <a class="code" href="a00169.html">filter</a>( <span class="keywordtype">bool</span> is_serial_ ) : 
+<a name="l00100"></a>00100         next_filter_in_pipeline(not_in_pipeline()),
+<a name="l00101"></a>00101         my_input_buffer(NULL),
+<a name="l00102"></a>00102         my_filter_mode(static_cast&lt;unsigned char&gt;((is_serial_ ? serial : parallel) | exact_exception_propagation)),
+<a name="l00103"></a>00103         prev_filter_in_pipeline(not_in_pipeline()),
+<a name="l00104"></a>00104         my_pipeline(NULL),
+<a name="l00105"></a>00105         next_segment(NULL)
+<a name="l00106"></a>00106     {}
+<a name="l00107"></a>00107     
+<a name="l00108"></a>00108     <a class="code" href="a00169.html">filter</a>( mode filter_mode ) :
+<a name="l00109"></a>00109         next_filter_in_pipeline(not_in_pipeline()),
+<a name="l00110"></a>00110         my_input_buffer(NULL),
+<a name="l00111"></a>00111         my_filter_mode(static_cast&lt;unsigned char&gt;(filter_mode | exact_exception_propagation)),
+<a name="l00112"></a>00112         prev_filter_in_pipeline(not_in_pipeline()),
+<a name="l00113"></a>00113         my_pipeline(NULL),
+<a name="l00114"></a>00114         next_segment(NULL)
+<a name="l00115"></a>00115     {}
+<a name="l00116"></a>00116 
+<a name="l00117"></a>00117 <span class="keyword">public</span>:
+<a name="l00119"></a><a class="code" href="a00169.html#fcfec27656a69ff2072802ac001e936f">00119</a>     <span class="keywordtype">bool</span> <a class="code" href="a00169.html#fcfec27656a69ff2072802ac001e936f">is_serial</a>()<span class="keyword"> const </span>{
+<a name="l00120"></a>00120         <span class="keywordflow">return</span> bool( my_filter_mode &amp; filter_is_serial );
+<a name="l00121"></a>00121     }  
+<a name="l00122"></a>00122     
+<a name="l00124"></a><a class="code" href="a00169.html#cd53206c4795ef2df5df26b795caf692">00124</a>     <span class="keywordtype">bool</span> <a class="code" href="a00169.html#cd53206c4795ef2df5df26b795caf692">is_ordered</a>()<span class="keyword"> const </span>{
+<a name="l00125"></a>00125         <span class="keywordflow">return</span> (my_filter_mode &amp; (filter_is_out_of_order|filter_is_serial))==filter_is_serial;
+<a name="l00126"></a>00126     }
+<a name="l00127"></a>00127 
+<a name="l00129"></a><a class="code" href="a00169.html#15c29cae5d237e6d63dbfe5c94af89d5">00129</a>     <span class="keywordtype">bool</span> <a class="code" href="a00169.html#15c29cae5d237e6d63dbfe5c94af89d5">is_bound</a>()<span class="keyword"> const </span>{
+<a name="l00130"></a>00130         <span class="keywordflow">return</span> ( my_filter_mode &amp; filter_is_bound )==filter_is_bound;
+<a name="l00131"></a>00131     }
+<a name="l00132"></a>00132 
+<a name="l00134"></a>00134 
+<a name="l00135"></a>00135     <span class="keyword">virtual</span> <span class="keywordtype">void</span>* <a class="code" href="a00169.html#fa1b3dc1f4f47563ccab7f4d92f5b543">operator()</a>( <span class="keywordtype">void</span>* item ) = 0;
+<a name="l00136"></a>00136 
+<a name="l00138"></a>00138 
+<a name="l00139"></a>00139     <span class="keyword">virtual</span> __TBB_EXPORTED_METHOD <a class="code" href="a00169.html#66d159f362293e3964ba3da8bc1d2604">~filter</a>();
+<a name="l00140"></a>00140 
+<a name="l00141"></a>00141 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00143"></a>00143 <span class="preprocessor"></span>
+<a name="l00145"></a><a class="code" href="a00169.html#56275eb889c77c4807967133e21401bd">00145</a> <span class="preprocessor">    virtual void finalize( void* </span><span class="comment">/*item*/</span> ) {};
+<a name="l00146"></a>00146 <span class="preprocessor">#endif</span>
+<a name="l00147"></a>00147 <span class="preprocessor"></span>
+<a name="l00148"></a>00148 <span class="keyword">private</span>:
+<a name="l00150"></a>00150     <a class="code" href="a00169.html">filter</a>* next_filter_in_pipeline;
+<a name="l00151"></a>00151 
+<a name="l00153"></a>00153     <span class="comment">//  (pipeline has not yet reached end_of_input or this filter has not yet</span>
+<a name="l00154"></a>00154     <span class="comment">//  seen the last token produced by input_filter)</span>
+<a name="l00155"></a>00155     <span class="keywordtype">bool</span> has_more_work();
+<a name="l00156"></a>00156 
+<a name="l00158"></a>00158 
+<a name="l00159"></a>00159     internal::input_buffer* my_input_buffer;
+<a name="l00160"></a>00160 
+<a name="l00161"></a>00161     <span class="keyword">friend</span> <span class="keyword">class </span>internal::stage_task;
+<a name="l00162"></a>00162     <span class="keyword">friend</span> <span class="keyword">class </span>internal::pipeline_root_task;
+<a name="l00163"></a>00163     <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00185.html">pipeline</a>;
+<a name="l00164"></a>00164     <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00214.html">thread_bound_filter</a>;
+<a name="l00165"></a>00165 
+<a name="l00167"></a>00167     <span class="keyword">const</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">char</span> my_filter_mode;
+<a name="l00168"></a>00168 
+<a name="l00170"></a>00170     <a class="code" href="a00169.html">filter</a>* prev_filter_in_pipeline;
+<a name="l00171"></a>00171 
+<a name="l00173"></a>00173     <a class="code" href="a00185.html">pipeline</a>* my_pipeline;
+<a name="l00174"></a>00174 
+<a name="l00176"></a>00176 
+<a name="l00177"></a>00177     <a class="code" href="a00169.html">filter</a>* next_segment;
+<a name="l00178"></a>00178 };
+<a name="l00179"></a>00179 
+<a name="l00181"></a>00181 
+<a name="l00182"></a><a class="code" href="a00214.html">00182</a> <span class="keyword">class </span><a class="code" href="a00214.html">thread_bound_filter</a>: <span class="keyword">public</span> <a class="code" href="a00169.html">filter</a> {
+<a name="l00183"></a>00183 <span class="keyword">public</span>:
+<a name="l00184"></a>00184     <span class="keyword">enum</span> result_type {
+<a name="l00185"></a>00185         <span class="comment">// item was processed</span>
+<a name="l00186"></a>00186         success,
+<a name="l00187"></a>00187         <span class="comment">// item is currently not available</span>
+<a name="l00188"></a>00188         item_not_available,
+<a name="l00189"></a>00189         <span class="comment">// there are no more items to process</span>
+<a name="l00190"></a>00190         end_of_stream
+<a name="l00191"></a>00191     };
+<a name="l00192"></a>00192 <span class="keyword">protected</span>:
+<a name="l00193"></a>00193     <a class="code" href="a00214.html">thread_bound_filter</a>(mode filter_mode): 
+<a name="l00194"></a>00194          <a class="code" href="a00169.html">filter</a>(static_cast&lt;mode&gt;(filter_mode | filter::filter_is_bound | filter::exact_exception_propagation))
+<a name="l00195"></a>00195     {}
+<a name="l00196"></a>00196 <span class="keyword">public</span>:
+<a name="l00198"></a>00198 
+<a name="l00203"></a>00203     result_type __TBB_EXPORTED_METHOD <a class="code" href="a00214.html#c4f90f2c771bce748beb9be734fa286c">try_process_item</a>(); 
+<a name="l00204"></a>00204 
+<a name="l00206"></a>00206 
+<a name="l00210"></a>00210     result_type __TBB_EXPORTED_METHOD <a class="code" href="a00214.html#5e726bdc7fbd924c0b07bd558b1d4d5d">process_item</a>();
+<a name="l00211"></a>00211 
+<a name="l00212"></a>00212 <span class="keyword">private</span>:
+<a name="l00214"></a>00214     result_type internal_process_item(<span class="keywordtype">bool</span> is_blocking);
+<a name="l00215"></a>00215 };
+<a name="l00216"></a>00216 
+<a name="l00218"></a>00218 
+<a name="l00219"></a><a class="code" href="a00185.html">00219</a> <span class="keyword">class </span><a class="code" href="a00185.html">pipeline</a> {
+<a name="l00220"></a>00220 <span class="keyword">public</span>:
+<a name="l00222"></a>00222     __TBB_EXPORTED_METHOD <a class="code" href="a00185.html#596dc3beba27099c4c8581cb419e1a59">pipeline</a>();
+<a name="l00223"></a>00223 
+<a name="l00226"></a>00226     <span class="keyword">virtual</span> __TBB_EXPORTED_METHOD <a class="code" href="a00185.html#49513c6c24f9d5bbbb27edca5efe01c9">~pipeline</a>();
+<a name="l00227"></a>00227 
+<a name="l00229"></a>00229     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD <a class="code" href="a00185.html#38fb5c9c8395dd6f89a4ae2011a83e0d">add_filter</a>( <a class="code" href="a00169.html">filter</a>&amp; filter_ );
+<a name="l00230"></a>00230 
+<a name="l00232"></a>00232     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD <a class="code" href="a00185.html#f627616049b3fe36801f37ee40403ef8">run</a>( size_t max_number_of_live_tokens );
+<a name="l00233"></a>00233 
+<a name="l00234"></a>00234 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00236"></a>00236 <span class="preprocessor">    void __TBB_EXPORTED_METHOD run( size_t max_number_of_live_tokens, tbb::task_group_context&amp; context );</span>
+<a name="l00237"></a>00237 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00238"></a>00238 <span class="preprocessor"></span>
+<a name="l00240"></a>00240     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD <a class="code" href="a00185.html#2c84aef5b834b555ee220b176e25931e">clear</a>();
+<a name="l00241"></a>00241 
+<a name="l00242"></a>00242 <span class="keyword">private</span>:
+<a name="l00243"></a>00243     <span class="keyword">friend</span> <span class="keyword">class </span>internal::stage_task;
+<a name="l00244"></a>00244     <span class="keyword">friend</span> <span class="keyword">class </span>internal::pipeline_root_task;
+<a name="l00245"></a>00245     <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00169.html">filter</a>;
+<a name="l00246"></a>00246     <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00214.html">thread_bound_filter</a>;
+<a name="l00247"></a>00247     <span class="keyword">friend</span> <span class="keyword">class </span>internal::pipeline_cleaner;
+<a name="l00248"></a>00248     <span class="keyword">friend</span> <span class="keyword">class </span>tbb::interface5::internal::pipeline_proxy;
+<a name="l00249"></a>00249 
+<a name="l00251"></a>00251     <a class="code" href="a00169.html">filter</a>* filter_list;
+<a name="l00252"></a>00252 
+<a name="l00254"></a>00254     <a class="code" href="a00169.html">filter</a>* filter_end;
+<a name="l00255"></a>00255 
+<a name="l00257"></a>00257     <a class="code" href="a00204.html">task</a>* end_counter;
+<a name="l00258"></a>00258 
+<a name="l00260"></a>00260     <a class="code" href="a00147.html">atomic&lt;internal::Token&gt;</a> input_tokens;
+<a name="l00261"></a>00261 
+<a name="l00263"></a>00263     <a class="code" href="a00147.html">atomic&lt;internal::Token&gt;</a> token_counter;
+<a name="l00264"></a>00264 
+<a name="l00266"></a>00266     <span class="keywordtype">bool</span> end_of_input;
+<a name="l00267"></a>00267 
+<a name="l00269"></a>00269     <span class="keywordtype">bool</span> has_thread_bound_filters;
+<a name="l00270"></a>00270 
+<a name="l00272"></a>00272     <span class="keywordtype">void</span> remove_filter( <a class="code" href="a00169.html">filter</a>&amp; filter_ );
+<a name="l00273"></a>00273 
+<a name="l00275"></a>00275     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD inject_token( <a class="code" href="a00204.html">task</a>&amp; <span class="keyword">self</span> );
+<a name="l00276"></a>00276 
+<a name="l00277"></a>00277 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00279"></a>00279 <span class="preprocessor">    void clear_filters();</span>
+<a name="l00280"></a>00280 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00281"></a>00281 <span class="preprocessor"></span>};
+<a name="l00282"></a>00282 
+<a name="l00283"></a>00283 <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00284"></a>00284 <span class="comment">// Support for lambda-friendly parallel_pipeline interface</span>
+<a name="l00285"></a>00285 <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00286"></a>00286 
+<a name="l00287"></a>00287 <span class="keyword">namespace </span>interface5 {
+<a name="l00288"></a>00288 
+<a name="l00289"></a>00289 <span class="keyword">namespace </span>internal {
+<a name="l00290"></a>00290     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> U, <span class="keyword">typename</span> Body&gt; <span class="keyword">class </span>concrete_filter;
+<a name="l00291"></a>00291 }
+<a name="l00292"></a>00292 
+<a name="l00294"></a><a class="code" href="a00172.html">00294</a> <span class="keyword">class </span><a class="code" href="a00172.html">flow_control</a> {
+<a name="l00295"></a>00295     <span class="keywordtype">bool</span> is_pipeline_stopped;
+<a name="l00296"></a>00296     <a class="code" href="a00172.html">flow_control</a>() { is_pipeline_stopped = <span class="keyword">false</span>; }
+<a name="l00297"></a>00297     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> U, <span class="keyword">typename</span> Body&gt; <span class="keyword">friend</span> <span class="keyword">class </span>internal::concrete_filter;
+<a name="l00298"></a>00298 <span class="keyword">public</span>:
+<a name="l00299"></a>00299     <span class="keywordtype">void</span> stop() { is_pipeline_stopped = <span class="keyword">true</span>; }
+<a name="l00300"></a>00300 };
+<a name="l00301"></a>00301 
+<a name="l00303"></a>00303 <span class="keyword">namespace </span>internal {
+<a name="l00304"></a>00304 
+<a name="l00305"></a>00305 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> U, <span class="keyword">typename</span> Body&gt;
+<a name="l00306"></a>00306 <span class="keyword">class </span>concrete_filter: <span class="keyword">public</span> tbb::<a class="code" href="a00169.html">filter</a> {
+<a name="l00307"></a>00307     <span class="keyword">const</span> Body&amp; my_body;
+<a name="l00308"></a>00308 
+<a name="l00309"></a>00309     <span class="keyword">typedef</span> <span class="keyword">typename</span> <a class="code" href="a00209.html">tbb::tbb_allocator&lt;U&gt;</a> u_allocator;
+<a name="l00310"></a>00310     <span class="keyword">typedef</span> <span class="keyword">typename</span> <a class="code" href="a00209.html">tbb::tbb_allocator&lt;T&gt;</a> t_allocator;
+<a name="l00311"></a>00311 
+<a name="l00312"></a>00312     <span class="comment">/*override*/</span> <span class="keywordtype">void</span>* operator()(<span class="keywordtype">void</span>* input) {
+<a name="l00313"></a>00313         T* temp_input = (T*)input;
+<a name="l00314"></a>00314         <span class="comment">// Call user's operator()() here</span>
+<a name="l00315"></a>00315         U* output_u = u_allocator().allocate(1);
+<a name="l00316"></a>00316         <span class="keywordtype">void</span>* output = (<span class="keywordtype">void</span>*) <span class="keyword">new</span> (output_u) U(my_body(*temp_input)); 
+<a name="l00317"></a>00317         t_allocator().destroy(temp_input);
+<a name="l00318"></a>00318         t_allocator().deallocate(temp_input,1);
+<a name="l00319"></a>00319         <span class="keywordflow">return</span> output;
+<a name="l00320"></a>00320     }
+<a name="l00321"></a>00321 
+<a name="l00322"></a>00322 <span class="keyword">public</span>:
+<a name="l00323"></a>00323     concrete_filter(tbb::filter::mode filter_mode, <span class="keyword">const</span> Body&amp; body) : <a class="code" href="a00169.html">filter</a>(filter_mode), my_body(body) {}
+<a name="l00324"></a>00324 };
+<a name="l00325"></a>00325 
+<a name="l00326"></a>00326 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> U, <span class="keyword">typename</span> Body&gt;
+<a name="l00327"></a>00327 <span class="keyword">class </span>concrete_filter&lt;void,U,Body&gt;: <span class="keyword">public</span> filter {
+<a name="l00328"></a>00328     <span class="keyword">const</span> Body&amp; my_body;
+<a name="l00329"></a>00329 
+<a name="l00330"></a>00330     <span class="keyword">typedef</span> <span class="keyword">typename</span> <a class="code" href="a00209.html">tbb::tbb_allocator&lt;U&gt;</a> u_allocator;
+<a name="l00331"></a>00331 
+<a name="l00332"></a>00332     <span class="comment">/*override*/</span><span class="keywordtype">void</span>* operator()(<span class="keywordtype">void</span>*) {
+<a name="l00333"></a>00333         flow_control control;
+<a name="l00334"></a>00334         U* output_u = u_allocator().allocate(1);
+<a name="l00335"></a>00335         (void) <span class="keyword">new</span> (output_u) U(my_body(control));
+<a name="l00336"></a>00336         <span class="keywordflow">if</span>(control.is_pipeline_stopped) {
+<a name="l00337"></a>00337             u_allocator().destroy(output_u);
+<a name="l00338"></a>00338             u_allocator().deallocate(output_u,1);
+<a name="l00339"></a>00339             output_u = NULL;
+<a name="l00340"></a>00340         }
+<a name="l00341"></a>00341         <span class="keywordflow">return</span> (<span class="keywordtype">void</span>*)output_u;
+<a name="l00342"></a>00342     }
+<a name="l00343"></a>00343 <span class="keyword">public</span>:
+<a name="l00344"></a>00344     concrete_filter(tbb::filter::mode filter_mode, <span class="keyword">const</span> Body&amp; body) : filter(filter_mode), my_body(body) {}
+<a name="l00345"></a>00345 };
+<a name="l00346"></a>00346 
+<a name="l00347"></a>00347 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> Body&gt;
+<a name="l00348"></a>00348 <span class="keyword">class </span>concrete_filter&lt;T,void,Body&gt;: <span class="keyword">public</span> filter {
+<a name="l00349"></a>00349     <span class="keyword">const</span> Body&amp; my_body;
+<a name="l00350"></a>00350    
+<a name="l00351"></a>00351     <span class="keyword">typedef</span> <span class="keyword">typename</span> <a class="code" href="a00209.html">tbb::tbb_allocator&lt;T&gt;</a> t_allocator;
+<a name="l00352"></a>00352 
+<a name="l00353"></a>00353     <span class="comment">/*override*/</span> <span class="keywordtype">void</span>* operator()(<span class="keywordtype">void</span>* input) {
+<a name="l00354"></a>00354         T* temp_input = (T*)input;
+<a name="l00355"></a>00355         my_body(*temp_input);
+<a name="l00356"></a>00356         t_allocator().destroy(temp_input);
+<a name="l00357"></a>00357         t_allocator().deallocate(temp_input,1);
+<a name="l00358"></a>00358         <span class="keywordflow">return</span> NULL;
+<a name="l00359"></a>00359     }
+<a name="l00360"></a>00360 <span class="keyword">public</span>:
+<a name="l00361"></a>00361     concrete_filter(tbb::filter::mode filter_mode, <span class="keyword">const</span> Body&amp; body) : filter(filter_mode), my_body(body) {}
+<a name="l00362"></a>00362 };
+<a name="l00363"></a>00363 
+<a name="l00364"></a>00364 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Body&gt;
+<a name="l00365"></a>00365 <span class="keyword">class </span>concrete_filter&lt;void,void,Body&gt;: <span class="keyword">public</span> filter {
+<a name="l00366"></a>00366     <span class="keyword">const</span> Body&amp; my_body;
+<a name="l00367"></a>00367     
+<a name="l00369"></a>00369     <span class="comment">/*override*/</span> <span class="keywordtype">void</span>* operator()(<span class="keywordtype">void</span>*) {
+<a name="l00370"></a>00370         flow_control control;
+<a name="l00371"></a>00371         my_body(control);
+<a name="l00372"></a>00372         <span class="keywordtype">void</span>* output = control.is_pipeline_stopped ? NULL : (<span class="keywordtype">void</span>*)(intptr_t)-1; 
+<a name="l00373"></a>00373         <span class="keywordflow">return</span> output;
+<a name="l00374"></a>00374     }
+<a name="l00375"></a>00375 <span class="keyword">public</span>:
+<a name="l00376"></a>00376     concrete_filter(filter::mode filter_mode, <span class="keyword">const</span> Body&amp; body) : filter(filter_mode), my_body(body) {}
+<a name="l00377"></a>00377 };
+<a name="l00378"></a>00378 
+<a name="l00380"></a>00380 
+<a name="l00381"></a>00381 <span class="keyword">class </span>pipeline_proxy {
+<a name="l00382"></a>00382     <a class="code" href="a00185.html">tbb::pipeline</a> my_pipe;
+<a name="l00383"></a>00383 <span class="keyword">public</span>:
+<a name="l00384"></a>00384     pipeline_proxy( <span class="keyword">const</span> filter_t&lt;void,void&gt;&amp; filter_chain );
+<a name="l00385"></a>00385     ~pipeline_proxy() {
+<a name="l00386"></a>00386         <span class="keywordflow">while</span>( filter* f = my_pipe.filter_list ) 
+<a name="l00387"></a>00387             <span class="keyword">delete</span> f; <span class="comment">// filter destructor removes it from the pipeline</span>
+<a name="l00388"></a>00388     }
+<a name="l00389"></a>00389     <a class="code" href="a00185.html">tbb::pipeline</a>* operator-&gt;() { <span class="keywordflow">return</span> &amp;my_pipe; }
+<a name="l00390"></a>00390 };
+<a name="l00391"></a>00391 
+<a name="l00393"></a>00393 
+<a name="l00394"></a>00394 <span class="keyword">class </span>filter_node: tbb::internal::no_copy {
+<a name="l00396"></a>00396     <a class="code" href="a00147.html">tbb::atomic&lt;intptr_t&gt;</a> ref_count;
+<a name="l00397"></a>00397 <span class="keyword">protected</span>:
+<a name="l00398"></a>00398     filter_node() {
+<a name="l00399"></a>00399         ref_count = 0;
+<a name="l00400"></a>00400 <span class="preprocessor">#ifdef __TBB_TEST_FILTER_NODE_COUNT</span>
+<a name="l00401"></a>00401 <span class="preprocessor"></span>        ++(__TBB_TEST_FILTER_NODE_COUNT);
+<a name="l00402"></a>00402 <span class="preprocessor">#endif</span>
+<a name="l00403"></a>00403 <span class="preprocessor"></span>    }
+<a name="l00404"></a>00404 <span class="keyword">public</span>:
+<a name="l00406"></a>00406     <span class="keyword">virtual</span> <span class="keywordtype">void</span> add_to( pipeline&amp; ) = 0;
+<a name="l00408"></a>00408     <span class="keywordtype">void</span> add_ref() {++ref_count;}
+<a name="l00410"></a>00410     <span class="keywordtype">void</span> remove_ref() {
+<a name="l00411"></a>00411         __TBB_ASSERT(ref_count&gt;0,<span class="stringliteral">"ref_count underflow"</span>);
+<a name="l00412"></a>00412         <span class="keywordflow">if</span>( --ref_count==0 ) 
+<a name="l00413"></a>00413             <span class="keyword">delete</span> <span class="keyword">this</span>;
+<a name="l00414"></a>00414     }
+<a name="l00415"></a>00415     <span class="keyword">virtual</span> ~filter_node() {
+<a name="l00416"></a>00416 <span class="preprocessor">#ifdef __TBB_TEST_FILTER_NODE_COUNT</span>
+<a name="l00417"></a>00417 <span class="preprocessor"></span>        --(__TBB_TEST_FILTER_NODE_COUNT);
+<a name="l00418"></a>00418 <span class="preprocessor">#endif</span>
+<a name="l00419"></a>00419 <span class="preprocessor"></span>    }
+<a name="l00420"></a>00420 };
+<a name="l00421"></a>00421 
+<a name="l00423"></a>00423 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> U, <span class="keyword">typename</span> Body&gt;
+<a name="l00424"></a>00424 <span class="keyword">class </span>filter_node_leaf: <span class="keyword">public</span> filter_node  {
+<a name="l00425"></a>00425     <span class="keyword">const</span> tbb::filter::mode mode;
+<a name="l00426"></a>00426     <span class="keyword">const</span> Body body;
+<a name="l00427"></a>00427     <span class="comment">/*override*/</span><span class="keywordtype">void</span> add_to( pipeline&amp; p ) {
+<a name="l00428"></a>00428         concrete_filter&lt;T,U,Body&gt;* f = <span class="keyword">new</span> concrete_filter&lt;T,U,Body&gt;(mode,body);
+<a name="l00429"></a>00429         p.add_filter( *f );
+<a name="l00430"></a>00430     }
+<a name="l00431"></a>00431 <span class="keyword">public</span>:
+<a name="l00432"></a>00432     filter_node_leaf( tbb::filter::mode m, <span class="keyword">const</span> Body&amp; b ) : mode(m), body(b) {}
+<a name="l00433"></a>00433 };
+<a name="l00434"></a>00434 
+<a name="l00436"></a>00436 <span class="keyword">class </span>filter_node_join: <span class="keyword">public</span> filter_node {
+<a name="l00437"></a>00437     <span class="keyword">friend</span> <span class="keyword">class </span>filter_node; <span class="comment">// to suppress GCC 3.2 warnings</span>
+<a name="l00438"></a>00438     filter_node&amp; left;
+<a name="l00439"></a>00439     filter_node&amp; right;
+<a name="l00440"></a>00440     <span class="comment">/*override*/</span>~filter_node_join() {
+<a name="l00441"></a>00441        left.remove_ref();
+<a name="l00442"></a>00442        right.remove_ref();
+<a name="l00443"></a>00443     }
+<a name="l00444"></a>00444     <span class="comment">/*override*/</span><span class="keywordtype">void</span> add_to( pipeline&amp; p ) {
+<a name="l00445"></a>00445         left.add_to(p);
+<a name="l00446"></a>00446         right.add_to(p);
+<a name="l00447"></a>00447     }
+<a name="l00448"></a>00448 <span class="keyword">public</span>:
+<a name="l00449"></a>00449     filter_node_join( filter_node&amp; x, filter_node&amp; y ) : left(x), right(y) {
+<a name="l00450"></a>00450        left.add_ref();
+<a name="l00451"></a>00451        right.add_ref();
+<a name="l00452"></a>00452     }
+<a name="l00453"></a>00453 };
+<a name="l00454"></a>00454 
+<a name="l00455"></a>00455 } <span class="comment">// namespace internal</span>
+<a name="l00457"></a>00457 <span class="comment"></span>
+<a name="l00459"></a>00459 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> U, <span class="keyword">typename</span> Body&gt;
+<a name="l00460"></a><a class="code" href="a00170.html#85c2892eff1fddcd06e28911e75838bd">00460</a> <a class="code" href="a00170.html">filter_t&lt;T,U&gt;</a> make_filter(tbb::filter::mode mode, <span class="keyword">const</span> Body&amp; body) {
+<a name="l00461"></a>00461     <span class="keywordflow">return</span> <span class="keyword">new</span> internal::filter_node_leaf&lt;T,U,Body&gt;(mode, body);
+<a name="l00462"></a>00462 }
+<a name="l00463"></a>00463 
+<a name="l00464"></a>00464 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> V, <span class="keyword">typename</span> U&gt;
+<a name="l00465"></a>00465 <a class="code" href="a00170.html">filter_t&lt;T,U&gt;</a> operator&amp; (<span class="keyword">const</span> <a class="code" href="a00170.html">filter_t&lt;T,V&gt;</a>&amp; left, <span class="keyword">const</span> <a class="code" href="a00170.html">filter_t&lt;V,U&gt;</a>&amp; right) {
+<a name="l00466"></a>00466     __TBB_ASSERT(left.<a class="code" href="a00170.html#a44d0f06c15bc89c324c5581956799fd">root</a>,<span class="stringliteral">"cannot use default-constructed filter_t as left argument of '&amp;'"</span>);
+<a name="l00467"></a>00467     __TBB_ASSERT(right.<a class="code" href="a00170.html#a44d0f06c15bc89c324c5581956799fd">root</a>,<span class="stringliteral">"cannot use default-constructed filter_t as right argument of '&amp;'"</span>);
+<a name="l00468"></a>00468     <span class="keywordflow">return</span> <span class="keyword">new</span> internal::filter_node_join(*left.<a class="code" href="a00170.html#a44d0f06c15bc89c324c5581956799fd">root</a>,*right.<a class="code" href="a00170.html#a44d0f06c15bc89c324c5581956799fd">root</a>);
+<a name="l00469"></a>00469 }
+<a name="l00470"></a>00470 
+<a name="l00472"></a>00472 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00473"></a><a class="code" href="a00170.html">00473</a> <span class="keyword">class </span><a class="code" href="a00170.html">filter_t</a> {
+<a name="l00474"></a>00474     <span class="keyword">typedef</span> internal::filter_node filter_node;
+<a name="l00475"></a>00475     filter_node* root;
+<a name="l00476"></a>00476     <a class="code" href="a00170.html">filter_t</a>( filter_node* root_ ) : root(root_) {
+<a name="l00477"></a>00477         root-&gt;add_ref();
+<a name="l00478"></a>00478     }
+<a name="l00479"></a>00479     <span class="keyword">friend</span> <span class="keyword">class </span>internal::pipeline_proxy;
+<a name="l00480"></a>00480     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T_, <span class="keyword">typename</span> U_, <span class="keyword">typename</span> Body&gt;
+<a name="l00481"></a>00481     <span class="keyword">friend</span> <a class="code" href="a00170.html">filter_t&lt;T_,U_&gt;</a> <a class="code" href="a00170.html#85c2892eff1fddcd06e28911e75838bd">make_filter</a>(tbb::filter::mode, <span class="keyword">const</span> Body&amp; );
+<a name="l00482"></a>00482     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T_, <span class="keyword">typename</span> V_, <span class="keyword">typename</span> U_&gt;
+<a name="l00483"></a>00483     <span class="keyword">friend</span> <a class="code" href="a00170.html">filter_t&lt;T_,U_&gt;</a> operator&amp; (<span class="keyword">const</span> <a class="code" href="a00170.html">filter_t&lt;T_,V_&gt;</a>&amp; , <span class="keyword">const</span> <a class="code" href="a00170.html">filter_t&lt;V_,U_&gt;</a>&amp; );
+<a name="l00484"></a>00484 <span class="keyword">public</span>:
+<a name="l00485"></a>00485     <a class="code" href="a00170.html">filter_t</a>() : root(NULL) {}
+<a name="l00486"></a>00486     <a class="code" href="a00170.html">filter_t</a>( <span class="keyword">const</span> <a class="code" href="a00170.html">filter_t&lt;T,U&gt;</a>&amp; rhs ) : root(rhs.<a class="code" href="a00170.html#a44d0f06c15bc89c324c5581956799fd">root</a>) {
+<a name="l00487"></a>00487         <span class="keywordflow">if</span>( root ) root-&gt;add_ref();
+<a name="l00488"></a>00488     }
+<a name="l00489"></a>00489     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> Body&gt;
+<a name="l00490"></a>00490     <a class="code" href="a00170.html">filter_t</a>( tbb::filter::mode mode, <span class="keyword">const</span> Body&amp; body ) :
+<a name="l00491"></a>00491         root( <span class="keyword">new</span> internal::filter_node_leaf&lt;T,U,Body&gt;(mode, body) ) {
+<a name="l00492"></a>00492         root-&gt;add_ref();
+<a name="l00493"></a>00493     }
+<a name="l00494"></a>00494 
+<a name="l00495"></a>00495     <span class="keywordtype">void</span> operator=( <span class="keyword">const</span> <a class="code" href="a00170.html">filter_t&lt;T,U&gt;</a>&amp; rhs ) {
+<a name="l00496"></a>00496         <span class="comment">// Order of operations below carefully chosen so that reference counts remain correct</span>
+<a name="l00497"></a>00497         <span class="comment">// in unlikely event that remove_ref throws exception.</span>
+<a name="l00498"></a>00498         filter_node* old = root;
+<a name="l00499"></a>00499         root = rhs.<a class="code" href="a00170.html#a44d0f06c15bc89c324c5581956799fd">root</a>; 
+<a name="l00500"></a>00500         <span class="keywordflow">if</span>( root ) root-&gt;add_ref();
+<a name="l00501"></a>00501         <span class="keywordflow">if</span>( old ) old-&gt;remove_ref();
+<a name="l00502"></a>00502     }
+<a name="l00503"></a>00503     ~<a class="code" href="a00170.html">filter_t</a>() {
+<a name="l00504"></a>00504         <span class="keywordflow">if</span>( root ) root-&gt;remove_ref();
+<a name="l00505"></a>00505     }
+<a name="l00506"></a>00506     <span class="keywordtype">void</span> clear() {
+<a name="l00507"></a>00507         <span class="comment">// Like operator= with filter_t() on right side.</span>
+<a name="l00508"></a>00508         <span class="keywordflow">if</span>( root ) {
+<a name="l00509"></a>00509             filter_node* old = root;
+<a name="l00510"></a>00510             root = NULL;
+<a name="l00511"></a>00511             old-&gt;remove_ref();
+<a name="l00512"></a>00512         }
+<a name="l00513"></a>00513     }
+<a name="l00514"></a>00514 };
+<a name="l00515"></a>00515 
+<a name="l00516"></a>00516 <span class="keyword">inline</span> internal::pipeline_proxy::pipeline_proxy( <span class="keyword">const</span> <a class="code" href="a00170.html">filter_t&lt;void,void&gt;</a>&amp; filter_chain ) : my_pipe() {
+<a name="l00517"></a>00517     __TBB_ASSERT( filter_chain.<a class="code" href="a00170.html#a44d0f06c15bc89c324c5581956799fd">root</a>, <span class="stringliteral">"cannot apply parallel_pipeline to default-constructed filter_t"</span>  );
+<a name="l00518"></a>00518     filter_chain.<a class="code" href="a00170.html#a44d0f06c15bc89c324c5581956799fd">root</a>-&gt;add_to(my_pipe);
+<a name="l00519"></a>00519 }
+<a name="l00520"></a>00520 
+<a name="l00521"></a>00521 <span class="keyword">inline</span> <span class="keywordtype">void</span> parallel_pipeline(size_t max_number_of_live_tokens, <span class="keyword">const</span> <a class="code" href="a00170.html">filter_t&lt;void,void&gt;</a>&amp; filter_chain
+<a name="l00522"></a>00522 #<span class="keywordflow">if</span> __TBB_TASK_GROUP_CONTEXT
+<a name="l00523"></a>00523     , <a class="code" href="a00206.html">tbb::task_group_context</a>&amp; context
+<a name="l00524"></a>00524 #endif
+<a name="l00525"></a>00525     ) {
+<a name="l00526"></a>00526     internal::pipeline_proxy pipe(filter_chain);
+<a name="l00527"></a>00527     <span class="comment">// tbb::pipeline::run() is called via the proxy</span>
+<a name="l00528"></a>00528     pipe-&gt;run(max_number_of_live_tokens
+<a name="l00529"></a>00529 #<span class="keywordflow">if</span> __TBB_TASK_GROUP_CONTEXT
+<a name="l00530"></a>00530               , context
+<a name="l00531"></a>00531 #endif
+<a name="l00532"></a>00532     );
+<a name="l00533"></a>00533 }
+<a name="l00534"></a>00534 
+<a name="l00535"></a>00535 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00536"></a>00536 <span class="preprocessor"></span><span class="keyword">inline</span> <span class="keywordtype">void</span> parallel_pipeline(size_t max_number_of_live_tokens, <span class="keyword">const</span> filter_t&lt;void,void&gt;&amp; filter_chain) {
+<a name="l00537"></a>00537     <a class="code" href="a00206.html">tbb::task_group_context</a> context;
+<a name="l00538"></a>00538     parallel_pipeline(max_number_of_live_tokens, filter_chain, context);
+<a name="l00539"></a>00539 }
+<a name="l00540"></a>00540 <span class="preprocessor">#endif // __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00541"></a>00541 <span class="preprocessor"></span>
+<a name="l00542"></a>00542 } <span class="comment">// interface5</span>
+<a name="l00543"></a>00543 
+<a name="l00544"></a>00544 <span class="keyword">using</span> interface5::flow_control;
+<a name="l00545"></a>00545 <span class="keyword">using</span> interface5::filter_t;
+<a name="l00546"></a>00546 <span class="keyword">using</span> interface5::make_filter;
+<a name="l00547"></a>00547 <span class="keyword">using</span> interface5::parallel_pipeline;
+<a name="l00548"></a>00548 
+<a name="l00549"></a>00549 } <span class="comment">// tbb</span>
+<a name="l00550"></a>00550 
+<a name="l00551"></a>00551 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_pipeline_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
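
The pipeline.h listing above defines both the classic filter/pipeline classes and the lambda-friendly interface5 layer (filter_t, make_filter, operator&amp; and parallel_pipeline). A short sketch of the lambda-friendly form, assuming a compiler with C++0x lambda support (plain function objects work the same way); run_pipeline and the stage bodies are illustrative, not part of the committed sources.

    #include <cstdio>
    #include "tbb/pipeline.h"

    void run_pipeline( int n ) {
        int next = 0;
        tbb::parallel_pipeline( /* max_number_of_live_tokens = */ 8,
            // Input stage: serial and in order; fc.stop() ends the stream.
            tbb::make_filter<void,int>( tbb::filter::serial_in_order,
                [&]( tbb::flow_control& fc ) -> int {
                    if( next >= n ) { fc.stop(); return 0; }
                    return next++;
                } ) &
            // Middle stage: a pure transformation, safe to run in parallel.
            tbb::make_filter<int,int>( tbb::filter::parallel,
                []( int x ) { return x * x; } ) &
            // Output stage: serial and in order, so results print in input order.
            tbb::make_filter<int,void>( tbb::filter::serial_in_order,
                []( int y ) { std::printf( "%d\n", y ); } ) );
    }

The operator&amp; shown in the listing glues the three filter_t objects into the single filter_t&lt;void,void&gt; chain that parallel_pipeline expects, with the reference-counted filter_node tree managing the lifetime of each stage.
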
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00431.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00431.html
new file mode 100644 (file)
index 0000000..82c6ab7
--- /dev/null
@@ -0,0 +1,136 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>queuing_mutex.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>queuing_mutex.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_queuing_mutex_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_queuing_mutex_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "tbb_config.h"</span>
+<a name="l00025"></a>00025 
+<a name="l00026"></a>00026 <span class="preprocessor">#if !TBB_USE_EXCEPTIONS &amp;&amp; _MSC_VER</span>
+<a name="l00027"></a>00027 <span class="preprocessor"></span>    <span class="comment">// Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers</span>
+<a name="l00028"></a>00028 <span class="preprocessor">    #pragma warning (push)</span>
+<a name="l00029"></a>00029 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (disable: 4530)</span>
+<a name="l00030"></a>00030 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00031"></a>00031 <span class="preprocessor"></span>
+<a name="l00032"></a>00032 <span class="preprocessor">#include &lt;cstring&gt;</span>
+<a name="l00033"></a>00033 
+<a name="l00034"></a>00034 <span class="preprocessor">#if !TBB_USE_EXCEPTIONS &amp;&amp; _MSC_VER</span>
+<a name="l00035"></a>00035 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (pop)</span>
+<a name="l00036"></a>00036 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00037"></a>00037 <span class="preprocessor"></span>
+<a name="l00038"></a>00038 <span class="preprocessor">#include "atomic.h"</span>
+<a name="l00039"></a>00039 <span class="preprocessor">#include "tbb_profiling.h"</span>
+<a name="l00040"></a>00040 
+<a name="l00041"></a>00041 <span class="keyword">namespace </span>tbb {
+<a name="l00042"></a>00042 
+<a name="l00044"></a>00044 
+<a name="l00045"></a><a class="code" href="a00187.html">00045</a> <span class="keyword">class </span><a class="code" href="a00187.html">queuing_mutex</a> {
+<a name="l00046"></a>00046 <span class="keyword">public</span>:
+<a name="l00048"></a><a class="code" href="a00187.html#b389ad9c4db7293e4bdb5b8cda69ec04">00048</a>     <a class="code" href="a00187.html#b389ad9c4db7293e4bdb5b8cda69ec04">queuing_mutex</a>() {
+<a name="l00049"></a>00049         q_tail = NULL;
+<a name="l00050"></a>00050 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS</span>
+<a name="l00051"></a>00051 <span class="preprocessor"></span>        internal_construct();
+<a name="l00052"></a>00052 <span class="preprocessor">#endif</span>
+<a name="l00053"></a>00053 <span class="preprocessor"></span>    }
+<a name="l00054"></a>00054 
+<a name="l00056"></a>00056 
+<a name="l00058"></a><a class="code" href="a00188.html">00058</a>     <span class="keyword">class </span><a class="code" href="a00188.html">scoped_lock</a>: internal::no_copy {
+<a name="l00060"></a>00060         <span class="keywordtype">void</span> initialize() {
+<a name="l00061"></a>00061             <a class="code" href="a00177.html">mutex</a> = NULL;
+<a name="l00062"></a>00062 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00063"></a>00063 <span class="preprocessor"></span>            internal::poison_pointer(next);
+<a name="l00064"></a>00064 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00065"></a>00065         }
+<a name="l00066"></a>00066     <span class="keyword">public</span>:
+<a name="l00068"></a>00068 
+<a name="l00069"></a><a class="code" href="a00188.html#db0fa3967491014572e24d6607bdc971">00069</a>         <a class="code" href="a00188.html#db0fa3967491014572e24d6607bdc971">scoped_lock</a>() {initialize();}
+<a name="l00070"></a>00070 
+<a name="l00072"></a><a class="code" href="a00188.html#9b51ef972f5618ac17caadb58841ab6d">00072</a>         <a class="code" href="a00188.html#db0fa3967491014572e24d6607bdc971">scoped_lock</a>( <a class="code" href="a00187.html">queuing_mutex</a>&amp; m ) {
+<a name="l00073"></a>00073             initialize();
+<a name="l00074"></a>00074             <a class="code" href="a00188.html#533e4fc8355ee321206a0609c42d909d">acquire</a>(m);
+<a name="l00075"></a>00075         }
+<a name="l00076"></a>00076 
+<a name="l00078"></a><a class="code" href="a00188.html#ac2c576a93570957d694192a5f491443">00078</a>         <a class="code" href="a00188.html#ac2c576a93570957d694192a5f491443">~scoped_lock</a>() {
+<a name="l00079"></a>00079             <span class="keywordflow">if</span>( <a class="code" href="a00177.html">mutex</a> ) <a class="code" href="a00188.html#3bf2b8c87ff22115be9b2eac179f2d30">release</a>();
+<a name="l00080"></a>00080         }
+<a name="l00081"></a>00081 
+<a name="l00083"></a>00083         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD <a class="code" href="a00188.html#533e4fc8355ee321206a0609c42d909d">acquire</a>( <a class="code" href="a00187.html">queuing_mutex</a>&amp; m );
+<a name="l00084"></a>00084 
+<a name="l00086"></a>00086         <span class="keywordtype">bool</span> __TBB_EXPORTED_METHOD <a class="code" href="a00188.html#e5a014fb817599386a87170cf2cf51a9">try_acquire</a>( <a class="code" href="a00187.html">queuing_mutex</a>&amp; m );
+<a name="l00087"></a>00087 
+<a name="l00089"></a>00089         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD <a class="code" href="a00188.html#3bf2b8c87ff22115be9b2eac179f2d30">release</a>();
+<a name="l00090"></a>00090 
+<a name="l00091"></a>00091     <span class="keyword">private</span>:
+<a name="l00093"></a>00093         <a class="code" href="a00187.html">queuing_mutex</a>* <a class="code" href="a00177.html">mutex</a>;
+<a name="l00094"></a>00094 
+<a name="l00096"></a>00096         <a class="code" href="a00188.html">scoped_lock</a> *next;
+<a name="l00097"></a>00097 
+<a name="l00099"></a>00099 
+<a name="l00102"></a>00102         uintptr_t going;
+<a name="l00103"></a>00103     };
+<a name="l00104"></a>00104 
+<a name="l00105"></a>00105     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_construct();
+<a name="l00106"></a>00106 
+<a name="l00107"></a>00107     <span class="comment">// Mutex traits</span>
+<a name="l00108"></a>00108     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_rw_mutex = <span class="keyword">false</span>;
+<a name="l00109"></a>00109     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_recursive_mutex = <span class="keyword">false</span>;
+<a name="l00110"></a>00110     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_fair_mutex = <span class="keyword">true</span>;
+<a name="l00111"></a>00111 
+<a name="l00112"></a>00112     <span class="keyword">friend</span> <span class="keyword">class </span>scoped_lock;
+<a name="l00113"></a>00113 <span class="keyword">private</span>:
+<a name="l00115"></a>00115     <a class="code" href="a00147.html">atomic&lt;scoped_lock*&gt;</a> q_tail;
+<a name="l00116"></a>00116 
+<a name="l00117"></a>00117 };
+<a name="l00118"></a>00118 
+<a name="l00119"></a>00119 __TBB_DEFINE_PROFILING_SET_NAME(queuing_mutex)
+<a name="l00120"></a>00120 
+<a name="l00121"></a>00121 } <span class="comment">// namespace tbb</span>
+<a name="l00122"></a>00122 
+<a name="l00123"></a>00123 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_queuing_mutex_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
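For reference, a minimal usage sketch of the fair tbb::queuing_mutex declared in the queuing_mutex.h listing above. It is illustrative only and not part of the committed files; the shared counter, thread counts, and function names are assumptions.

    #include "tbb/queuing_mutex.h"
    #include "tbb/tbb_thread.h"

    tbb::queuing_mutex counter_mutex;   // fair, FIFO-ordered queuing mutex
    long shared_counter = 0;            // illustrative shared state

    void increment_many(int n) {
        for (int i = 0; i < n; ++i) {
            // The constructor acquires the mutex; the destructor releases it.
            tbb::queuing_mutex::scoped_lock lock(counter_mutex);
            ++shared_counter;
        }   // released here; the next waiter in the queue proceeds
    }

    int main() {
        tbb::tbb_thread t1(increment_many, 100000);
        tbb::tbb_thread t2(increment_many, 100000);
        t1.join();
        t2.join();
        return shared_counter == 200000 ? 0 : 1;
    }

Because is_fair_mutex is true for this class, waiting threads acquire the lock in arrival order, which prevents starvation at the cost of some overhead compared to an unfair spin mutex.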
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00432.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00432.html
new file mode 100644 (file)
index 0000000..dcac765
--- /dev/null
@@ -0,0 +1,168 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>queuing_rw_mutex.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>queuing_rw_mutex.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_queuing_rw_mutex_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_queuing_rw_mutex_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "tbb_config.h"</span>
+<a name="l00025"></a>00025 
+<a name="l00026"></a>00026 <span class="preprocessor">#if !TBB_USE_EXCEPTIONS &amp;&amp; _MSC_VER</span>
+<a name="l00027"></a>00027 <span class="preprocessor"></span>    <span class="comment">// Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers</span>
+<a name="l00028"></a>00028 <span class="preprocessor">    #pragma warning (push)</span>
+<a name="l00029"></a>00029 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (disable: 4530)</span>
+<a name="l00030"></a>00030 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00031"></a>00031 <span class="preprocessor"></span>
+<a name="l00032"></a>00032 <span class="preprocessor">#include &lt;cstring&gt;</span>
+<a name="l00033"></a>00033 
+<a name="l00034"></a>00034 <span class="preprocessor">#if !TBB_USE_EXCEPTIONS &amp;&amp; _MSC_VER</span>
+<a name="l00035"></a>00035 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (pop)</span>
+<a name="l00036"></a>00036 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00037"></a>00037 <span class="preprocessor"></span>
+<a name="l00038"></a>00038 <span class="preprocessor">#include "atomic.h"</span>
+<a name="l00039"></a>00039 <span class="preprocessor">#include "tbb_profiling.h"</span>
+<a name="l00040"></a>00040 
+<a name="l00041"></a>00041 <span class="keyword">namespace </span>tbb {
+<a name="l00042"></a>00042 
+<a name="l00044"></a>00044 
+<a name="l00047"></a><a class="code" href="a00189.html">00047</a> <span class="keyword">class </span><a class="code" href="a00189.html">queuing_rw_mutex</a> {
+<a name="l00048"></a>00048 <span class="keyword">public</span>:
+<a name="l00050"></a><a class="code" href="a00189.html#85c90877c3447690ac4e2ac4ff8dea5e">00050</a>     <a class="code" href="a00189.html#85c90877c3447690ac4e2ac4ff8dea5e">queuing_rw_mutex</a>() {
+<a name="l00051"></a>00051         q_tail = NULL;
+<a name="l00052"></a>00052 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS</span>
+<a name="l00053"></a>00053 <span class="preprocessor"></span>        internal_construct();
+<a name="l00054"></a>00054 <span class="preprocessor">#endif</span>
+<a name="l00055"></a>00055 <span class="preprocessor"></span>    }
+<a name="l00056"></a>00056 
+<a name="l00058"></a><a class="code" href="a00189.html#1ba73e3d95cfdf8323880bc623af9099">00058</a>     <a class="code" href="a00189.html#1ba73e3d95cfdf8323880bc623af9099">~queuing_rw_mutex</a>() {
+<a name="l00059"></a>00059 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00060"></a>00060 <span class="preprocessor"></span>        __TBB_ASSERT( !q_tail, <span class="stringliteral">"destruction of an acquired mutex"</span>);
+<a name="l00061"></a>00061 <span class="preprocessor">#endif</span>
+<a name="l00062"></a>00062 <span class="preprocessor"></span>    }
+<a name="l00063"></a>00063 
+<a name="l00064"></a>00064     <span class="keyword">class </span>scoped_lock;
+<a name="l00065"></a>00065     <span class="keyword">friend</span> <span class="keyword">class </span>scoped_lock;
+<a name="l00066"></a>00066 
+<a name="l00068"></a>00068 
+<a name="l00070"></a><a class="code" href="a00190.html">00070</a>     <span class="keyword">class </span><a class="code" href="a00190.html">scoped_lock</a>: internal::no_copy {
+<a name="l00072"></a>00072         <span class="keywordtype">void</span> initialize() {
+<a name="l00073"></a>00073             <a class="code" href="a00177.html">mutex</a> = NULL;
+<a name="l00074"></a>00074 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00075"></a>00075 <span class="preprocessor"></span>            state = 0xFF; <span class="comment">// Set to invalid state</span>
+<a name="l00076"></a>00076             internal::poison_pointer(next);
+<a name="l00077"></a>00077             internal::poison_pointer(prev);
+<a name="l00078"></a>00078 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00079"></a>00079         }
+<a name="l00080"></a>00080     <span class="keyword">public</span>:
+<a name="l00082"></a>00082 
+<a name="l00083"></a><a class="code" href="a00190.html#c62e365be7bcbba091c9ea7454a4d22c">00083</a>         <a class="code" href="a00190.html#c62e365be7bcbba091c9ea7454a4d22c">scoped_lock</a>() {initialize();}
+<a name="l00084"></a>00084 
+<a name="l00086"></a><a class="code" href="a00190.html#fbb8798792d3aebb136c46fc63d2529e">00086</a>         <a class="code" href="a00190.html#c62e365be7bcbba091c9ea7454a4d22c">scoped_lock</a>( <a class="code" href="a00189.html">queuing_rw_mutex</a>&amp; m, <span class="keywordtype">bool</span> write=<span class="keyword">true</span> ) {
+<a name="l00087"></a>00087             initialize();
+<a name="l00088"></a>00088             <a class="code" href="a00190.html#a8dd5ab8686e76de21587544dbb681e0">acquire</a>(m,write);
+<a name="l00089"></a>00089         }
+<a name="l00090"></a>00090 
+<a name="l00092"></a><a class="code" href="a00190.html#32c7d67a660d23ebbaab1a1d2826d31a">00092</a>         <a class="code" href="a00190.html#32c7d67a660d23ebbaab1a1d2826d31a">~scoped_lock</a>() {
+<a name="l00093"></a>00093             <span class="keywordflow">if</span>( <a class="code" href="a00177.html">mutex</a> ) <a class="code" href="a00190.html#67ae221109ddc69510ab593874e435d4">release</a>();
+<a name="l00094"></a>00094         }
+<a name="l00095"></a>00095 
+<a name="l00097"></a>00097         <span class="keywordtype">void</span> <a class="code" href="a00190.html#a8dd5ab8686e76de21587544dbb681e0">acquire</a>( <a class="code" href="a00189.html">queuing_rw_mutex</a>&amp; m, <span class="keywordtype">bool</span> write=<span class="keyword">true</span> );
+<a name="l00098"></a>00098 
+<a name="l00100"></a>00100         <span class="keywordtype">bool</span> <a class="code" href="a00190.html#2e4ff6c9ec2fee6682f95290d1f42baa">try_acquire</a>( <a class="code" href="a00189.html">queuing_rw_mutex</a>&amp; m, <span class="keywordtype">bool</span> write=<span class="keyword">true</span> );
+<a name="l00101"></a>00101 
+<a name="l00103"></a>00103         <span class="keywordtype">void</span> <a class="code" href="a00190.html#67ae221109ddc69510ab593874e435d4">release</a>();
+<a name="l00104"></a>00104 
+<a name="l00106"></a>00106 
+<a name="l00107"></a>00107         <span class="keywordtype">bool</span> <a class="code" href="a00190.html#11ba1da4a722c9e6f73339a52c487e82">upgrade_to_writer</a>();
+<a name="l00108"></a>00108 
+<a name="l00110"></a>00110         <span class="keywordtype">bool</span> <a class="code" href="a00190.html#0d2f93edf7b15ec4bcee138823220c52">downgrade_to_reader</a>();
+<a name="l00111"></a>00111 
+<a name="l00112"></a>00112     <span class="keyword">private</span>:
+<a name="l00114"></a>00114         <a class="code" href="a00189.html">queuing_rw_mutex</a>* <a class="code" href="a00177.html">mutex</a>;
+<a name="l00115"></a>00115 
+<a name="l00117"></a>00117         <a class="code" href="a00190.html">scoped_lock</a> * prev, * next;
+<a name="l00118"></a>00118 
+<a name="l00119"></a>00119         <span class="keyword">typedef</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">char</span> state_t;
+<a name="l00120"></a>00120 
+<a name="l00122"></a>00122         <a class="code" href="a00147.html">atomic&lt;state_t&gt;</a> state;
+<a name="l00123"></a>00123 
+<a name="l00125"></a>00125 
+<a name="l00126"></a>00126         <span class="keywordtype">unsigned</span> <span class="keywordtype">char</span> going;
+<a name="l00127"></a>00127 
+<a name="l00129"></a>00129         <span class="keywordtype">unsigned</span> <span class="keywordtype">char</span> internal_lock;
+<a name="l00130"></a>00130 
+<a name="l00132"></a>00132         <span class="keywordtype">void</span> acquire_internal_lock();
+<a name="l00133"></a>00133 
+<a name="l00135"></a>00135 
+<a name="l00136"></a>00136         <span class="keywordtype">bool</span> try_acquire_internal_lock();
+<a name="l00137"></a>00137 
+<a name="l00139"></a>00139         <span class="keywordtype">void</span> release_internal_lock();
+<a name="l00140"></a>00140 
+<a name="l00142"></a>00142         <span class="keywordtype">void</span> wait_for_release_of_internal_lock();
+<a name="l00143"></a>00143 
+<a name="l00145"></a>00145         <span class="keywordtype">void</span> unblock_or_wait_on_internal_lock( uintptr_t );
+<a name="l00146"></a>00146     };
+<a name="l00147"></a>00147 
+<a name="l00148"></a>00148     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_construct();
+<a name="l00149"></a>00149 
+<a name="l00150"></a>00150     <span class="comment">// Mutex traits</span>
+<a name="l00151"></a>00151     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_rw_mutex = <span class="keyword">true</span>;
+<a name="l00152"></a>00152     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_recursive_mutex = <span class="keyword">false</span>;
+<a name="l00153"></a>00153     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_fair_mutex = <span class="keyword">true</span>;
+<a name="l00154"></a>00154 
+<a name="l00155"></a>00155 <span class="keyword">private</span>:
+<a name="l00157"></a>00157     <a class="code" href="a00147.html">atomic&lt;scoped_lock*&gt;</a> q_tail;
+<a name="l00158"></a>00158 
+<a name="l00159"></a>00159 };
+<a name="l00160"></a>00160 
+<a name="l00161"></a>00161 __TBB_DEFINE_PROFILING_SET_NAME(queuing_rw_mutex)
+<a name="l00162"></a>00162 
+<a name="l00163"></a>00163 } <span class="comment">// namespace tbb</span>
+<a name="l00164"></a>00164 
+<a name="l00165"></a>00165 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_queuing_rw_mutex_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
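Similarly, a minimal sketch of the reader-to-writer upgrade idiom supported by the queuing_rw_mutex shown in the queuing_rw_mutex.h listing above. Illustrative only; the cache map, key and value types, and function name are assumptions.

    #include <map>
    #include <string>
    #include "tbb/queuing_rw_mutex.h"

    typedef std::map<std::string, std::string> cache_t;
    cache_t cache;                       // illustrative shared data
    tbb::queuing_rw_mutex cache_mutex;   // fair reader-writer mutex

    std::string lookup_or_insert(const std::string& key, const std::string& fallback) {
        // Acquire as a reader first (write = false).
        tbb::queuing_rw_mutex::scoped_lock lock(cache_mutex, /*write=*/false);
        cache_t::iterator it = cache.find(key);
        if (it != cache.end())
            return it->second;
        // Upgrade to a writer. If the lock had to be released temporarily,
        // upgrade_to_writer() returns false and the lookup must be redone.
        if (!lock.upgrade_to_writer()) {
            it = cache.find(key);
            if (it != cache.end())
                return it->second;
        }
        cache[key] = fallback;
        return fallback;
    }   // the destructor releases the lock

downgrade_to_reader() provides the reverse transition for code that finishes its updates but still needs read access.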
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00433.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00433.html
new file mode 100644 (file)
index 0000000..d341f32
--- /dev/null
@@ -0,0 +1,179 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>reader_writer_lock.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>reader_writer_lock.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_reader_writer_lock_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_reader_writer_lock_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "tbb_thread.h"</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#include "tbb_allocator.h"</span>
+<a name="l00026"></a>00026 <span class="preprocessor">#include "atomic.h"</span>
+<a name="l00027"></a>00027 
+<a name="l00028"></a>00028 <span class="keyword">namespace </span>tbb {
+<a name="l00029"></a>00029 <span class="keyword">namespace </span>interface5 {
+<a name="l00031"></a>00031 
+<a name="l00034"></a><a class="code" href="a00191.html">00034</a>     <span class="keyword">class </span><a class="code" href="a00191.html">reader_writer_lock</a> : tbb::internal::no_copy {
+<a name="l00035"></a>00035  <span class="keyword">public</span>:
+<a name="l00036"></a>00036     <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00192.html">scoped_lock</a>;
+<a name="l00037"></a>00037     <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00193.html">scoped_lock_read</a>;
+<a name="l00039"></a>00039 
+<a name="l00074"></a><a class="code" href="a00191.html#6f921f0d7c1812ceb5674418c8b6ccaf">00074</a>     <span class="keyword">enum</span> <a class="code" href="a00191.html#6f921f0d7c1812ceb5674418c8b6ccaf">status_t</a> { waiting_nonblocking, waiting, active, invalid };
+<a name="l00075"></a>00075 
+<a name="l00077"></a><a class="code" href="a00191.html#c1431c4293e777efd9aab9a95c2a46e1">00077</a>     <a class="code" href="a00191.html#c1431c4293e777efd9aab9a95c2a46e1">reader_writer_lock</a>() {
+<a name="l00078"></a>00078         internal_construct();
+<a name="l00079"></a>00079     }
+<a name="l00080"></a>00080 
+<a name="l00082"></a><a class="code" href="a00191.html#5135f64f7b7339017f33d956445edbee">00082</a>     <a class="code" href="a00191.html#5135f64f7b7339017f33d956445edbee">~reader_writer_lock</a>() {
+<a name="l00083"></a>00083         internal_destroy();
+<a name="l00084"></a>00084     }
+<a name="l00085"></a>00085 
+<a name="l00087"></a>00087 
+<a name="l00089"></a><a class="code" href="a00192.html">00089</a>     <span class="keyword">class </span><a class="code" href="a00192.html">scoped_lock</a> : tbb::internal::no_copy {
+<a name="l00090"></a>00090     <span class="keyword">public</span>:
+<a name="l00091"></a>00091         <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00191.html">reader_writer_lock</a>;
+<a name="l00092"></a>00092  
+<a name="l00094"></a><a class="code" href="a00192.html#cf19f20e082887c1bb0ba6b0911c3583">00094</a>         <a class="code" href="a00192.html">scoped_lock</a>(<a class="code" href="a00191.html">reader_writer_lock</a>&amp; <a class="code" href="a00191.html#2653d1a2d560059a51219a8ceab3ade9">lock</a>) {
+<a name="l00095"></a>00095             internal_construct(<a class="code" href="a00191.html#2653d1a2d560059a51219a8ceab3ade9">lock</a>);
+<a name="l00096"></a>00096         }
+<a name="l00097"></a>00097         
+<a name="l00099"></a><a class="code" href="a00192.html#70246e0260493625ff956fa5926fc71f">00099</a>         <a class="code" href="a00192.html#70246e0260493625ff956fa5926fc71f">~scoped_lock</a>() {
+<a name="l00100"></a>00100             internal_destroy();
+<a name="l00101"></a>00101         }
+<a name="l00102"></a>00102 
+<a name="l00103"></a>00103         <span class="keywordtype">void</span>* operator new(size_t s) {
+<a name="l00104"></a>00104             <span class="keywordflow">return</span> tbb::internal::allocate_via_handler_v3(s);
+<a name="l00105"></a>00105         }
+<a name="l00106"></a>00106         <span class="keywordtype">void</span> operator delete(<span class="keywordtype">void</span>* p) {
+<a name="l00107"></a>00107             tbb::internal::deallocate_via_handler_v3(p);
+<a name="l00108"></a>00108         }
+<a name="l00109"></a>00109 
+<a name="l00110"></a>00110     <span class="keyword">private</span>:
+<a name="l00112"></a>00112         <a class="code" href="a00191.html">reader_writer_lock</a> *<a class="code" href="a00177.html">mutex</a>;
+<a name="l00114"></a>00114         scoped_lock* next;
+<a name="l00116"></a>00116         <a class="code" href="a00147.html">atomic&lt;status_t&gt;</a> status;
+<a name="l00117"></a>00117 
+<a name="l00119"></a>00119         scoped_lock();
+<a name="l00120"></a>00120 
+<a name="l00121"></a>00121         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_construct(<a class="code" href="a00191.html">reader_writer_lock</a>&amp;);
+<a name="l00122"></a>00122         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_destroy();
+<a name="l00123"></a>00123    };
+<a name="l00124"></a>00124 
+<a name="l00126"></a><a class="code" href="a00193.html">00126</a>     <span class="keyword">class </span><a class="code" href="a00193.html">scoped_lock_read</a> : tbb::internal::no_copy {
+<a name="l00127"></a>00127     <span class="keyword">public</span>:
+<a name="l00128"></a>00128         <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00191.html">reader_writer_lock</a>;
+<a name="l00129"></a>00129 
+<a name="l00131"></a><a class="code" href="a00193.html#87ab0dc8f7216e6ba0f7acd6aec33064">00131</a>         <a class="code" href="a00193.html">scoped_lock_read</a>(<a class="code" href="a00191.html">reader_writer_lock</a>&amp; <a class="code" href="a00191.html#2653d1a2d560059a51219a8ceab3ade9">lock</a>) {
+<a name="l00132"></a>00132             internal_construct(<a class="code" href="a00191.html#2653d1a2d560059a51219a8ceab3ade9">lock</a>);
+<a name="l00133"></a>00133         }
+<a name="l00134"></a>00134 
+<a name="l00136"></a><a class="code" href="a00193.html#bd21c5f3d555d64d1de8658e15bf4966">00136</a>         <a class="code" href="a00193.html#bd21c5f3d555d64d1de8658e15bf4966">~scoped_lock_read</a>() { 
+<a name="l00137"></a>00137             internal_destroy();
+<a name="l00138"></a>00138         }
+<a name="l00139"></a>00139         
+<a name="l00140"></a>00140         <span class="keywordtype">void</span>* operator new(size_t s) {
+<a name="l00141"></a>00141             <span class="keywordflow">return</span> tbb::internal::allocate_via_handler_v3(s);
+<a name="l00142"></a>00142         }
+<a name="l00143"></a>00143         <span class="keywordtype">void</span> operator delete(<span class="keywordtype">void</span>* p) {
+<a name="l00144"></a>00144             tbb::internal::deallocate_via_handler_v3(p);
+<a name="l00145"></a>00145         }
+<a name="l00146"></a>00146 
+<a name="l00147"></a>00147     <span class="keyword">private</span>:
+<a name="l00149"></a>00149         <a class="code" href="a00191.html">reader_writer_lock</a> *<a class="code" href="a00177.html">mutex</a>;
+<a name="l00151"></a>00151         scoped_lock_read *next;
+<a name="l00153"></a>00153         <a class="code" href="a00147.html">atomic&lt;status_t&gt;</a> status;
+<a name="l00154"></a>00154 
+<a name="l00156"></a>00156         scoped_lock_read();
+<a name="l00157"></a>00157 
+<a name="l00158"></a>00158         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_construct(<a class="code" href="a00191.html">reader_writer_lock</a>&amp;);
+<a name="l00159"></a>00159         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_destroy();
+<a name="l00160"></a>00160     };
+<a name="l00161"></a>00161     
+<a name="l00163"></a>00163 
+<a name="l00168"></a>00168     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD <a class="code" href="a00191.html#2653d1a2d560059a51219a8ceab3ade9">lock</a>();
+<a name="l00169"></a>00169 
+<a name="l00171"></a>00171 
+<a name="l00175"></a>00175     <span class="keywordtype">bool</span> __TBB_EXPORTED_METHOD <a class="code" href="a00191.html#721eb173e154ab38292273e9266a9b07">try_lock</a>();
+<a name="l00176"></a>00176 
+<a name="l00178"></a>00178 
+<a name="l00182"></a>00182     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD <a class="code" href="a00191.html#d9d16a24d9f6c3dada73c6b9ff214f5b">lock_read</a>(); 
+<a name="l00183"></a>00183 
+<a name="l00185"></a>00185 
+<a name="l00187"></a>00187     <span class="keywordtype">bool</span> __TBB_EXPORTED_METHOD <a class="code" href="a00191.html#595fb23952e3b89426b1f7938dea9b11">try_lock_read</a>();
+<a name="l00188"></a>00188 
+<a name="l00190"></a>00190     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD <a class="code" href="a00191.html#5113b32689305599b2c36b5831547704">unlock</a>();
+<a name="l00191"></a>00191 
+<a name="l00192"></a>00192  <span class="keyword">private</span>:
+<a name="l00193"></a>00193     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_construct();
+<a name="l00194"></a>00194     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_destroy();
+<a name="l00195"></a>00195 
+<a name="l00197"></a>00197 
+<a name="l00198"></a>00198     <span class="keywordtype">bool</span> start_write(scoped_lock *);
+<a name="l00200"></a>00200     <span class="keywordtype">void</span> set_next_writer(scoped_lock *w);
+<a name="l00202"></a>00202     <span class="keywordtype">void</span> end_write(scoped_lock *);
+<a name="l00204"></a>00204     <span class="keywordtype">bool</span> is_current_writer();
+<a name="l00205"></a>00205 
+<a name="l00207"></a>00207 
+<a name="l00208"></a>00208     <span class="keywordtype">void</span> start_read(scoped_lock_read *);
+<a name="l00210"></a>00210     <span class="keywordtype">void</span> unblock_readers();
+<a name="l00212"></a>00212     <span class="keywordtype">void</span> end_read();
+<a name="l00213"></a>00213 
+<a name="l00215"></a>00215     atomic&lt;scoped_lock_read*&gt; reader_head;
+<a name="l00217"></a>00217     atomic&lt;scoped_lock*&gt; writer_head;
+<a name="l00219"></a>00219     atomic&lt;scoped_lock*&gt; writer_tail;
+<a name="l00221"></a>00221     tbb_thread::id my_current_writer;
+<a name="l00223"></a>00223     atomic&lt;unsigned&gt; rdr_count_and_flags;
+<a name="l00224"></a>00224 };
+<a name="l00225"></a>00225 
+<a name="l00226"></a>00226 } <span class="comment">// namespace interface5</span>
+<a name="l00227"></a>00227 
+<a name="l00228"></a>00228 <span class="keyword">using</span> interface5::reader_writer_lock;
+<a name="l00229"></a>00229 
+<a name="l00230"></a>00230 } <span class="comment">// namespace tbb</span>
+<a name="l00231"></a>00231 
+<a name="l00232"></a>00232 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_reader_writer_lock_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
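A minimal sketch of the interface5::reader_writer_lock API from the reader_writer_lock.h listing above, showing the scoped guards as well as the explicit lock()/unlock() members. The configuration value and function names are assumptions made for the example.

    #include "tbb/reader_writer_lock.h"

    tbb::reader_writer_lock config_lock;
    int config_value = 0;               // illustrative shared state

    int read_config() {
        // scoped_lock_read acquires the lock in shared (read) mode.
        tbb::reader_writer_lock::scoped_lock_read guard(config_lock);
        return config_value;
    }   // released by the destructor

    void write_config(int v) {
        // scoped_lock acquires the lock in exclusive (write) mode.
        tbb::reader_writer_lock::scoped_lock guard(config_lock);
        config_value = v;
    }

    void write_config_nonscoped(int v) {
        // The same exclusive access via the explicit member functions.
        config_lock.lock();
        config_value = v;
        config_lock.unlock();
    }

try_lock() and try_lock_read(), declared in the listing, are the non-blocking variants of the same two operations.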
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00434.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00434.html
new file mode 100644 (file)
index 0000000..f04db89
--- /dev/null
@@ -0,0 +1,241 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>recursive_mutex.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>recursive_mutex.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_recursive_mutex_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_recursive_mutex_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00025"></a>00025 <span class="preprocessor"></span><span class="preprocessor">#include "machine/windows_api.h"</span>
+<a name="l00026"></a>00026 <span class="preprocessor">#else</span>
+<a name="l00027"></a>00027 <span class="preprocessor"></span><span class="preprocessor">#include &lt;pthread.h&gt;</span>
+<a name="l00028"></a>00028 <span class="preprocessor">#endif </span><span class="comment">/* _WIN32||_WIN64 */</span>
+<a name="l00029"></a>00029 
+<a name="l00030"></a>00030 <span class="preprocessor">#include &lt;new&gt;</span>
+<a name="l00031"></a>00031 <span class="preprocessor">#include "aligned_space.h"</span>
+<a name="l00032"></a>00032 <span class="preprocessor">#include "tbb_stddef.h"</span>
+<a name="l00033"></a>00033 <span class="preprocessor">#include "tbb_profiling.h"</span>
+<a name="l00034"></a>00034 
+<a name="l00035"></a>00035 <span class="keyword">namespace </span>tbb {
+<a name="l00037"></a>00037 
+<a name="l00039"></a><a class="code" href="a00194.html">00039</a> <span class="keyword">class </span><a class="code" href="a00194.html">recursive_mutex</a> {
+<a name="l00040"></a>00040 <span class="keyword">public</span>:
+<a name="l00042"></a><a class="code" href="a00194.html#d2fceb7f95c24a8cd1457d4527e4b8c6">00042</a>     <a class="code" href="a00194.html#d2fceb7f95c24a8cd1457d4527e4b8c6">recursive_mutex</a>() {
+<a name="l00043"></a>00043 <span class="preprocessor">#if TBB_USE_ASSERT || TBB_USE_THREADING_TOOLS</span>
+<a name="l00044"></a>00044 <span class="preprocessor"></span>        internal_construct();
+<a name="l00045"></a>00045 <span class="preprocessor">#else</span>
+<a name="l00046"></a>00046 <span class="preprocessor"></span><span class="preprocessor">  #if _WIN32||_WIN64</span>
+<a name="l00047"></a>00047 <span class="preprocessor"></span>        InitializeCriticalSection(&amp;impl);
+<a name="l00048"></a>00048 <span class="preprocessor">  #else</span>
+<a name="l00049"></a>00049 <span class="preprocessor"></span>        pthread_mutexattr_t mtx_attr;
+<a name="l00050"></a>00050         <span class="keywordtype">int</span> error_code = pthread_mutexattr_init( &amp;mtx_attr );
+<a name="l00051"></a>00051         <span class="keywordflow">if</span>( error_code )
+<a name="l00052"></a>00052             tbb::internal::handle_perror(error_code,<span class="stringliteral">"recursive_mutex: pthread_mutexattr_init failed"</span>);
+<a name="l00053"></a>00053 
+<a name="l00054"></a>00054         pthread_mutexattr_settype( &amp;mtx_attr, PTHREAD_MUTEX_RECURSIVE );
+<a name="l00055"></a>00055         error_code = pthread_mutex_init( &amp;impl, &amp;mtx_attr );
+<a name="l00056"></a>00056         <span class="keywordflow">if</span>( error_code )
+<a name="l00057"></a>00057             tbb::internal::handle_perror(error_code,<span class="stringliteral">"recursive_mutex: pthread_mutex_init failed"</span>);
+<a name="l00058"></a>00058 
+<a name="l00059"></a>00059         pthread_mutexattr_destroy( &amp;mtx_attr );
+<a name="l00060"></a>00060 <span class="preprocessor">  #endif </span><span class="comment">/* _WIN32||_WIN64*/</span>
+<a name="l00061"></a>00061 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00062"></a>00062     };
+<a name="l00063"></a>00063 
+<a name="l00064"></a>00064     ~<a class="code" href="a00194.html">recursive_mutex</a>() {
+<a name="l00065"></a>00065 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00066"></a>00066 <span class="preprocessor"></span>        internal_destroy();
+<a name="l00067"></a>00067 <span class="preprocessor">#else</span>
+<a name="l00068"></a>00068 <span class="preprocessor"></span><span class="preprocessor">  #if _WIN32||_WIN64</span>
+<a name="l00069"></a>00069 <span class="preprocessor"></span>        DeleteCriticalSection(&amp;impl);
+<a name="l00070"></a>00070 <span class="preprocessor">  #else</span>
+<a name="l00071"></a>00071 <span class="preprocessor"></span>        pthread_mutex_destroy(&amp;impl); 
+<a name="l00072"></a>00072 
+<a name="l00073"></a>00073 <span class="preprocessor">  #endif </span><span class="comment">/* _WIN32||_WIN64 */</span>
+<a name="l00074"></a>00074 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00075"></a>00075     };
+<a name="l00076"></a>00076 
+<a name="l00077"></a>00077     <span class="keyword">class </span>scoped_lock;
+<a name="l00078"></a>00078     <span class="keyword">friend</span> <span class="keyword">class </span>scoped_lock;
+<a name="l00079"></a>00079 
+<a name="l00081"></a>00081 
+<a name="l00083"></a><a class="code" href="a00195.html">00083</a>     <span class="keyword">class </span><a class="code" href="a00195.html">scoped_lock</a>: internal::no_copy {
+<a name="l00084"></a>00084     <span class="keyword">public</span>:
+<a name="l00086"></a><a class="code" href="a00195.html#d82d4d36fbf9727a493d26ae50855fe7">00086</a>         <a class="code" href="a00195.html#d82d4d36fbf9727a493d26ae50855fe7">scoped_lock</a>() : my_mutex(NULL) {};
+<a name="l00087"></a>00087 
+<a name="l00089"></a><a class="code" href="a00195.html#dec17713c4c1321ac8fec66816d0c602">00089</a>         <a class="code" href="a00195.html#d82d4d36fbf9727a493d26ae50855fe7">scoped_lock</a>( <a class="code" href="a00194.html">recursive_mutex</a>&amp; <a class="code" href="a00177.html">mutex</a> ) {
+<a name="l00090"></a>00090 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00091"></a>00091 <span class="preprocessor"></span>            my_mutex = &amp;mutex; 
+<a name="l00092"></a>00092 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00093"></a>00093             <a class="code" href="a00195.html#7fb04da37cccf8c99b1f9102d9074f9a">acquire</a>( mutex );
+<a name="l00094"></a>00094         }
+<a name="l00095"></a>00095 
+<a name="l00097"></a><a class="code" href="a00195.html#c1197ffb8f3cd9d4fed71d7e06265b7c">00097</a>         <a class="code" href="a00195.html#c1197ffb8f3cd9d4fed71d7e06265b7c">~scoped_lock</a>() {
+<a name="l00098"></a>00098             <span class="keywordflow">if</span>( my_mutex ) 
+<a name="l00099"></a>00099                 <a class="code" href="a00195.html#ac480ea0e9d5ea0345a67d57008b6263">release</a>();
+<a name="l00100"></a>00100         }
+<a name="l00101"></a>00101 
+<a name="l00103"></a><a class="code" href="a00195.html#7fb04da37cccf8c99b1f9102d9074f9a">00103</a>         <span class="keywordtype">void</span> <a class="code" href="a00195.html#7fb04da37cccf8c99b1f9102d9074f9a">acquire</a>( <a class="code" href="a00194.html">recursive_mutex</a>&amp; <a class="code" href="a00177.html">mutex</a> ) {
+<a name="l00104"></a>00104 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00105"></a>00105 <span class="preprocessor"></span>            internal_acquire( mutex );
+<a name="l00106"></a>00106 <span class="preprocessor">#else</span>
+<a name="l00107"></a>00107 <span class="preprocessor"></span>            my_mutex = &amp;mutex;
+<a name="l00108"></a>00108             mutex.<a class="code" href="a00194.html#4c342c69d47f4bb0b393535dee4015d6">lock</a>();
+<a name="l00109"></a>00109 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00110"></a>00110         }
+<a name="l00111"></a>00111 
+<a name="l00113"></a><a class="code" href="a00195.html#36bfc3e93e3ef6340abef4901444d340">00113</a>         <span class="keywordtype">bool</span> <a class="code" href="a00195.html#36bfc3e93e3ef6340abef4901444d340">try_acquire</a>( <a class="code" href="a00194.html">recursive_mutex</a>&amp; <a class="code" href="a00177.html">mutex</a> ) {
+<a name="l00114"></a>00114 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00115"></a>00115 <span class="preprocessor"></span>            <span class="keywordflow">return</span> internal_try_acquire( mutex );
+<a name="l00116"></a>00116 <span class="preprocessor">#else</span>
+<a name="l00117"></a>00117 <span class="preprocessor"></span>            <span class="keywordtype">bool</span> result = mutex.<a class="code" href="a00194.html#86e719b0afee25704af11ab97694d240">try_lock</a>();
+<a name="l00118"></a>00118             <span class="keywordflow">if</span>( result )
+<a name="l00119"></a>00119                 my_mutex = &amp;mutex;
+<a name="l00120"></a>00120             <span class="keywordflow">return</span> result;
+<a name="l00121"></a>00121 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00122"></a>00122         }
+<a name="l00123"></a>00123 
+<a name="l00125"></a><a class="code" href="a00195.html#ac480ea0e9d5ea0345a67d57008b6263">00125</a>         <span class="keywordtype">void</span> <a class="code" href="a00195.html#ac480ea0e9d5ea0345a67d57008b6263">release</a>() {
+<a name="l00126"></a>00126 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00127"></a>00127 <span class="preprocessor"></span>            internal_release();
+<a name="l00128"></a>00128 <span class="preprocessor">#else</span>
+<a name="l00129"></a>00129 <span class="preprocessor"></span>            my_mutex-&gt;<a class="code" href="a00194.html#f0a96e26b7f074588dc31e32524856ae">unlock</a>();
+<a name="l00130"></a>00130             my_mutex = NULL;
+<a name="l00131"></a>00131 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00132"></a>00132         }
+<a name="l00133"></a>00133 
+<a name="l00134"></a>00134     <span class="keyword">private</span>:
+<a name="l00136"></a>00136         <a class="code" href="a00194.html">recursive_mutex</a>* my_mutex;
+<a name="l00137"></a>00137 
+<a name="l00139"></a>00139         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_acquire( <a class="code" href="a00194.html">recursive_mutex</a>&amp; m );
+<a name="l00140"></a>00140 
+<a name="l00142"></a>00142         <span class="keywordtype">bool</span> __TBB_EXPORTED_METHOD internal_try_acquire( <a class="code" href="a00194.html">recursive_mutex</a>&amp; m );
+<a name="l00143"></a>00143 
+<a name="l00145"></a>00145         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_release();
+<a name="l00146"></a>00146 
+<a name="l00147"></a>00147         <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00194.html">recursive_mutex</a>;
+<a name="l00148"></a>00148     };
+<a name="l00149"></a>00149 
+<a name="l00150"></a>00150     <span class="comment">// Mutex traits</span>
+<a name="l00151"></a>00151     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_rw_mutex = <span class="keyword">false</span>;
+<a name="l00152"></a>00152     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_recursive_mutex = <span class="keyword">true</span>;
+<a name="l00153"></a>00153     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_fair_mutex = <span class="keyword">false</span>;
+<a name="l00154"></a>00154 
+<a name="l00155"></a>00155     <span class="comment">// C++0x compatibility interface</span>
+<a name="l00156"></a>00156     
+<a name="l00158"></a><a class="code" href="a00194.html#4c342c69d47f4bb0b393535dee4015d6">00158</a>     <span class="keywordtype">void</span> <a class="code" href="a00194.html#4c342c69d47f4bb0b393535dee4015d6">lock</a>() {
+<a name="l00159"></a>00159 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00160"></a>00160 <span class="preprocessor"></span>        <a class="code" href="a00146.html">aligned_space&lt;scoped_lock,1&gt;</a> tmp;
+<a name="l00161"></a>00161         <span class="keyword">new</span>(tmp.<a class="code" href="a00146.html#0d702fc6b9e9d061ace3501b3c861cdf">begin</a>()) <a class="code" href="a00195.html">scoped_lock</a>(*<span class="keyword">this</span>);
+<a name="l00162"></a>00162 <span class="preprocessor">#else</span>
+<a name="l00163"></a>00163 <span class="preprocessor"></span><span class="preprocessor">  #if _WIN32||_WIN64</span>
+<a name="l00164"></a>00164 <span class="preprocessor"></span>        EnterCriticalSection(&amp;impl);
+<a name="l00165"></a>00165 <span class="preprocessor">  #else</span>
+<a name="l00166"></a>00166 <span class="preprocessor"></span>        pthread_mutex_lock(&amp;impl);
+<a name="l00167"></a>00167 <span class="preprocessor">  #endif </span><span class="comment">/* _WIN32||_WIN64 */</span>
+<a name="l00168"></a>00168 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00169"></a>00169     }
+<a name="l00170"></a>00170 
+<a name="l00172"></a>00172 
+<a name="l00173"></a><a class="code" href="a00194.html#86e719b0afee25704af11ab97694d240">00173</a>     <span class="keywordtype">bool</span> <a class="code" href="a00194.html#86e719b0afee25704af11ab97694d240">try_lock</a>() {
+<a name="l00174"></a>00174 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00175"></a>00175 <span class="preprocessor"></span>        <a class="code" href="a00146.html">aligned_space&lt;scoped_lock,1&gt;</a> tmp;
+<a name="l00176"></a>00176         <span class="keywordflow">return</span> (<span class="keyword">new</span>(tmp.<a class="code" href="a00146.html#0d702fc6b9e9d061ace3501b3c861cdf">begin</a>()) <a class="code" href="a00195.html">scoped_lock</a>)-&gt;internal_try_acquire(*<span class="keyword">this</span>);
+<a name="l00177"></a>00177 <span class="preprocessor">#else        </span>
+<a name="l00178"></a>00178 <span class="preprocessor"></span><span class="preprocessor">  #if _WIN32||_WIN64</span>
+<a name="l00179"></a>00179 <span class="preprocessor"></span>        <span class="keywordflow">return</span> TryEnterCriticalSection(&amp;impl)!=0;
+<a name="l00180"></a>00180 <span class="preprocessor">  #else</span>
+<a name="l00181"></a>00181 <span class="preprocessor"></span>        <span class="keywordflow">return</span> pthread_mutex_trylock(&amp;impl)==0;
+<a name="l00182"></a>00182 <span class="preprocessor">  #endif </span><span class="comment">/* _WIN32||_WIN64 */</span>
+<a name="l00183"></a>00183 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00184"></a>00184     }
+<a name="l00185"></a>00185 
+<a name="l00187"></a><a class="code" href="a00194.html#f0a96e26b7f074588dc31e32524856ae">00187</a>     <span class="keywordtype">void</span> <a class="code" href="a00194.html#f0a96e26b7f074588dc31e32524856ae">unlock</a>() {
+<a name="l00188"></a>00188 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00189"></a>00189 <span class="preprocessor"></span>        <a class="code" href="a00146.html">aligned_space&lt;scoped_lock,1&gt;</a> tmp;
+<a name="l00190"></a>00190         <a class="code" href="a00195.html">scoped_lock</a>&amp; s = *tmp.<a class="code" href="a00146.html#0d702fc6b9e9d061ace3501b3c861cdf">begin</a>();
+<a name="l00191"></a>00191         s.<a class="code" href="a00195.html#3efd10a49e2435ea5215b49907f93ba2">my_mutex</a> = <span class="keyword">this</span>;
+<a name="l00192"></a>00192         s.<a class="code" href="a00195.html#43ed37cec508072e53a52113c8040137">internal_release</a>();
+<a name="l00193"></a>00193 <span class="preprocessor">#else</span>
+<a name="l00194"></a>00194 <span class="preprocessor"></span><span class="preprocessor">  #if _WIN32||_WIN64</span>
+<a name="l00195"></a>00195 <span class="preprocessor"></span>        LeaveCriticalSection(&amp;impl);
+<a name="l00196"></a>00196 <span class="preprocessor">  #else</span>
+<a name="l00197"></a>00197 <span class="preprocessor"></span>        pthread_mutex_unlock(&amp;impl);
+<a name="l00198"></a>00198 <span class="preprocessor">  #endif </span><span class="comment">/* _WIN32||_WIN64 */</span>
+<a name="l00199"></a>00199 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00200"></a>00200     }
+<a name="l00201"></a>00201 
+<a name="l00203"></a>00203 <span class="preprocessor">  #if _WIN32||_WIN64</span>
+<a name="l00204"></a><a class="code" href="a00194.html#889fa8cc32dd707eef7c0f52dda09c0d">00204</a> <span class="preprocessor"></span>    <span class="keyword">typedef</span> LPCRITICAL_SECTION <a class="code" href="a00194.html#889fa8cc32dd707eef7c0f52dda09c0d">native_handle_type</a>;
+<a name="l00205"></a>00205 <span class="preprocessor">  #else</span>
+<a name="l00206"></a>00206 <span class="preprocessor"></span>    <span class="keyword">typedef</span> pthread_mutex_t* <a class="code" href="a00194.html#889fa8cc32dd707eef7c0f52dda09c0d">native_handle_type</a>;
+<a name="l00207"></a>00207 <span class="preprocessor">  #endif</span>
+<a name="l00208"></a>00208 <span class="preprocessor"></span>    <a class="code" href="a00194.html#889fa8cc32dd707eef7c0f52dda09c0d">native_handle_type</a> native_handle() { <span class="keywordflow">return</span> (<a class="code" href="a00194.html#889fa8cc32dd707eef7c0f52dda09c0d">native_handle_type</a>) &amp;impl; }
+<a name="l00209"></a>00209 
+<a name="l00210"></a>00210 <span class="keyword">private</span>:
+<a name="l00211"></a>00211 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00212"></a>00212 <span class="preprocessor"></span>    CRITICAL_SECTION impl;
+<a name="l00213"></a>00213     <span class="keyword">enum</span> state_t {
+<a name="l00214"></a>00214         INITIALIZED=0x1234,
+<a name="l00215"></a>00215         DESTROYED=0x789A,
+<a name="l00216"></a>00216     } state;
+<a name="l00217"></a>00217 <span class="preprocessor">#else</span>
+<a name="l00218"></a>00218 <span class="preprocessor"></span>    pthread_mutex_t impl;
+<a name="l00219"></a>00219 <span class="preprocessor">#endif </span><span class="comment">/* _WIN32||_WIN64 */</span>
+<a name="l00220"></a>00220 
+<a name="l00222"></a>00222     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_construct();
+<a name="l00223"></a>00223 
+<a name="l00225"></a>00225     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_destroy();
+<a name="l00226"></a>00226 };
+<a name="l00227"></a>00227 
+<a name="l00228"></a>00228 __TBB_DEFINE_PROFILING_SET_NAME(recursive_mutex)
+<a name="l00229"></a>00229 
+<a name="l00230"></a>00230 } <span class="comment">// namespace tbb </span>
+<a name="l00231"></a>00231 
+<a name="l00232"></a>00232 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_recursive_mutex_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
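A minimal sketch of tbb::recursive_mutex from the recursive_mutex.h listing above: the same thread may re-acquire a mutex it already holds, which a plain tbb::mutex does not allow. The tree structure and traversal are assumptions made for the example.

    #include "tbb/recursive_mutex.h"

    struct node {                       // illustrative data structure
        node* left;
        node* right;
        int value;
    };

    tbb::recursive_mutex tree_mutex;

    // The recursive calls re-acquire tree_mutex on the same thread without
    // deadlocking, because is_recursive_mutex is true for this class.
    long sum_subtree(node* n) {
        if (!n) return 0;
        tbb::recursive_mutex::scoped_lock lock(tree_mutex);
        return n->value + sum_subtree(n->left) + sum_subtree(n->right);
    }

The C++0x compatibility members lock(), try_lock() and unlock() shown in the listing can be used instead of scoped_lock where RAII-style locking is not wanted.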
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00435.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00435.html
new file mode 100644 (file)
index 0000000..4cbdd04
--- /dev/null
@@ -0,0 +1,199 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>scalable_allocator.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>scalable_allocator.h</h1><a href="a00253.html">Go to the documentation of this file.</a><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_scalable_allocator_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_scalable_allocator_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00025"></a>00025 <span class="preprocessor">#include &lt;stddef.h&gt;</span> <span class="comment">/* Need ptrdiff_t and size_t from here. */</span>
+<a name="l00026"></a>00026 
+<a name="l00027"></a>00027 <span class="preprocessor">#if !defined(__cplusplus) &amp;&amp; __ICC==1100</span>
+<a name="l00028"></a>00028 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (push)</span>
+<a name="l00029"></a>00029 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (disable: 991)</span>
+<a name="l00030"></a>00030 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00031"></a>00031 <span class="preprocessor"></span>
+<a name="l00032"></a>00032 <span class="preprocessor">#ifdef __cplusplus</span>
+<a name="l00033"></a>00033 <span class="preprocessor"></span><span class="keyword">extern</span> <span class="stringliteral">"C"</span> {
+<a name="l00034"></a>00034 <span class="preprocessor">#endif </span><span class="comment">/* __cplusplus */</span>
+<a name="l00035"></a>00035 
+<a name="l00036"></a>00036 <span class="preprocessor">#if _MSC_VER &gt;= 1400</span>
+<a name="l00037"></a>00037 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_EXPORTED_FUNC   __cdecl</span>
+<a name="l00038"></a>00038 <span class="preprocessor"></span><span class="preprocessor">#else</span>
+<a name="l00039"></a>00039 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_EXPORTED_FUNC</span>
+<a name="l00040"></a>00040 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00041"></a>00041 <span class="preprocessor"></span>
+<a name="l00044"></a>00044 <span class="keywordtype">void</span> * __TBB_EXPORTED_FUNC <a class="code" href="a00282.html#gc25b8e6c76db0b346a8249796a7a2475">scalable_malloc</a> (size_t size);
+<a name="l00045"></a>00045 
+<a name="l00048"></a>00048 <span class="keywordtype">void</span>   __TBB_EXPORTED_FUNC <a class="code" href="a00282.html#gca3579c21244dba9f0c351e5984d4565">scalable_free</a> (<span class="keywordtype">void</span>* ptr);
+<a name="l00049"></a>00049 
+<a name="l00052"></a>00052 <span class="keywordtype">void</span> * __TBB_EXPORTED_FUNC <a class="code" href="a00282.html#g951bbbbd2d041acb59ba5fa910b52543">scalable_realloc</a> (<span class="keywordtype">void</span>* ptr, size_t size);
+<a name="l00053"></a>00053 
+<a name="l00056"></a>00056 <span class="keywordtype">void</span> * __TBB_EXPORTED_FUNC <a class="code" href="a00282.html#g3f5a2fde0bcaa3eda35be32c8658f444">scalable_calloc</a> (size_t nobj, size_t size);
+<a name="l00057"></a>00057 
+<a name="l00060"></a>00060 <span class="keywordtype">int</span> __TBB_EXPORTED_FUNC <a class="code" href="a00282.html#g05dcec987480bb2c82ecdead6a085899">scalable_posix_memalign</a> (<span class="keywordtype">void</span>** memptr, size_t alignment, size_t size);
+<a name="l00061"></a>00061 
+<a name="l00064"></a>00064 <span class="keywordtype">void</span> * __TBB_EXPORTED_FUNC <a class="code" href="a00282.html#gc1c7aaa1fe85c17ba5a3a96f7e8d89e7">scalable_aligned_malloc</a> (size_t size, size_t alignment);
+<a name="l00065"></a>00065 
+<a name="l00068"></a>00068 <span class="keywordtype">void</span> * __TBB_EXPORTED_FUNC <a class="code" href="a00282.html#gbaea91376be80dfabd7c93eaffd9abaa">scalable_aligned_realloc</a> (<span class="keywordtype">void</span>* ptr, size_t size, size_t alignment);
+<a name="l00069"></a>00069 
+<a name="l00072"></a>00072 <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC <a class="code" href="a00282.html#g65a20e812012f15ec7442d5b45d0cba5">scalable_aligned_free</a> (<span class="keywordtype">void</span>* ptr);
+<a name="l00073"></a>00073 
+<a name="l00078"></a>00078 size_t __TBB_EXPORTED_FUNC <a class="code" href="a00282.html#g0965ce1b4b7835f92869c7fd867265f7">scalable_msize</a> (<span class="keywordtype">void</span>* ptr);
+<a name="l00079"></a>00079 
+<a name="l00080"></a>00080 <span class="preprocessor">#ifdef __cplusplus</span>
+<a name="l00081"></a>00081 <span class="preprocessor"></span>} <span class="comment">/* extern "C" */</span>
+<a name="l00082"></a>00082 <span class="preprocessor">#endif </span><span class="comment">/* __cplusplus */</span>
+<a name="l00083"></a>00083 
+<a name="l00084"></a>00084 <span class="preprocessor">#ifdef __cplusplus</span>
+<a name="l00085"></a>00085 <span class="preprocessor"></span>
+<a name="l00086"></a>00086 <span class="preprocessor">#include &lt;new&gt;</span>      <span class="comment">/* To use new with the placement argument */</span>
+<a name="l00087"></a>00087 
+<a name="l00088"></a>00088 <span class="comment">/* Ensure that including this header does not cause implicit linkage with TBB */</span>
+<a name="l00089"></a>00089 <span class="preprocessor">#ifndef __TBB_NO_IMPLICIT_LINKAGE</span>
+<a name="l00090"></a>00090 <span class="preprocessor"></span><span class="preprocessor">    #define __TBB_NO_IMPLICIT_LINKAGE 1</span>
+<a name="l00091"></a>00091 <span class="preprocessor"></span><span class="preprocessor">    #include "tbb_stddef.h"</span>
+<a name="l00092"></a>00092 <span class="preprocessor">    #undef  __TBB_NO_IMPLICIT_LINKAGE</span>
+<a name="l00093"></a>00093 <span class="preprocessor"></span><span class="preprocessor">#else</span>
+<a name="l00094"></a>00094 <span class="preprocessor"></span><span class="preprocessor">    #include "tbb_stddef.h"</span>
+<a name="l00095"></a>00095 <span class="preprocessor">#endif</span>
+<a name="l00096"></a>00096 <span class="preprocessor"></span>
+<a name="l00097"></a>00097 
+<a name="l00098"></a>00098 <span class="keyword">namespace </span>tbb {
+<a name="l00099"></a>00099 
+<a name="l00100"></a>00100 <span class="preprocessor">#if _MSC_VER &amp;&amp; !defined(__INTEL_COMPILER)</span>
+<a name="l00101"></a>00101 <span class="preprocessor"></span>    <span class="comment">// Workaround for erroneous "unreferenced parameter" warning in method destroy.</span>
+<a name="l00102"></a>00102 <span class="preprocessor">    #pragma warning (push)</span>
+<a name="l00103"></a>00103 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (disable: 4100)</span>
+<a name="l00104"></a>00104 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00105"></a>00105 <span class="preprocessor"></span>
+<a name="l00107"></a>00107 
+<a name="l00110"></a>00110 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00111"></a><a class="code" href="a00196.html">00111</a> <span class="keyword">class </span><a class="code" href="a00196.html">scalable_allocator</a> {
+<a name="l00112"></a>00112 <span class="keyword">public</span>:
+<a name="l00113"></a>00113     <span class="keyword">typedef</span> <span class="keyword">typename</span> internal::allocator_type&lt;T&gt;::value_type value_type;
+<a name="l00114"></a>00114     <span class="keyword">typedef</span> value_type* pointer;
+<a name="l00115"></a>00115     <span class="keyword">typedef</span> <span class="keyword">const</span> value_type* const_pointer;
+<a name="l00116"></a>00116     <span class="keyword">typedef</span> value_type&amp; reference;
+<a name="l00117"></a>00117     <span class="keyword">typedef</span> <span class="keyword">const</span> value_type&amp; const_reference;
+<a name="l00118"></a>00118     <span class="keyword">typedef</span> size_t size_type;
+<a name="l00119"></a>00119     <span class="keyword">typedef</span> ptrdiff_t difference_type;
+<a name="l00120"></a>00120     <span class="keyword">template</span>&lt;<span class="keyword">class</span> U&gt; <span class="keyword">struct </span>rebind {
+<a name="l00121"></a>00121         <span class="keyword">typedef</span> <a class="code" href="a00196.html">scalable_allocator&lt;U&gt;</a> other;
+<a name="l00122"></a>00122     };
+<a name="l00123"></a>00123 
+<a name="l00124"></a>00124     <a class="code" href="a00196.html">scalable_allocator</a>() <span class="keywordflow">throw</span>() {}
+<a name="l00125"></a>00125     <a class="code" href="a00196.html">scalable_allocator</a>( <span class="keyword">const</span> <a class="code" href="a00196.html">scalable_allocator</a>&amp; ) <span class="keywordflow">throw</span>() {}
+<a name="l00126"></a>00126     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> U&gt; <a class="code" href="a00196.html">scalable_allocator</a>(<span class="keyword">const</span> <a class="code" href="a00196.html">scalable_allocator&lt;U&gt;</a>&amp;) <span class="keywordflow">throw</span>() {}
+<a name="l00127"></a>00127 
+<a name="l00128"></a>00128     pointer address(reference x)<span class="keyword"> const </span>{<span class="keywordflow">return</span> &amp;x;}
+<a name="l00129"></a>00129     const_pointer address(const_reference x)<span class="keyword"> const </span>{<span class="keywordflow">return</span> &amp;x;}
+<a name="l00130"></a>00130 
+<a name="l00132"></a><a class="code" href="a00196.html#726b1586d05d44665a36e1c7b2699bfd">00132</a>     pointer <a class="code" href="a00196.html#726b1586d05d44665a36e1c7b2699bfd">allocate</a>( size_type n, <span class="keyword">const</span> <span class="keywordtype">void</span>* <span class="comment">/*hint*/</span> =0 ) {
+<a name="l00133"></a>00133         <span class="keywordflow">return</span> static_cast&lt;pointer&gt;( <a class="code" href="a00282.html#gc25b8e6c76db0b346a8249796a7a2475">scalable_malloc</a>( n * <span class="keyword">sizeof</span>(value_type) ) );
+<a name="l00134"></a>00134     }
+<a name="l00135"></a>00135 
+<a name="l00137"></a><a class="code" href="a00196.html#f806a238c18cbcfb531e1e0a0d2ec59d">00137</a>     <span class="keywordtype">void</span> <a class="code" href="a00196.html#f806a238c18cbcfb531e1e0a0d2ec59d">deallocate</a>( pointer p, size_type ) {
+<a name="l00138"></a>00138         <a class="code" href="a00282.html#gca3579c21244dba9f0c351e5984d4565">scalable_free</a>( p );
+<a name="l00139"></a>00139     }
+<a name="l00140"></a>00140 
+<a name="l00142"></a><a class="code" href="a00196.html#880e766f1d913988c21973dbdd874fd5">00142</a>     size_type <a class="code" href="a00196.html#880e766f1d913988c21973dbdd874fd5">max_size</a>() const throw() {
+<a name="l00143"></a>00143         size_type absolutemax = static_cast&lt;size_type&gt;(-1) / <span class="keyword">sizeof</span> (value_type);
+<a name="l00144"></a>00144         <span class="keywordflow">return</span> (absolutemax &gt; 0 ? absolutemax : 1);
+<a name="l00145"></a>00145     }
+<a name="l00146"></a>00146     <span class="keywordtype">void</span> construct( pointer p, <span class="keyword">const</span> value_type&amp; value ) {::new((<span class="keywordtype">void</span>*)(p)) value_type(value);}
+<a name="l00147"></a>00147     <span class="keywordtype">void</span> destroy( pointer p ) {p-&gt;~value_type();}
+<a name="l00148"></a>00148 };
+<a name="l00149"></a>00149 
+<a name="l00150"></a>00150 <span class="preprocessor">#if _MSC_VER &amp;&amp; !defined(__INTEL_COMPILER)</span>
+<a name="l00151"></a>00151 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (pop)</span>
+<a name="l00152"></a>00152 <span class="preprocessor"></span><span class="preprocessor">#endif // warning 4100 is back</span>
+<a name="l00153"></a>00153 <span class="preprocessor"></span>
+<a name="l00155"></a>00155 
+<a name="l00156"></a>00156 <span class="keyword">template</span>&lt;&gt;
+<a name="l00157"></a><a class="code" href="a00197.html">00157</a> <span class="keyword">class </span><a class="code" href="a00196.html">scalable_allocator</a>&lt;void&gt; {
+<a name="l00158"></a>00158 <span class="keyword">public</span>:
+<a name="l00159"></a>00159     <span class="keyword">typedef</span> <span class="keywordtype">void</span>* pointer;
+<a name="l00160"></a>00160     <span class="keyword">typedef</span> <span class="keyword">const</span> <span class="keywordtype">void</span>* const_pointer;
+<a name="l00161"></a>00161     <span class="keyword">typedef</span> <span class="keywordtype">void</span> value_type;
+<a name="l00162"></a>00162     <span class="keyword">template</span>&lt;<span class="keyword">class</span> U&gt; <span class="keyword">struct </span>rebind {
+<a name="l00163"></a>00163         <span class="keyword">typedef</span> <a class="code" href="a00196.html">scalable_allocator&lt;U&gt;</a> other;
+<a name="l00164"></a>00164     };
+<a name="l00165"></a>00165 };
+<a name="l00166"></a>00166 
+<a name="l00167"></a>00167 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00168"></a>00168 <span class="keyword">inline</span> <span class="keywordtype">bool</span> operator==( <span class="keyword">const</span> <a class="code" href="a00196.html">scalable_allocator&lt;T&gt;</a>&amp;, <span class="keyword">const</span> <a class="code" href="a00196.html">scalable_allocator&lt;U&gt;</a>&amp; ) {<span class="keywordflow">return</span> <span class="keyword">true</span>;}
+<a name="l00169"></a>00169 
+<a name="l00170"></a>00170 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00171"></a>00171 <span class="keyword">inline</span> <span class="keywordtype">bool</span> operator!=( <span class="keyword">const</span> scalable_allocator&lt;T&gt;&amp;, <span class="keyword">const</span> scalable_allocator&lt;U&gt;&amp; ) {<span class="keywordflow">return</span> <span class="keyword">false</span>;}
+<a name="l00172"></a>00172 
+<a name="l00173"></a>00173 } <span class="comment">// namespace tbb</span>
+<a name="l00174"></a>00174 
+<a name="l00175"></a>00175 <span class="preprocessor">#if _MSC_VER</span>
+<a name="l00176"></a>00176 <span class="preprocessor"></span><span class="preprocessor">    #if __TBB_BUILD &amp;&amp; !defined(__TBBMALLOC_NO_IMPLICIT_LINKAGE)</span>
+<a name="l00177"></a>00177 <span class="preprocessor"></span><span class="preprocessor">        #define __TBBMALLOC_NO_IMPLICIT_LINKAGE 1</span>
+<a name="l00178"></a>00178 <span class="preprocessor"></span><span class="preprocessor">    #endif</span>
+<a name="l00179"></a>00179 <span class="preprocessor"></span>
+<a name="l00180"></a>00180 <span class="preprocessor">    #if !__TBBMALLOC_NO_IMPLICIT_LINKAGE</span>
+<a name="l00181"></a>00181 <span class="preprocessor"></span><span class="preprocessor">        #ifdef _DEBUG</span>
+<a name="l00182"></a>00182 <span class="preprocessor"></span><span class="preprocessor">            #pragma comment(lib, "tbbmalloc_debug.lib")</span>
+<a name="l00183"></a>00183 <span class="preprocessor"></span><span class="preprocessor">        #else</span>
+<a name="l00184"></a>00184 <span class="preprocessor"></span><span class="preprocessor">            #pragma comment(lib, "tbbmalloc.lib")</span>
+<a name="l00185"></a>00185 <span class="preprocessor"></span><span class="preprocessor">        #endif</span>
+<a name="l00186"></a>00186 <span class="preprocessor"></span><span class="preprocessor">    #endif</span>
+<a name="l00187"></a>00187 <span class="preprocessor"></span>
+<a name="l00188"></a>00188 
+<a name="l00189"></a>00189 <span class="preprocessor">#endif</span>
+<a name="l00190"></a>00190 <span class="preprocessor"></span>
+<a name="l00191"></a>00191 <span class="preprocessor">#endif </span><span class="comment">/* __cplusplus */</span>
+<a name="l00192"></a>00192 
+<a name="l00193"></a>00193 <span class="preprocessor">#if !defined(__cplusplus) &amp;&amp; __ICC==1100</span>
+<a name="l00194"></a>00194 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (pop)</span>
+<a name="l00195"></a>00195 <span class="preprocessor"></span><span class="preprocessor">#endif // ICC 11.0 warning 991 is back</span>
+<a name="l00196"></a>00196 <span class="preprocessor"></span>
+<a name="l00197"></a>00197 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_scalable_allocator_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
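
Editorial note (not part of the imported TBB sources): the scalable_allocator.h interface documented above can be exercised with a short, self-contained sketch. It assumes only that the TBB headers and the tbbmalloc library added by this commit are on the include and link paths; everything else (variable names, sizes) is illustrative.

    // Minimal usage sketch for the scalable_allocator.h interface shown above.
    // Assumes TBB headers and tbbmalloc are available; link with -ltbbmalloc.
    #include <cstdio>
    #include <vector>
    #include "tbb/scalable_allocator.h"

    int main() {
        // C interface: drop-in replacements for malloc/realloc/free,
        // served from the scalable per-thread memory pools.
        void* raw = scalable_malloc(64);      // allocate 64 bytes
        raw = scalable_realloc(raw, 128);     // grow the block
        scalable_free(raw);                   // return it to the pool

        // C++ interface: STL-compatible allocator template.
        std::vector<int, tbb::scalable_allocator<int> > v;
        for (int i = 0; i < 10; ++i)
            v.push_back(i);
        std::printf("v.size() = %u\n", (unsigned)v.size());
        return 0;
    }

Using tbb::scalable_allocator as the container's allocator routes all of the vector's allocations through scalable_malloc/scalable_free, which is the intended way to avoid allocator contention in multithreaded code without changing the container type elsewhere.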
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00436.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00436.html
new file mode 100644 (file)
index 0000000..827b4bb
--- /dev/null
@@ -0,0 +1,192 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>spin_mutex.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>spin_mutex.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_spin_mutex_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_spin_mutex_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include &lt;cstddef&gt;</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#include &lt;new&gt;</span>
+<a name="l00026"></a>00026 <span class="preprocessor">#include "aligned_space.h"</span>
+<a name="l00027"></a>00027 <span class="preprocessor">#include "tbb_stddef.h"</span>
+<a name="l00028"></a>00028 <span class="preprocessor">#include "tbb_machine.h"</span>
+<a name="l00029"></a>00029 <span class="preprocessor">#include "tbb_profiling.h"</span>
+<a name="l00030"></a>00030 
+<a name="l00031"></a>00031 <span class="keyword">namespace </span>tbb {
+<a name="l00032"></a>00032 
+<a name="l00034"></a>00034 
+<a name="l00039"></a><a class="code" href="a00199.html">00039</a> <span class="keyword">class </span><a class="code" href="a00199.html">spin_mutex</a> {
+<a name="l00041"></a>00041     <span class="keywordtype">unsigned</span> <span class="keywordtype">char</span> flag;
+<a name="l00042"></a>00042 
+<a name="l00043"></a>00043 <span class="keyword">public</span>:
+<a name="l00045"></a>00045 
+<a name="l00046"></a><a class="code" href="a00199.html#3d8fb44644fd8d41ada1fbeba7409be3">00046</a>     <a class="code" href="a00199.html#3d8fb44644fd8d41ada1fbeba7409be3">spin_mutex</a>() : flag(0) {
+<a name="l00047"></a>00047 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS</span>
+<a name="l00048"></a>00048 <span class="preprocessor"></span>        internal_construct();
+<a name="l00049"></a>00049 <span class="preprocessor">#endif</span>
+<a name="l00050"></a>00050 <span class="preprocessor"></span>    }
+<a name="l00051"></a>00051 
+<a name="l00053"></a><a class="code" href="a00200.html">00053</a>     <span class="keyword">class </span><a class="code" href="a00200.html">scoped_lock</a> : internal::no_copy {
+<a name="l00054"></a>00054     <span class="keyword">private</span>:
+<a name="l00056"></a>00056         <a class="code" href="a00199.html">spin_mutex</a>* my_mutex; 
+<a name="l00057"></a>00057 
+<a name="l00059"></a>00059         uintptr_t my_unlock_value;
+<a name="l00060"></a>00060 
+<a name="l00062"></a>00062         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_acquire( <a class="code" href="a00199.html">spin_mutex</a>&amp; m );
+<a name="l00063"></a>00063 
+<a name="l00065"></a>00065         <span class="keywordtype">bool</span> __TBB_EXPORTED_METHOD internal_try_acquire( <a class="code" href="a00199.html">spin_mutex</a>&amp; m );
+<a name="l00066"></a>00066 
+<a name="l00068"></a>00068         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_release();
+<a name="l00069"></a>00069 
+<a name="l00070"></a>00070         <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00199.html">spin_mutex</a>;
+<a name="l00071"></a>00071 
+<a name="l00072"></a>00072     <span class="keyword">public</span>:
+<a name="l00074"></a><a class="code" href="a00200.html#29ae680ae7f5e685c2e15535b9c855b3">00074</a>         <a class="code" href="a00200.html#29ae680ae7f5e685c2e15535b9c855b3">scoped_lock</a>() : my_mutex(NULL), my_unlock_value(0) {}
+<a name="l00075"></a>00075 
+<a name="l00077"></a><a class="code" href="a00200.html#5ce6807050a9e8f87bcb4a65dccb12ef">00077</a>         <a class="code" href="a00200.html#29ae680ae7f5e685c2e15535b9c855b3">scoped_lock</a>( <a class="code" href="a00199.html">spin_mutex</a>&amp; m ) { 
+<a name="l00078"></a>00078 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT</span>
+<a name="l00079"></a>00079 <span class="preprocessor"></span>            my_mutex=NULL;
+<a name="l00080"></a>00080             internal_acquire(m);
+<a name="l00081"></a>00081 <span class="preprocessor">#else</span>
+<a name="l00082"></a>00082 <span class="preprocessor"></span>            my_unlock_value = __TBB_LockByte(m.<a class="code" href="a00199.html#81b0b05fb71280ce0c79708e82569d86">flag</a>);
+<a name="l00083"></a>00083             my_mutex=&amp;m;
+<a name="l00084"></a>00084 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/</span>
+<a name="l00085"></a>00085         }
+<a name="l00086"></a>00086 
+<a name="l00088"></a><a class="code" href="a00200.html#3ee3c338732b1f64b0b32a757807a30d">00088</a>         <span class="keywordtype">void</span> <a class="code" href="a00200.html#3ee3c338732b1f64b0b32a757807a30d">acquire</a>( <a class="code" href="a00199.html">spin_mutex</a>&amp; m ) {
+<a name="l00089"></a>00089 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT</span>
+<a name="l00090"></a>00090 <span class="preprocessor"></span>            internal_acquire(m);
+<a name="l00091"></a>00091 <span class="preprocessor">#else</span>
+<a name="l00092"></a>00092 <span class="preprocessor"></span>            my_unlock_value = __TBB_LockByte(m.<a class="code" href="a00199.html#81b0b05fb71280ce0c79708e82569d86">flag</a>);
+<a name="l00093"></a>00093             my_mutex = &amp;m;
+<a name="l00094"></a>00094 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/</span>
+<a name="l00095"></a>00095         }
+<a name="l00096"></a>00096 
+<a name="l00098"></a>00098 
+<a name="l00099"></a><a class="code" href="a00200.html#9297ec188534b45dc0ca48f2f39a0501">00099</a>         <span class="keywordtype">bool</span> <a class="code" href="a00200.html#9297ec188534b45dc0ca48f2f39a0501">try_acquire</a>( <a class="code" href="a00199.html">spin_mutex</a>&amp; m ) {
+<a name="l00100"></a>00100 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT</span>
+<a name="l00101"></a>00101 <span class="preprocessor"></span>            <span class="keywordflow">return</span> internal_try_acquire(m);
+<a name="l00102"></a>00102 <span class="preprocessor">#else</span>
+<a name="l00103"></a>00103 <span class="preprocessor"></span>            <span class="keywordtype">bool</span> result = __TBB_TryLockByte(m.<a class="code" href="a00199.html#81b0b05fb71280ce0c79708e82569d86">flag</a>);
+<a name="l00104"></a>00104             <span class="keywordflow">if</span>( result ) {
+<a name="l00105"></a>00105                 my_unlock_value = 0;
+<a name="l00106"></a>00106                 my_mutex = &amp;m;
+<a name="l00107"></a>00107             }
+<a name="l00108"></a>00108             <span class="keywordflow">return</span> result;
+<a name="l00109"></a>00109 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/</span>
+<a name="l00110"></a>00110         }
+<a name="l00111"></a>00111 
+<a name="l00113"></a><a class="code" href="a00200.html#eeb615e68e963e6bf8d9c11402d0ce8e">00113</a>         <span class="keywordtype">void</span> <a class="code" href="a00200.html#eeb615e68e963e6bf8d9c11402d0ce8e">release</a>() {
+<a name="l00114"></a>00114 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT</span>
+<a name="l00115"></a>00115 <span class="preprocessor"></span>            internal_release();
+<a name="l00116"></a>00116 <span class="preprocessor">#else</span>
+<a name="l00117"></a>00117 <span class="preprocessor"></span>            __TBB_store_with_release(my_mutex-&gt;<a class="code" href="a00199.html#81b0b05fb71280ce0c79708e82569d86">flag</a>, static_cast&lt;unsigned char&gt;(my_unlock_value));
+<a name="l00118"></a>00118             my_mutex = NULL;
+<a name="l00119"></a>00119 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */</span>
+<a name="l00120"></a>00120         }
+<a name="l00121"></a>00121 
+<a name="l00123"></a><a class="code" href="a00200.html#ac6fa425d1f06c56d8b70abc51aac844">00123</a>         <a class="code" href="a00200.html#ac6fa425d1f06c56d8b70abc51aac844">~scoped_lock</a>() {
+<a name="l00124"></a>00124             <span class="keywordflow">if</span>( my_mutex ) {
+<a name="l00125"></a>00125 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT</span>
+<a name="l00126"></a>00126 <span class="preprocessor"></span>                internal_release();
+<a name="l00127"></a>00127 <span class="preprocessor">#else</span>
+<a name="l00128"></a>00128 <span class="preprocessor"></span>                __TBB_store_with_release(my_mutex-&gt;<a class="code" href="a00199.html#81b0b05fb71280ce0c79708e82569d86">flag</a>, static_cast&lt;unsigned char&gt;(my_unlock_value));
+<a name="l00129"></a>00129 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */</span>
+<a name="l00130"></a>00130             }
+<a name="l00131"></a>00131         }
+<a name="l00132"></a>00132     };
+<a name="l00133"></a>00133 
+<a name="l00134"></a>00134     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_construct();
+<a name="l00135"></a>00135 
+<a name="l00136"></a>00136     <span class="comment">// Mutex traits</span>
+<a name="l00137"></a>00137     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_rw_mutex = <span class="keyword">false</span>;
+<a name="l00138"></a>00138     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_recursive_mutex = <span class="keyword">false</span>;
+<a name="l00139"></a>00139     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_fair_mutex = <span class="keyword">false</span>;
+<a name="l00140"></a>00140 
+<a name="l00141"></a>00141     <span class="comment">// ISO C++0x compatibility methods</span>
+<a name="l00142"></a>00142 
+<a name="l00144"></a><a class="code" href="a00199.html#4f748989e19b6045e3a2d2ee73626a28">00144</a>     <span class="keywordtype">void</span> <a class="code" href="a00199.html#4f748989e19b6045e3a2d2ee73626a28">lock</a>() {
+<a name="l00145"></a>00145 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS</span>
+<a name="l00146"></a>00146 <span class="preprocessor"></span>        <a class="code" href="a00146.html">aligned_space&lt;scoped_lock,1&gt;</a> tmp;
+<a name="l00147"></a>00147         <span class="keyword">new</span>(tmp.<a class="code" href="a00146.html#0d702fc6b9e9d061ace3501b3c861cdf">begin</a>()) <a class="code" href="a00200.html">scoped_lock</a>(*<span class="keyword">this</span>);
+<a name="l00148"></a>00148 <span class="preprocessor">#else</span>
+<a name="l00149"></a>00149 <span class="preprocessor"></span>        __TBB_LockByte(flag);
+<a name="l00150"></a>00150 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_THREADING_TOOLS*/</span>
+<a name="l00151"></a>00151     }
+<a name="l00152"></a>00152 
+<a name="l00154"></a>00154 
+<a name="l00155"></a><a class="code" href="a00199.html#8f9a58fb56a2b4c5efe1a7f7c1ae2074">00155</a>     <span class="keywordtype">bool</span> <a class="code" href="a00199.html#8f9a58fb56a2b4c5efe1a7f7c1ae2074">try_lock</a>() {
+<a name="l00156"></a>00156 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS</span>
+<a name="l00157"></a>00157 <span class="preprocessor"></span>        <a class="code" href="a00146.html">aligned_space&lt;scoped_lock,1&gt;</a> tmp;
+<a name="l00158"></a>00158         <span class="keywordflow">return</span> (<span class="keyword">new</span>(tmp.<a class="code" href="a00146.html#0d702fc6b9e9d061ace3501b3c861cdf">begin</a>()) <a class="code" href="a00200.html">scoped_lock</a>)-&gt;internal_try_acquire(*<span class="keyword">this</span>);
+<a name="l00159"></a>00159 <span class="preprocessor">#else</span>
+<a name="l00160"></a>00160 <span class="preprocessor"></span>        <span class="keywordflow">return</span> __TBB_TryLockByte(flag);
+<a name="l00161"></a>00161 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_THREADING_TOOLS*/</span>
+<a name="l00162"></a>00162     }
+<a name="l00163"></a>00163 
+<a name="l00165"></a><a class="code" href="a00199.html#0e843ee6265f57f27d228ba91e7308ef">00165</a>     <span class="keywordtype">void</span> <a class="code" href="a00199.html#0e843ee6265f57f27d228ba91e7308ef">unlock</a>() {
+<a name="l00166"></a>00166 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS</span>
+<a name="l00167"></a>00167 <span class="preprocessor"></span>        <a class="code" href="a00146.html">aligned_space&lt;scoped_lock,1&gt;</a> tmp;
+<a name="l00168"></a>00168         <a class="code" href="a00200.html">scoped_lock</a>&amp; s = *tmp.<a class="code" href="a00146.html#0d702fc6b9e9d061ace3501b3c861cdf">begin</a>();
+<a name="l00169"></a>00169         s.<a class="code" href="a00200.html#d968921bed018503214f36e09007ee7b">my_mutex</a> = <span class="keyword">this</span>;
+<a name="l00170"></a>00170         s.<a class="code" href="a00200.html#43a6f8977cd1ed2752f417f3ad9fc4af">my_unlock_value</a> = 0;
+<a name="l00171"></a>00171         s.<a class="code" href="a00200.html#8f4d19aa2d6d112034d281eed0dab5fa">internal_release</a>();
+<a name="l00172"></a>00172 <span class="preprocessor">#else</span>
+<a name="l00173"></a>00173 <span class="preprocessor"></span>        __TBB_store_with_release(flag, 0);
+<a name="l00174"></a>00174 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_THREADING_TOOLS */</span>
+<a name="l00175"></a>00175     }
+<a name="l00176"></a>00176 
+<a name="l00177"></a>00177     <span class="keyword">friend</span> <span class="keyword">class </span>scoped_lock;
+<a name="l00178"></a>00178 };
+<a name="l00179"></a>00179 
+<a name="l00180"></a>00180 __TBB_DEFINE_PROFILING_SET_NAME(spin_mutex)
+<a name="l00181"></a>00181 
+<a name="l00182"></a>00182 } <span class="comment">// namespace tbb</span>
+<a name="l00183"></a>00183 
+<a name="l00184"></a>00184 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_spin_mutex_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
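
Editorial note (not part of the imported TBB sources): a minimal sketch of how the spin_mutex documented above is typically used. Only the documented tbb::spin_mutex and its nested scoped_lock are assumed; the counter and function name are illustrative.

    // Minimal usage sketch for the spin_mutex interface shown above.
    // A spin_mutex is appropriate only for very short critical sections.
    #include <cstdio>
    #include "tbb/spin_mutex.h"

    static tbb::spin_mutex counter_mutex;
    static long counter = 0;

    void increment_counter() {
        // scoped_lock acquires the mutex in its constructor and releases it
        // in its destructor, so the critical section is exception-safe.
        tbb::spin_mutex::scoped_lock lock(counter_mutex);
        ++counter;
    }   // lock released here

    int main() {
        increment_counter();
        std::printf("counter = %ld\n", counter);
        return 0;
    }

The scoped_lock idiom is preferred over the bare lock()/unlock() compatibility methods, since an early return or exception inside the critical section cannot leave the mutex held.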
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00437.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00437.html
new file mode 100644 (file)
index 0000000..193765e
--- /dev/null
@@ -0,0 +1,217 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>spin_rw_mutex.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>spin_rw_mutex.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_spin_rw_mutex_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_spin_rw_mutex_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "tbb_stddef.h"</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#include "tbb_machine.h"</span>
+<a name="l00026"></a>00026 <span class="preprocessor">#include "tbb_profiling.h"</span>
+<a name="l00027"></a>00027 
+<a name="l00028"></a>00028 <span class="keyword">namespace </span>tbb {
+<a name="l00029"></a>00029 
+<a name="l00030"></a>00030 <span class="keyword">class </span>spin_rw_mutex_v3;
+<a name="l00031"></a>00031 <span class="keyword">typedef</span> spin_rw_mutex_v3 spin_rw_mutex;
+<a name="l00032"></a>00032 
+<a name="l00034"></a>00034 
+<a name="l00035"></a><a class="code" href="a00201.html">00035</a> <span class="keyword">class </span><a class="code" href="a00201.html">spin_rw_mutex_v3</a> {
+<a name="l00037"></a>00037 
+<a name="l00039"></a>00039     <span class="keywordtype">bool</span> __TBB_EXPORTED_METHOD internal_acquire_writer();
+<a name="l00040"></a>00040 
+<a name="l00042"></a>00042 
+<a name="l00043"></a>00043     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_release_writer();
+<a name="l00044"></a>00044 
+<a name="l00046"></a>00046     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_acquire_reader();
+<a name="l00047"></a>00047 
+<a name="l00049"></a>00049     <span class="keywordtype">bool</span> __TBB_EXPORTED_METHOD internal_upgrade();
+<a name="l00050"></a>00050 
+<a name="l00052"></a>00052 
+<a name="l00053"></a>00053     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_downgrade();
+<a name="l00054"></a>00054 
+<a name="l00056"></a>00056     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_release_reader();
+<a name="l00057"></a>00057 
+<a name="l00059"></a>00059     <span class="keywordtype">bool</span> __TBB_EXPORTED_METHOD internal_try_acquire_writer();
+<a name="l00060"></a>00060 
+<a name="l00062"></a>00062     <span class="keywordtype">bool</span> __TBB_EXPORTED_METHOD internal_try_acquire_reader();
+<a name="l00063"></a>00063 
+<a name="l00065"></a>00065 <span class="keyword">public</span>:
+<a name="l00067"></a><a class="code" href="a00201.html#61332b2756de89f3f5f69310cbb6e70c">00067</a>     <a class="code" href="a00201.html#61332b2756de89f3f5f69310cbb6e70c">spin_rw_mutex_v3</a>() : state(0) {
+<a name="l00068"></a>00068 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS</span>
+<a name="l00069"></a>00069 <span class="preprocessor"></span>        internal_construct();
+<a name="l00070"></a>00070 <span class="preprocessor">#endif</span>
+<a name="l00071"></a>00071 <span class="preprocessor"></span>    }
+<a name="l00072"></a>00072 
+<a name="l00073"></a>00073 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00075"></a><a class="code" href="a00201.html#9a815fb2759e55072ed413f1b6970cf3">00075</a> <span class="preprocessor">    ~spin_rw_mutex_v3() {</span>
+<a name="l00076"></a>00076 <span class="preprocessor"></span>        __TBB_ASSERT( !state, <span class="stringliteral">"destruction of an acquired mutex"</span>);
+<a name="l00077"></a>00077     };
+<a name="l00078"></a>00078 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00079"></a>00079 
+<a name="l00081"></a>00081 
+<a name="l00083"></a><a class="code" href="a00202.html">00083</a>     <span class="keyword">class </span><a class="code" href="a00202.html">scoped_lock</a> : internal::no_copy {
+<a name="l00084"></a>00084     <span class="keyword">public</span>:
+<a name="l00086"></a>00086 
+<a name="l00087"></a><a class="code" href="a00202.html#d6ea60dee5563f702379bf5e51aa8806">00087</a>         <a class="code" href="a00202.html">scoped_lock</a>() : <a class="code" href="a00177.html">mutex</a>(NULL), is_writer(false) {}
+<a name="l00088"></a>00088 
+<a name="l00090"></a><a class="code" href="a00202.html#42a92d4f8fdde425b111cfa8a9228071">00090</a>         <a class="code" href="a00202.html">scoped_lock</a>( <a class="code" href="a00201.html">spin_rw_mutex</a>&amp; m, <span class="keywordtype">bool</span> write = <span class="keyword">true</span> ) : <a class="code" href="a00177.html">mutex</a>(NULL) {
+<a name="l00091"></a>00091             <a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fef5f1fafe8d229d348ff91d937f64e79c7">acquire</a>(m, write);
+<a name="l00092"></a>00092         }
+<a name="l00093"></a>00093 
+<a name="l00095"></a><a class="code" href="a00202.html#d7eaaa3f2e2c5dc11e7005811b1bdd04">00095</a>         <a class="code" href="a00192.html#70246e0260493625ff956fa5926fc71f">~scoped_lock</a>() {
+<a name="l00096"></a>00096             <span class="keywordflow">if</span>( <a class="code" href="a00177.html">mutex</a> ) <a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fefaa1fa107db0245c41fb109d976ae8d70">release</a>();
+<a name="l00097"></a>00097         }
+<a name="l00098"></a>00098 
+<a name="l00100"></a><a class="code" href="a00202.html#b0b646ec5be02a127d159bbb7ca65353">00100</a>         <span class="keywordtype">void</span> <a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fef5f1fafe8d229d348ff91d937f64e79c7">acquire</a>( <a class="code" href="a00201.html">spin_rw_mutex</a>&amp; m, <span class="keywordtype">bool</span> write = <span class="keyword">true</span> ) {
+<a name="l00101"></a>00101             __TBB_ASSERT( !<a class="code" href="a00177.html">mutex</a>, <span class="stringliteral">"holding mutex already"</span> );
+<a name="l00102"></a>00102             is_writer = write; 
+<a name="l00103"></a>00103             <a class="code" href="a00177.html">mutex</a> = &amp;m;
+<a name="l00104"></a>00104             <span class="keywordflow">if</span>( write ) <a class="code" href="a00177.html">mutex</a>-&gt;internal_acquire_writer();
+<a name="l00105"></a>00105             <span class="keywordflow">else</span>        <a class="code" href="a00177.html">mutex</a>-&gt;internal_acquire_reader();
+<a name="l00106"></a>00106         }
+<a name="l00107"></a>00107 
+<a name="l00109"></a>00109 
+<a name="l00110"></a><a class="code" href="a00202.html#3f0b1e3f2efab63336400348bd070226">00110</a>         <span class="keywordtype">bool</span> upgrade_to_writer() {
+<a name="l00111"></a>00111             __TBB_ASSERT( <a class="code" href="a00177.html">mutex</a>, <span class="stringliteral">"lock is not acquired"</span> );
+<a name="l00112"></a>00112             __TBB_ASSERT( !is_writer, <span class="stringliteral">"not a reader"</span> );
+<a name="l00113"></a>00113             is_writer = <span class="keyword">true</span>; 
+<a name="l00114"></a>00114             <span class="keywordflow">return</span> <a class="code" href="a00177.html">mutex</a>-&gt;internal_upgrade();
+<a name="l00115"></a>00115         }
+<a name="l00116"></a>00116 
+<a name="l00118"></a><a class="code" href="a00202.html#61b14d00a78185c9b2d206ebfc379124">00118</a>         <span class="keywordtype">void</span> <a class="code" href="a00272.html#a8686246bb5d3664bd07563749970fefaa1fa107db0245c41fb109d976ae8d70">release</a>() {
+<a name="l00119"></a>00119             __TBB_ASSERT( <a class="code" href="a00177.html">mutex</a>, <span class="stringliteral">"lock is not acquired"</span> );
+<a name="l00120"></a>00120             <a class="code" href="a00201.html">spin_rw_mutex</a> *m = <a class="code" href="a00177.html">mutex</a>; 
+<a name="l00121"></a>00121             <a class="code" href="a00177.html">mutex</a> = NULL;
+<a name="l00122"></a>00122 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT</span>
+<a name="l00123"></a>00123 <span class="preprocessor"></span>            <span class="keywordflow">if</span>( is_writer ) m-&gt;internal_release_writer();
+<a name="l00124"></a>00124             <span class="keywordflow">else</span>            m-&gt;internal_release_reader();
+<a name="l00125"></a>00125 <span class="preprocessor">#else</span>
+<a name="l00126"></a>00126 <span class="preprocessor"></span>            <span class="keywordflow">if</span>( is_writer ) __TBB_AtomicAND( &amp;m-&gt;<a class="code" href="a00201.html#621b900d5fef23d2e401aabcbb777c76">state</a>, READERS ); 
+<a name="l00127"></a>00127             <span class="keywordflow">else</span>            __TBB_FetchAndAddWrelease( &amp;m-&gt;<a class="code" href="a00201.html#621b900d5fef23d2e401aabcbb777c76">state</a>, -(intptr_t)ONE_READER);
+<a name="l00128"></a>00128 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */</span>
+<a name="l00129"></a>00129         }
+<a name="l00130"></a>00130 
+<a name="l00132"></a><a class="code" href="a00202.html#c2c2c38a08cb9080e87099fac3e5bc94">00132</a>         <span class="keywordtype">bool</span> downgrade_to_reader() {
+<a name="l00133"></a>00133 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT</span>
+<a name="l00134"></a>00134 <span class="preprocessor"></span>            __TBB_ASSERT( <a class="code" href="a00177.html">mutex</a>, <span class="stringliteral">"lock is not acquired"</span> );
+<a name="l00135"></a>00135             __TBB_ASSERT( is_writer, <span class="stringliteral">"not a writer"</span> );
+<a name="l00136"></a>00136             <a class="code" href="a00177.html">mutex</a>-&gt;internal_downgrade();
+<a name="l00137"></a>00137 <span class="preprocessor">#else</span>
+<a name="l00138"></a>00138 <span class="preprocessor"></span>            __TBB_FetchAndAddW( &amp;<a class="code" href="a00177.html">mutex</a>-&gt;state, ((intptr_t)ONE_READER-WRITER));
+<a name="l00139"></a>00139 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */</span>
+<a name="l00140"></a>00140             is_writer = <span class="keyword">false</span>;
+<a name="l00141"></a>00141 
+<a name="l00142"></a>00142             <span class="keywordflow">return</span> <span class="keyword">true</span>;
+<a name="l00143"></a>00143         }
+<a name="l00144"></a>00144 
+<a name="l00146"></a><a class="code" href="a00202.html#9879626968d9b9a04cd2ec0fb2e84ae1">00146</a>         <span class="keywordtype">bool</span> try_acquire( <a class="code" href="a00201.html">spin_rw_mutex</a>&amp; m, <span class="keywordtype">bool</span> write = <span class="keyword">true</span> ) {
+<a name="l00147"></a>00147             __TBB_ASSERT( !<a class="code" href="a00177.html">mutex</a>, <span class="stringliteral">"holding mutex already"</span> );
+<a name="l00148"></a>00148             <span class="keywordtype">bool</span> result;
+<a name="l00149"></a>00149             is_writer = write; 
+<a name="l00150"></a>00150             result = write? m.internal_try_acquire_writer()
+<a name="l00151"></a>00151                           : m.internal_try_acquire_reader();
+<a name="l00152"></a>00152             <span class="keywordflow">if</span>( result ) 
+<a name="l00153"></a>00153                 <a class="code" href="a00177.html">mutex</a> = &amp;m;
+<a name="l00154"></a>00154             <span class="keywordflow">return</span> result;
+<a name="l00155"></a>00155         }
+<a name="l00156"></a>00156 
+<a name="l00157"></a>00157     <span class="keyword">private</span>:
+<a name="l00159"></a>00159         <a class="code" href="a00201.html">spin_rw_mutex</a>* <a class="code" href="a00177.html">mutex</a>;
+<a name="l00160"></a>00160 
+<a name="l00162"></a>00162 
+<a name="l00163"></a>00163         <span class="keywordtype">bool</span> is_writer;
+<a name="l00164"></a>00164     };
+<a name="l00165"></a>00165 
+<a name="l00166"></a>00166     <span class="comment">// Mutex traits</span>
+<a name="l00167"></a>00167     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_rw_mutex = <span class="keyword">true</span>;
+<a name="l00168"></a>00168     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_recursive_mutex = <span class="keyword">false</span>;
+<a name="l00169"></a>00169     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">bool</span> is_fair_mutex = <span class="keyword">false</span>;
+<a name="l00170"></a>00170 
+<a name="l00171"></a>00171     <span class="comment">// ISO C++0x compatibility methods</span>
+<a name="l00172"></a>00172 
+<a name="l00174"></a><a class="code" href="a00201.html#4007d6e1523dbc3c2bb7f889ab789a8a">00174</a>     <span class="keywordtype">void</span> lock() {internal_acquire_writer();}
+<a name="l00175"></a>00175 
+<a name="l00177"></a>00177 
+<a name="l00178"></a><a class="code" href="a00201.html#088bb256be794cc47d3b83791632fdfc">00178</a>     <span class="keywordtype">bool</span> try_lock() {<span class="keywordflow">return</span> internal_try_acquire_writer();}
+<a name="l00179"></a>00179 
+<a name="l00181"></a><a class="code" href="a00201.html#f9f52ead2098eb5fb12da59d5ae53b55">00181</a>     <span class="keywordtype">void</span> unlock() {
+<a name="l00182"></a>00182 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT</span>
+<a name="l00183"></a>00183 <span class="preprocessor"></span>        <span class="keywordflow">if</span>( state&amp;WRITER ) internal_release_writer();
+<a name="l00184"></a>00184         <span class="keywordflow">else</span>               internal_release_reader();
+<a name="l00185"></a>00185 <span class="preprocessor">#else</span>
+<a name="l00186"></a>00186 <span class="preprocessor"></span>        <span class="keywordflow">if</span>( state&amp;WRITER ) __TBB_AtomicAND( &amp;state, READERS ); 
+<a name="l00187"></a>00187         <span class="keywordflow">else</span>               __TBB_FetchAndAddWrelease( &amp;state, -(intptr_t)ONE_READER);
+<a name="l00188"></a>00188 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */</span>
+<a name="l00189"></a>00189     }
+<a name="l00190"></a>00190 
+<a name="l00191"></a>00191     <span class="comment">// Methods for reader locks that resemble ISO C++0x compatibility methods.</span>
+<a name="l00192"></a>00192 
+<a name="l00194"></a><a class="code" href="a00201.html#13f799708ac4ca437a16be202e263e18">00194</a>     <span class="keywordtype">void</span> lock_read() {internal_acquire_reader();}
+<a name="l00195"></a>00195 
+<a name="l00197"></a>00197 
+<a name="l00198"></a><a class="code" href="a00201.html#b8667415869013f840d976aa406d385a">00198</a>     <span class="keywordtype">bool</span> try_lock_read() {<span class="keywordflow">return</span> internal_try_acquire_reader();}
+<a name="l00199"></a>00199 
+<a name="l00200"></a>00200 <span class="keyword">private</span>:
+<a name="l00201"></a>00201     <span class="keyword">typedef</span> intptr_t state_t;
+<a name="l00202"></a>00202     <span class="keyword">static</span> <span class="keyword">const</span> state_t WRITER = 1;
+<a name="l00203"></a>00203     <span class="keyword">static</span> <span class="keyword">const</span> state_t WRITER_PENDING = 2;
+<a name="l00204"></a>00204     <span class="keyword">static</span> <span class="keyword">const</span> state_t READERS = ~(WRITER | WRITER_PENDING);
+<a name="l00205"></a>00205     <span class="keyword">static</span> <span class="keyword">const</span> state_t ONE_READER = 4;
+<a name="l00206"></a>00206     <span class="keyword">static</span> <span class="keyword">const</span> state_t BUSY = WRITER | READERS;
+<a name="l00208"></a>00208 
+<a name="l00211"></a>00211     state_t state;
+<a name="l00212"></a>00212 
+<a name="l00213"></a>00213     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_construct();
+<a name="l00214"></a>00214 };
+<a name="l00215"></a>00215 
+<a name="l00216"></a>00216 __TBB_DEFINE_PROFILING_SET_NAME(spin_rw_mutex)
+<a name="l00217"></a>00217 
+<a name="l00218"></a>00218 } <span class="comment">// namespace tbb</span>
+<a name="l00219"></a>00219 
+<a name="l00220"></a>00220 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_spin_rw_mutex_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
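
Editorial note (not part of the imported TBB sources): a minimal sketch of reader/writer locking with the spin_rw_mutex documented above, including the upgrade_to_writer path. The shared table and function name are illustrative; only the documented tbb::spin_rw_mutex::scoped_lock interface is assumed.

    // Minimal usage sketch for the spin_rw_mutex interface shown above.
    #include <map>
    #include <string>
    #include "tbb/spin_rw_mutex.h"

    static tbb::spin_rw_mutex table_mutex;
    static std::map<std::string, int> table;

    int lookup_or_insert(const std::string& key) {
        // Acquire as a reader first; most lookups do not modify the table.
        tbb::spin_rw_mutex::scoped_lock lock(table_mutex, /*write=*/false);
        std::map<std::string, int>::iterator it = table.find(key);
        if (it != table.end())
            return it->second;
        // Not found: upgrade to a writer. upgrade_to_writer() may have to
        // release the lock temporarily, so another thread could have inserted
        // the key in the meantime; re-check before inserting.
        lock.upgrade_to_writer();
        it = table.find(key);
        if (it == table.end())
            it = table.insert(std::make_pair(key, 0)).first;
        return it->second;
    }   // lock released here

Taking the lock as a reader and upgrading only on the rare write path keeps concurrent lookups from serializing against each other, while the re-check after the upgrade accounts for the possibility that the upgrade was not atomic.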
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00438.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00438.html
new file mode 100644 (file)
index 0000000..d75e2dd
--- /dev/null
@@ -0,0 +1,649 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>task.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>task.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_task_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_task_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "tbb_stddef.h"</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#include "tbb_machine.h"</span>
+<a name="l00026"></a>00026 
+<a name="l00027"></a>00027 <span class="keyword">typedef</span> <span class="keyword">struct </span>___itt_caller *__itt_caller;
+<a name="l00028"></a>00028 
+<a name="l00029"></a>00029 <span class="keyword">namespace </span>tbb {
+<a name="l00030"></a>00030 
+<a name="l00031"></a>00031 <span class="keyword">class </span>task;
+<a name="l00032"></a>00032 <span class="keyword">class </span>task_list;
+<a name="l00033"></a>00033 
+<a name="l00034"></a>00034 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00035"></a>00035 <span class="preprocessor"></span><span class="keyword">class </span>task_group_context;
+<a name="l00036"></a>00036 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_TASK_GROUP_CONTEXT */</span>
+<a name="l00037"></a>00037 
+<a name="l00038"></a>00038 <span class="comment">// MSVC does not allow taking the address of a member that was defined </span>
+<a name="l00039"></a>00039 <span class="comment">// privately in task_base and made public in class task via a using declaration.</span>
+<a name="l00040"></a>00040 <span class="preprocessor">#if _MSC_VER || (__GNUC__==3 &amp;&amp; __GNUC_MINOR__&lt;3)</span>
+<a name="l00041"></a>00041 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_TASK_BASE_ACCESS public</span>
+<a name="l00042"></a>00042 <span class="preprocessor"></span><span class="preprocessor">#else</span>
+<a name="l00043"></a>00043 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_TASK_BASE_ACCESS private</span>
+<a name="l00044"></a>00044 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00045"></a>00045 <span class="preprocessor"></span>
+<a name="l00046"></a>00046 <span class="keyword">namespace </span>internal {
+<a name="l00047"></a>00047 
+<a name="l00048"></a>00048     <span class="keyword">class </span>allocate_additional_child_of_proxy: no_assign {
+<a name="l00050"></a>00050         task* <span class="keyword">self</span>;
+<a name="l00051"></a>00051         task&amp; parent;
+<a name="l00052"></a>00052     <span class="keyword">public</span>:
+<a name="l00053"></a>00053         <span class="keyword">explicit</span> allocate_additional_child_of_proxy( task&amp; parent_ ) : self(NULL), parent(parent_) {}
+<a name="l00054"></a>00054         task&amp; __TBB_EXPORTED_METHOD allocate( size_t size ) <span class="keyword">const</span>;
+<a name="l00055"></a>00055         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD free( task&amp; ) <span class="keyword">const</span>;
+<a name="l00056"></a>00056     };
+<a name="l00057"></a>00057 
+<a name="l00058"></a>00058 }
+<a name="l00059"></a>00059 
+<a name="l00060"></a>00060 <span class="keyword">namespace </span>interface5 {
+<a name="l00061"></a>00061     <span class="keyword">namespace </span>internal {
+<a name="l00063"></a>00063 
+<a name="l00068"></a><a class="code" href="a00205.html">00068</a>         <span class="keyword">class </span><a class="code" href="a00205.html">task_base</a>: tbb::internal::no_copy {
+<a name="l00069"></a>00069         __TBB_TASK_BASE_ACCESS:
+<a name="l00070"></a>00070             <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00204.html">tbb::task</a>;
+<a name="l00071"></a>00071 
+<a name="l00073"></a>00073             <span class="keyword">static</span> <span class="keywordtype">void</span> spawn( <a class="code" href="a00204.html">task</a>&amp; t );
+<a name="l00074"></a>00074  
+<a name="l00076"></a>00076             <span class="keyword">static</span> <span class="keywordtype">void</span> spawn( <a class="code" href="a00207.html">task_list</a>&amp; list );
+<a name="l00077"></a>00077 
+<a name="l00079"></a>00079 
+<a name="l00081"></a>00081             <span class="keyword">static</span> tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of( <a class="code" href="a00204.html">task</a>&amp; t ) {
+<a name="l00082"></a>00082                 <span class="keywordflow">return</span> tbb::internal::allocate_additional_child_of_proxy(t);
+<a name="l00083"></a>00083             }
+<a name="l00084"></a>00084 
+<a name="l00086"></a>00086 
+<a name="l00090"></a>00090             <span class="keyword">static</span> <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC <a class="code" href="a00204.html#dfaacf92685e5f86393bf657b2853bf8">destroy</a>( <a class="code" href="a00204.html">task</a>&amp; victim );
+<a name="l00091"></a>00091         }; 
+<a name="l00092"></a>00092     } <span class="comment">// internal</span>
+<a name="l00093"></a>00093 } <span class="comment">// interface5</span>
+<a name="l00094"></a>00094 
+<a name="l00096"></a>00096 <span class="keyword">namespace </span>internal {
+<a name="l00097"></a>00097 
+<a name="l00098"></a>00098     <span class="keyword">class </span>scheduler: no_copy {
+<a name="l00099"></a>00099     <span class="keyword">public</span>:
+<a name="l00101"></a>00101         <span class="keyword">virtual</span> <span class="keywordtype">void</span> spawn( <a class="code" href="a00204.html">task</a>&amp; first, <a class="code" href="a00204.html">task</a>*&amp; next ) = 0;
+<a name="l00102"></a>00102 
+<a name="l00104"></a>00104         <span class="keyword">virtual</span> <span class="keywordtype">void</span> wait_for_all( <a class="code" href="a00204.html">task</a>&amp; parent, <a class="code" href="a00204.html">task</a>* child ) = 0;
+<a name="l00105"></a>00105 
+<a name="l00107"></a>00107         <span class="keyword">virtual</span> <span class="keywordtype">void</span> spawn_root_and_wait( <a class="code" href="a00204.html">task</a>&amp; first, <a class="code" href="a00204.html">task</a>*&amp; next ) = 0;
+<a name="l00108"></a>00108 
+<a name="l00110"></a>00110         <span class="comment">//  Have to have it just to shut up overzealous compilation warnings</span>
+<a name="l00111"></a>00111         <span class="keyword">virtual</span> ~scheduler() = 0;
+<a name="l00112"></a>00112 <span class="preprocessor">#if __TBB_ARENA_PER_MASTER</span>
+<a name="l00113"></a>00113 <span class="preprocessor"></span>
+<a name="l00115"></a>00115         <span class="keyword">virtual</span> <span class="keywordtype">void</span> enqueue( <a class="code" href="a00204.html">task</a>&amp; t, <span class="keywordtype">void</span>* reserved ) = 0;
+<a name="l00116"></a>00116 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_ARENA_PER_MASTER */</span>
+<a name="l00117"></a>00117     };
+<a name="l00118"></a>00118 
+<a name="l00120"></a>00120 
+<a name="l00121"></a>00121     <span class="keyword">typedef</span> intptr_t reference_count;
+<a name="l00122"></a>00122 
+<a name="l00124"></a>00124     <span class="keyword">typedef</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">short</span> affinity_id;
+<a name="l00125"></a>00125 
+<a name="l00126"></a>00126 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00127"></a>00127 <span class="preprocessor"></span>    <span class="keyword">struct </span>context_list_node_t {
+<a name="l00128"></a>00128         context_list_node_t *my_prev,
+<a name="l00129"></a>00129                             *my_next;
+<a name="l00130"></a>00130     };
+<a name="l00131"></a>00131 
+<a name="l00132"></a>00132     <span class="keyword">class </span>allocate_root_with_context_proxy: no_assign {
+<a name="l00133"></a>00133         task_group_context&amp; my_context;
+<a name="l00134"></a>00134     <span class="keyword">public</span>:
+<a name="l00135"></a>00135         allocate_root_with_context_proxy ( task_group_context&amp; ctx ) : my_context(ctx) {}
+<a name="l00136"></a>00136         task&amp; __TBB_EXPORTED_METHOD allocate( size_t size ) <span class="keyword">const</span>;
+<a name="l00137"></a>00137         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD free( task&amp; ) <span class="keyword">const</span>;
+<a name="l00138"></a>00138     };
+<a name="l00139"></a>00139 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_TASK_GROUP_CONTEXT */</span>
+<a name="l00140"></a>00140 
+<a name="l00141"></a>00141     <span class="keyword">class </span>allocate_root_proxy: no_assign {
+<a name="l00142"></a>00142     <span class="keyword">public</span>:
+<a name="l00143"></a>00143         <span class="keyword">static</span> task&amp; __TBB_EXPORTED_FUNC allocate( size_t size );
+<a name="l00144"></a>00144         <span class="keyword">static</span> <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC free( task&amp; );
+<a name="l00145"></a>00145     };
+<a name="l00146"></a>00146 
+<a name="l00147"></a>00147     <span class="keyword">class </span>allocate_continuation_proxy: no_assign {
+<a name="l00148"></a>00148     <span class="keyword">public</span>:
+<a name="l00149"></a>00149         task&amp; __TBB_EXPORTED_METHOD allocate( size_t size ) <span class="keyword">const</span>;
+<a name="l00150"></a>00150         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD free( task&amp; ) <span class="keyword">const</span>;
+<a name="l00151"></a>00151     };
+<a name="l00152"></a>00152 
+<a name="l00153"></a>00153     <span class="keyword">class </span>allocate_child_proxy: no_assign {
+<a name="l00154"></a>00154     <span class="keyword">public</span>:
+<a name="l00155"></a>00155         task&amp; __TBB_EXPORTED_METHOD allocate( size_t size ) <span class="keyword">const</span>;
+<a name="l00156"></a>00156         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD free( task&amp; ) <span class="keyword">const</span>;
+<a name="l00157"></a>00157     };
+<a name="l00158"></a>00158 
+<a name="l00160"></a>00160 
+<a name="l00165"></a>00165     <span class="keyword">class </span>task_prefix {
+<a name="l00166"></a>00166     <span class="keyword">private</span>:
+<a name="l00167"></a>00167         <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00204.html">tbb::task</a>;
+<a name="l00168"></a>00168         <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00205.html">tbb::interface5::internal::task_base</a>;
+<a name="l00169"></a>00169         <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00207.html">tbb::task_list</a>;
+<a name="l00170"></a>00170         <span class="keyword">friend</span> <span class="keyword">class </span>internal::scheduler;
+<a name="l00171"></a>00171         <span class="keyword">friend</span> <span class="keyword">class </span>internal::allocate_root_proxy;
+<a name="l00172"></a>00172         <span class="keyword">friend</span> <span class="keyword">class </span>internal::allocate_child_proxy;
+<a name="l00173"></a>00173         <span class="keyword">friend</span> <span class="keyword">class </span>internal::allocate_continuation_proxy;
+<a name="l00174"></a>00174         <span class="keyword">friend</span> <span class="keyword">class </span>internal::allocate_additional_child_of_proxy;
+<a name="l00175"></a>00175 
+<a name="l00176"></a>00176 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00178"></a>00178 <span class="preprocessor"></span>
+<a name="l00181"></a>00181 <span class="preprocessor">        task_group_context  *context;</span>
+<a name="l00182"></a>00182 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* __TBB_TASK_GROUP_CONTEXT */</span>
+<a name="l00183"></a>00183         
+<a name="l00185"></a>00185 
+<a name="l00190"></a>00190         scheduler* origin;
+<a name="l00191"></a>00191 
+<a name="l00193"></a>00193         scheduler* owner;
+<a name="l00194"></a>00194 
+<a name="l00196"></a>00196 
+<a name="l00199"></a>00199         <a class="code" href="a00204.html">tbb::task</a>* parent;
+<a name="l00200"></a>00200 
+<a name="l00202"></a>00202 
+<a name="l00206"></a>00206         reference_count ref_count;
+<a name="l00207"></a>00207 
+<a name="l00209"></a>00209 
+<a name="l00210"></a>00210         <span class="keywordtype">int</span> depth;
+<a name="l00211"></a>00211 
+<a name="l00213"></a>00213 
+<a name="l00214"></a>00214         <span class="keywordtype">unsigned</span> <span class="keywordtype">char</span> state;
+<a name="l00215"></a>00215 
+<a name="l00217"></a>00217 
+<a name="l00222"></a>00222         <span class="keywordtype">unsigned</span> <span class="keywordtype">char</span> extra_state;
+<a name="l00223"></a>00223 
+<a name="l00224"></a>00224         affinity_id affinity;
+<a name="l00225"></a>00225 
+<a name="l00227"></a>00227         <a class="code" href="a00204.html">tbb::task</a>* next;
+<a name="l00228"></a>00228 
+<a name="l00230"></a>00230         <a class="code" href="a00204.html">tbb::task</a>&amp; task() {<span class="keywordflow">return</span> *reinterpret_cast&lt;tbb::task*&gt;(<span class="keyword">this</span>+1);}
+<a name="l00231"></a>00231     };
+<a name="l00232"></a>00232 
+<a name="l00233"></a>00233 } <span class="comment">// namespace internal</span>
+<a name="l00235"></a>00235 <span class="comment"></span>
+<a name="l00236"></a>00236 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00237"></a>00237 <span class="preprocessor"></span>
+<a name="l00238"></a>00238 <span class="preprocessor">#if TBB_USE_CAPTURED_EXCEPTION</span>
+<a name="l00239"></a>00239 <span class="preprocessor"></span>    <span class="keyword">class </span>tbb_exception;
+<a name="l00240"></a>00240 <span class="preprocessor">#else</span>
+<a name="l00241"></a>00241 <span class="preprocessor"></span>    <span class="keyword">namespace </span>internal {
+<a name="l00242"></a>00242         <span class="keyword">class </span>tbb_exception_ptr;
+<a name="l00243"></a>00243     }
+<a name="l00244"></a>00244 <span class="preprocessor">#endif </span><span class="comment">/* !TBB_USE_CAPTURED_EXCEPTION */</span>
+<a name="l00245"></a>00245 
+<a name="l00247"></a>00247 
+<a name="l00267"></a><a class="code" href="a00206.html">00267</a> <span class="keyword">class </span><a class="code" href="a00206.html">task_group_context</a> : internal::no_copy {
+<a name="l00268"></a>00268 <span class="keyword">private</span>:
+<a name="l00269"></a>00269 <span class="preprocessor">#if TBB_USE_CAPTURED_EXCEPTION</span>
+<a name="l00270"></a>00270 <span class="preprocessor"></span>    <span class="keyword">typedef</span> <a class="code" href="a00211.html">tbb_exception</a> <a class="code" href="a00211.html">exception_container_type</a>;
+<a name="l00271"></a>00271 <span class="preprocessor">#else</span>
+<a name="l00272"></a>00272 <span class="preprocessor"></span>    <span class="keyword">typedef</span> <a class="code" href="a00212.html">internal::tbb_exception_ptr</a> <a class="code" href="a00211.html">exception_container_type</a>;
+<a name="l00273"></a>00273 <span class="preprocessor">#endif</span>
+<a name="l00274"></a>00274 <span class="preprocessor"></span>
+<a name="l00275"></a>00275     <span class="keyword">enum</span> version_traits_word_layout {
+<a name="l00276"></a>00276         traits_offset = 16,
+<a name="l00277"></a>00277         version_mask = 0xFFFF,
+<a name="l00278"></a>00278         traits_mask = 0xFFFFul &lt;&lt; traits_offset
+<a name="l00279"></a>00279     };
+<a name="l00280"></a>00280 
+<a name="l00281"></a>00281 <span class="keyword">public</span>:
+<a name="l00282"></a>00282     <span class="keyword">enum</span> kind_type {
+<a name="l00283"></a>00283         isolated,
+<a name="l00284"></a>00284         bound
+<a name="l00285"></a>00285     };
+<a name="l00286"></a>00286 
+<a name="l00287"></a>00287     <span class="keyword">enum</span> traits_type {
+<a name="l00288"></a>00288         exact_exception = 0x0001ul &lt;&lt; traits_offset,
+<a name="l00289"></a>00289         concurrent_wait = 0x0004ul &lt;&lt; traits_offset,
+<a name="l00290"></a>00290 <span class="preprocessor">#if TBB_USE_CAPTURED_EXCEPTION</span>
+<a name="l00291"></a>00291 <span class="preprocessor"></span>        default_traits = 0
+<a name="l00292"></a>00292 <span class="preprocessor">#else</span>
+<a name="l00293"></a>00293 <span class="preprocessor"></span>        default_traits = exact_exception
+<a name="l00294"></a>00294 <span class="preprocessor">#endif </span><span class="comment">/* !TBB_USE_CAPTURED_EXCEPTION */</span>
+<a name="l00295"></a>00295     };
+<a name="l00296"></a>00296 
+<a name="l00297"></a>00297 <span class="keyword">private</span>:
+<a name="l00298"></a>00298     <span class="keyword">union </span>{
+<a name="l00300"></a>00300         kind_type my_kind;
+<a name="l00301"></a>00301         uintptr_t _my_kind_aligner;
+<a name="l00302"></a>00302     };
+<a name="l00303"></a>00303 
+<a name="l00305"></a>00305     <a class="code" href="a00206.html">task_group_context</a> *my_parent;
+<a name="l00306"></a>00306 
+<a name="l00308"></a>00308 
+<a name="l00310"></a>00310     internal::context_list_node_t my_node;
+<a name="l00311"></a>00311 
+<a name="l00313"></a>00313     __itt_caller itt_caller;
+<a name="l00314"></a>00314 
+<a name="l00316"></a>00316 
+<a name="l00319"></a>00319     <span class="keywordtype">char</span> _leading_padding[internal::NFS_MaxLineSize - 
+<a name="l00320"></a>00320                     2 * <span class="keyword">sizeof</span>(uintptr_t)- <span class="keyword">sizeof</span>(<span class="keywordtype">void</span>*) - <span class="keyword">sizeof</span>(internal::context_list_node_t)
+<a name="l00321"></a>00321                           - <span class="keyword">sizeof</span>(__itt_caller)];
+<a name="l00322"></a>00322     
+<a name="l00324"></a>00324     uintptr_t my_cancellation_requested;
+<a name="l00325"></a>00325     
+<a name="l00327"></a>00327 
+<a name="l00330"></a>00330     uintptr_t  my_version_and_traits;
+<a name="l00331"></a>00331 
+<a name="l00333"></a>00333     <a class="code" href="a00211.html">exception_container_type</a> *my_exception;
+<a name="l00334"></a>00334 
+<a name="l00336"></a>00336 
+<a name="l00339"></a>00339     <span class="keywordtype">void</span> *my_owner;
+<a name="l00340"></a>00340 
+<a name="l00342"></a>00342 
+<a name="l00343"></a>00343     <span class="keywordtype">char</span> _trailing_padding[internal::NFS_MaxLineSize - <span class="keyword">sizeof</span>(intptr_t) - 2 * <span class="keyword">sizeof</span>(<span class="keywordtype">void</span>*)];
+<a name="l00344"></a>00344 
+<a name="l00345"></a>00345 <span class="keyword">public</span>:
+<a name="l00347"></a>00347 
+<a name="l00374"></a><a class="code" href="a00206.html#19fee08fb8ac98adccfe69c1aa63c491">00374</a>     <a class="code" href="a00206.html#19fee08fb8ac98adccfe69c1aa63c491">task_group_context</a> ( kind_type relation_with_parent = bound,
+<a name="l00375"></a>00375                          uintptr_t traits = default_traits )
+<a name="l00376"></a>00376         : my_kind(relation_with_parent)
+<a name="l00377"></a>00377         , my_version_and_traits(1 | traits)
+<a name="l00378"></a>00378     {
+<a name="l00379"></a>00379         <a class="code" href="a00206.html#49a55352084fd44b8863d182e839e6dc">init</a>();
+<a name="l00380"></a>00380     }
+<a name="l00381"></a>00381 
+<a name="l00382"></a>00382     __TBB_EXPORTED_METHOD ~<a class="code" href="a00206.html">task_group_context</a> ();
+<a name="l00383"></a>00383 
+<a name="l00385"></a>00385 
+<a name="l00392"></a>00392     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD <a class="code" href="a00206.html#6d30d16bf1cd22f86c6afaf29c2b430c">reset</a> ();
+<a name="l00393"></a>00393 
+<a name="l00395"></a>00395 
+<a name="l00402"></a>00402     <span class="keywordtype">bool</span> __TBB_EXPORTED_METHOD <a class="code" href="a00206.html#8bcdfdf4e6bfb76125b6de15c00b571d">cancel_group_execution</a> ();
+<a name="l00403"></a>00403 
+<a name="l00405"></a>00405     <span class="keywordtype">bool</span> __TBB_EXPORTED_METHOD <a class="code" href="a00206.html#4db72f16210b0a991b2c134d6763a4cc">is_group_execution_cancelled</a> () <span class="keyword">const</span>;
+<a name="l00406"></a>00406 
+<a name="l00408"></a>00408 
+<a name="l00414"></a>00414     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD <a class="code" href="a00206.html#d97c8a03615594b71b4ef06ff75cf561">register_pending_exception</a> ();
+<a name="l00415"></a>00415 
+<a name="l00416"></a>00416 <span class="keyword">protected</span>:
+<a name="l00418"></a>00418 
+<a name="l00419"></a>00419     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD <a class="code" href="a00206.html#49a55352084fd44b8863d182e839e6dc">init</a> ();
+<a name="l00420"></a>00420 
+<a name="l00421"></a>00421 <span class="keyword">private</span>:
+<a name="l00422"></a>00422     <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00204.html">task</a>;
+<a name="l00423"></a>00423     <span class="keyword">friend</span> <span class="keyword">class </span>internal::allocate_root_with_context_proxy;
+<a name="l00424"></a>00424 
+<a name="l00425"></a>00425     <span class="keyword">static</span> <span class="keyword">const</span> kind_type binding_required = bound;
+<a name="l00426"></a>00426     <span class="keyword">static</span> <span class="keyword">const</span> kind_type binding_completed = kind_type(bound+1);
+<a name="l00427"></a>00427     <span class="keyword">static</span> <span class="keyword">const</span> kind_type detached = kind_type(binding_completed+1);
+<a name="l00428"></a>00428     <span class="keyword">static</span> <span class="keyword">const</span> kind_type dying = kind_type(detached+1);
+<a name="l00429"></a>00429 
+<a name="l00432"></a>00432     <span class="keywordtype">void</span> propagate_cancellation_from_ancestors ();
+<a name="l00433"></a>00433 
+<a name="l00434"></a>00434 }; <span class="comment">// class task_group_context</span>
+<a name="l00435"></a>00435 
+<a name="l00436"></a>00436 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_TASK_GROUP_CONTEXT */</span>
+<a name="l00437"></a>00437 
+<a name="l00439"></a>00439 
+<a name="l00440"></a><a class="code" href="a00204.html">00440</a> <span class="keyword">class </span><a class="code" href="a00204.html">task</a>: __TBB_TASK_BASE_ACCESS interface5::internal::task_base {
+<a name="l00441"></a>00441 
+<a name="l00443"></a>00443     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_set_ref_count( <span class="keywordtype">int</span> count );
+<a name="l00444"></a>00444 
+<a name="l00446"></a>00446     internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count();
+<a name="l00447"></a>00447 
+<a name="l00448"></a>00448 <span class="keyword">protected</span>:
+<a name="l00450"></a><a class="code" href="a00204.html#2bce8ec6e44706e70128f5cf91b76e67">00450</a>     <a class="code" href="a00204.html#2bce8ec6e44706e70128f5cf91b76e67">task</a>() {prefix().extra_state=1;}
+<a name="l00451"></a>00451 
+<a name="l00452"></a>00452 <span class="keyword">public</span>:
+<a name="l00454"></a><a class="code" href="a00204.html#98245ee0473f84cb19dbbf8c81134908">00454</a>     <span class="keyword">virtual</span> <a class="code" href="a00204.html#98245ee0473f84cb19dbbf8c81134908">~task</a>() {}
+<a name="l00455"></a>00455 
+<a name="l00457"></a>00457     <span class="keyword">virtual</span> <a class="code" href="a00204.html">task</a>* <a class="code" href="a00204.html#22c298cd40937a431a06777423f002f6">execute</a>() = 0;
+<a name="l00458"></a>00458 
+<a name="l00460"></a><a class="code" href="a00204.html#4a3c415562d17905390ea5b49d12293e">00460</a>     <span class="keyword">enum</span> <a class="code" href="a00204.html#4a3c415562d17905390ea5b49d12293e">state_type</a> {
+<a name="l00462"></a>00462         <a class="code" href="a00204.html#4a3c415562d17905390ea5b49d12293ead0fe2302ccc360923f738c2ed7ec1b9">executing</a>,
+<a name="l00464"></a>00464         <a class="code" href="a00204.html#4a3c415562d17905390ea5b49d12293e3bf499aa6e6487cd1ace883a63100513">reexecute</a>,
+<a name="l00466"></a>00466         <a class="code" href="a00204.html#4a3c415562d17905390ea5b49d12293e0841dcf1c2a96dee9aa7b69f636cb81a">ready</a>,
+<a name="l00468"></a>00468         <a class="code" href="a00204.html#4a3c415562d17905390ea5b49d12293ebe94d3348dd038e41107819f00c1884c">allocated</a>,
+<a name="l00470"></a>00470         <a class="code" href="a00204.html#4a3c415562d17905390ea5b49d12293ecc67ca92bd6f1ce9738a1e9e7206b735">freed</a>,
+<a name="l00472"></a>00472         <a class="code" href="a00204.html#4a3c415562d17905390ea5b49d12293e58debec6ab130290640d0cc2eedba35d">recycle</a> 
+<a name="l00473"></a>00473     };
+<a name="l00474"></a>00474 
+<a name="l00475"></a>00475     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00476"></a>00476     <span class="comment">// Allocating tasks</span>
+<a name="l00477"></a>00477     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00478"></a>00478 
+<a name="l00480"></a><a class="code" href="a00204.html#23acb0da0afd690da797f9f882027d34">00480</a>     <span class="keyword">static</span> internal::allocate_root_proxy <a class="code" href="a00204.html#23acb0da0afd690da797f9f882027d34">allocate_root</a>() {
+<a name="l00481"></a>00481         <span class="keywordflow">return</span> internal::allocate_root_proxy();
+<a name="l00482"></a>00482     }
+<a name="l00483"></a>00483 
+<a name="l00484"></a>00484 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00486"></a><a class="code" href="a00204.html#8ccc518caf31075a3e073996d2d240a4">00486</a> <span class="preprocessor">    static internal::allocate_root_with_context_proxy allocate_root( task_group_context&amp; ctx ) {</span>
+<a name="l00487"></a>00487 <span class="preprocessor"></span>        <span class="keywordflow">return</span> internal::allocate_root_with_context_proxy(ctx);
+<a name="l00488"></a>00488     }
+<a name="l00489"></a>00489 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_TASK_GROUP_CONTEXT */</span>
+<a name="l00490"></a>00490 
+<a name="l00492"></a>00492 
+<a name="l00493"></a><a class="code" href="a00204.html#1434c79a5138993269d034008bff7329">00493</a>     internal::allocate_continuation_proxy&amp; allocate_continuation() {
+<a name="l00494"></a>00494         <span class="keywordflow">return</span> *reinterpret_cast&lt;internal::allocate_continuation_proxy*&gt;(<span class="keyword">this</span>);
+<a name="l00495"></a>00495     }
+<a name="l00496"></a>00496 
+<a name="l00498"></a><a class="code" href="a00204.html#1ff794f7053cd9148d5f280fbf07377f">00498</a>     internal::allocate_child_proxy&amp; allocate_child() {
+<a name="l00499"></a>00499         <span class="keywordflow">return</span> *reinterpret_cast&lt;internal::allocate_child_proxy*&gt;(<span class="keyword">this</span>);
+<a name="l00500"></a>00500     }
+<a name="l00501"></a>00501 
+<a name="l00503"></a>00503     <span class="keyword">using</span> task_base::allocate_additional_child_of;
+<a name="l00504"></a>00504 
+<a name="l00505"></a>00505 <span class="preprocessor">#if __TBB_DEPRECATED_TASK_INTERFACE</span>
+<a name="l00507"></a>00507 <span class="preprocessor"></span>
+<a name="l00511"></a>00511 <span class="preprocessor">    void __TBB_EXPORTED_METHOD destroy( task&amp; t );</span>
+<a name="l00512"></a>00512 <span class="preprocessor"></span><span class="preprocessor">#else </span><span class="comment">/* !__TBB_DEPRECATED_TASK_INTERFACE */</span>
+<a name="l00514"></a>00514     using task_base::destroy;
+<a name="l00515"></a>00515 <span class="preprocessor">#endif </span><span class="comment">/* !__TBB_DEPRECATED_TASK_INTERFACE */</span>
+<a name="l00516"></a>00516 
+<a name="l00517"></a>00517     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00518"></a>00518     <span class="comment">// Recycling of tasks</span>
+<a name="l00519"></a>00519     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00520"></a>00520 
+<a name="l00522"></a>00522 
+<a name="l00528"></a><a class="code" href="a00204.html#a67a79e18f62b43a623a00cfbd76db4c">00528</a>     <span class="keywordtype">void</span> recycle_as_continuation() {
+<a name="l00529"></a>00529         __TBB_ASSERT( prefix().state==executing, <span class="stringliteral">"execute not running?"</span> );
+<a name="l00530"></a>00530         prefix().state = allocated;
+<a name="l00531"></a>00531     }
+<a name="l00532"></a>00532 
+<a name="l00534"></a>00534 
+<a name="l00536"></a><a class="code" href="a00204.html#3b290d14109704e2b69dc1ac980a7a76">00536</a>     <span class="keywordtype">void</span> recycle_as_safe_continuation() {
+<a name="l00537"></a>00537         __TBB_ASSERT( prefix().state==executing, <span class="stringliteral">"execute not running?"</span> );
+<a name="l00538"></a>00538         prefix().state = recycle;
+<a name="l00539"></a>00539     }
+<a name="l00540"></a>00540 
+<a name="l00542"></a><a class="code" href="a00204.html#db399855177438bbc9cc61d508dae8d2">00542</a>     <span class="keywordtype">void</span> recycle_as_child_of( <a class="code" href="a00204.html">task</a>&amp; new_parent ) {
+<a name="l00543"></a>00543         internal::task_prefix&amp; p = prefix();
+<a name="l00544"></a>00544         __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, <span class="stringliteral">"execute not running, or already recycled"</span> );
+<a name="l00545"></a>00545         __TBB_ASSERT( prefix().ref_count==0, <span class="stringliteral">"no child tasks allowed when recycled as a child"</span> );
+<a name="l00546"></a>00546         __TBB_ASSERT( p.parent==NULL, <span class="stringliteral">"parent must be null"</span> );
+<a name="l00547"></a>00547         __TBB_ASSERT( new_parent.<a class="code" href="a00204.html#ac578940848e9dc8b4f1f2bcf17b627d">prefix</a>().state&lt;=recycle, <span class="stringliteral">"corrupt parent's state"</span> );
+<a name="l00548"></a>00548         __TBB_ASSERT( new_parent.<a class="code" href="a00204.html#ac578940848e9dc8b4f1f2bcf17b627d">prefix</a>().state!=freed, <span class="stringliteral">"parent already freed"</span> );
+<a name="l00549"></a>00549         p.state = allocated;
+<a name="l00550"></a>00550         p.parent = &amp;new_parent;
+<a name="l00551"></a>00551 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00552"></a>00552 <span class="preprocessor"></span>        p.<a class="code" href="a00204.html#d8c36a93f3972590fbb65ff1cef3173b">context</a> = new_parent.<a class="code" href="a00204.html#ac578940848e9dc8b4f1f2bcf17b627d">prefix</a>().context;
+<a name="l00553"></a>00553 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_TASK_GROUP_CONTEXT */</span>
+<a name="l00554"></a>00554     }
+<a name="l00555"></a>00555 
+<a name="l00557"></a>00557 
+<a name="l00558"></a><a class="code" href="a00204.html#4f1be9bbcdb487830dbe298b68d85144">00558</a>     <span class="keywordtype">void</span> recycle_to_reexecute() {
+<a name="l00559"></a>00559         __TBB_ASSERT( prefix().state==executing, <span class="stringliteral">"execute not running, or already recycled"</span> );
+<a name="l00560"></a>00560         __TBB_ASSERT( prefix().ref_count==0, <span class="stringliteral">"no child tasks allowed when recycled for reexecution"</span> );
+<a name="l00561"></a>00561         prefix().state = reexecute;
+<a name="l00562"></a>00562     }
+<a name="l00563"></a>00563 
+<a name="l00564"></a>00564     <span class="comment">// All depth-related methods are obsolete, and are retained for the sake </span>
+<a name="l00565"></a>00565     <span class="comment">// of backward source compatibility only</span>
+<a name="l00566"></a>00566     intptr_t depth()<span class="keyword"> const </span>{<span class="keywordflow">return</span> 0;}
+<a name="l00567"></a>00567     <span class="keywordtype">void</span> set_depth( intptr_t ) {}
+<a name="l00568"></a>00568     <span class="keywordtype">void</span> add_to_depth( <span class="keywordtype">int</span> ) {}
+<a name="l00569"></a>00569 
+<a name="l00570"></a>00570 
+<a name="l00571"></a>00571     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00572"></a>00572     <span class="comment">// Spawning and blocking</span>
+<a name="l00573"></a>00573     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00574"></a>00574 
+<a name="l00576"></a><a class="code" href="a00204.html#06a4206a57e8e12a439b14d6d41cfd92">00576</a>     <span class="keywordtype">void</span> set_ref_count( <span class="keywordtype">int</span> count ) {
+<a name="l00577"></a>00577 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT</span>
+<a name="l00578"></a>00578 <span class="preprocessor"></span>        internal_set_ref_count(count);
+<a name="l00579"></a>00579 <span class="preprocessor">#else</span>
+<a name="l00580"></a>00580 <span class="preprocessor"></span>        prefix().ref_count = count;
+<a name="l00581"></a>00581 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */</span>
+<a name="l00582"></a>00582     }
+<a name="l00583"></a>00583 
+<a name="l00585"></a>00585 
+<a name="l00586"></a><a class="code" href="a00204.html#f5fb43c7ad0de5a4b95703cebc39e345">00586</a>     <span class="keywordtype">void</span> increment_ref_count() {
+<a name="l00587"></a>00587         __TBB_FetchAndIncrementWacquire( &amp;prefix().ref_count );
+<a name="l00588"></a>00588     }
+<a name="l00589"></a>00589 
+<a name="l00591"></a>00591 
+<a name="l00592"></a><a class="code" href="a00204.html#ef4680f5c148020c5e7e43ddef44cd5d">00592</a>     <span class="keywordtype">int</span> decrement_ref_count() {
+<a name="l00593"></a>00593 <span class="preprocessor">#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT</span>
+<a name="l00594"></a>00594 <span class="preprocessor"></span>        <span class="keywordflow">return</span> int(internal_decrement_ref_count());
+<a name="l00595"></a>00595 <span class="preprocessor">#else</span>
+<a name="l00596"></a>00596 <span class="preprocessor"></span>        <span class="keywordflow">return</span> int(__TBB_FetchAndDecrementWrelease( &amp;prefix().ref_count ))-1;
+<a name="l00597"></a>00597 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */</span>
+<a name="l00598"></a>00598     }
+<a name="l00599"></a>00599 
+<a name="l00601"></a>00601     <span class="keyword">using</span> task_base::spawn;
+<a name="l00602"></a>00602 
+<a name="l00604"></a><a class="code" href="a00204.html#3ce28ca9baa771cfc37ecd72e69c4f3c">00604</a>     <span class="keywordtype">void</span> spawn_and_wait_for_all( <a class="code" href="a00204.html">task</a>&amp; child ) {
+<a name="l00605"></a>00605         prefix().owner-&gt;wait_for_all( *<span class="keyword">this</span>, &amp;child );
+<a name="l00606"></a>00606     }
+<a name="l00607"></a>00607 
+<a name="l00609"></a>00609     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD spawn_and_wait_for_all( <a class="code" href="a00207.html">task_list</a>&amp; list );
+<a name="l00610"></a>00610 
+<a name="l00612"></a><a class="code" href="a00204.html#ce8ce689c26a4ddf343829bc3c73290a">00612</a>     <span class="keyword">static</span> <span class="keywordtype">void</span> spawn_root_and_wait( <a class="code" href="a00204.html">task</a>&amp; root ) {
+<a name="l00613"></a>00613         root.<a class="code" href="a00204.html#ac578940848e9dc8b4f1f2bcf17b627d">prefix</a>().owner-&gt;spawn_root_and_wait( root, root.<a class="code" href="a00204.html#ac578940848e9dc8b4f1f2bcf17b627d">prefix</a>().next );
+<a name="l00614"></a>00614     }
+<a name="l00615"></a>00615 
+<a name="l00617"></a>00617 
+<a name="l00619"></a>00619     <span class="keyword">static</span> <span class="keywordtype">void</span> spawn_root_and_wait( <a class="code" href="a00207.html">task_list</a>&amp; root_list );
+<a name="l00620"></a>00620 
+<a name="l00622"></a>00622 
+<a name="l00623"></a><a class="code" href="a00204.html#53d2615ad9c38859b4c8080936600283">00623</a>     <span class="keywordtype">void</span> wait_for_all() {
+<a name="l00624"></a>00624         prefix().owner-&gt;wait_for_all( *<span class="keyword">this</span>, NULL );
+<a name="l00625"></a>00625     }
+<a name="l00626"></a>00626 
+<a name="l00627"></a>00627 <span class="preprocessor">#if __TBB_ARENA_PER_MASTER</span>
+<a name="l00629"></a><a class="code" href="a00204.html#8365d063c0cc9d7bd616bca47256b93c">00629</a> <span class="preprocessor">    static void enqueue( task&amp; t ) {</span>
+<a name="l00630"></a>00630 <span class="preprocessor"></span>        t.prefix().owner-&gt;enqueue( t, NULL );
+<a name="l00631"></a>00631     }
+<a name="l00632"></a>00632 
+<a name="l00633"></a>00633 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_ARENA_PER_MASTER */</span>
+<a name="l00635"></a>00635     static task&amp; __TBB_EXPORTED_FUNC self();
+<a name="l00636"></a>00636 
+<a name="l00638"></a><a class="code" href="a00204.html#314e98ee4347ccec83efcb9ee22e8596">00638</a>     <a class="code" href="a00204.html">task</a>* parent()<span class="keyword"> const </span>{<span class="keywordflow">return</span> prefix().parent;}
+<a name="l00639"></a>00639 
+<a name="l00640"></a>00640 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00642"></a><a class="code" href="a00204.html#d8c36a93f3972590fbb65ff1cef3173b">00642</a> <span class="preprocessor">    task_group_context* context() {return prefix().context;}</span>
+<a name="l00643"></a>00643 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* __TBB_TASK_GROUP_CONTEXT */</span>   
+<a name="l00644"></a>00644 
+<a name="l00646"></a><a class="code" href="a00204.html#f9169402702f56bf519448aaf34450aa">00646</a>     <span class="keywordtype">bool</span> is_stolen_task()<span class="keyword"> const </span>{
+<a name="l00647"></a>00647         <span class="keywordflow">return</span> (prefix().extra_state &amp; 0x80)!=0;
+<a name="l00648"></a>00648     }
+<a name="l00649"></a>00649 
+<a name="l00650"></a>00650     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00651"></a>00651     <span class="comment">// Debugging</span>
+<a name="l00652"></a>00652     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00653"></a>00653 
+<a name="l00655"></a><a class="code" href="a00204.html#0af7b2d7e6e8b4333b2accfce3dfb374">00655</a>     <a class="code" href="a00204.html#4a3c415562d17905390ea5b49d12293e">state_type</a> state()<span class="keyword"> const </span>{<span class="keywordflow">return</span> <a class="code" href="a00204.html#4a3c415562d17905390ea5b49d12293e">state_type</a>(prefix().state);}
+<a name="l00656"></a>00656 
+<a name="l00658"></a><a class="code" href="a00204.html#ad774f55eaec008ae02b236423209ced">00658</a>     <span class="keywordtype">int</span> ref_count()<span class="keyword"> const </span>{
+<a name="l00659"></a>00659 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00660"></a>00660 <span class="preprocessor"></span>        internal::reference_count ref_count_ = prefix().ref_count;
+<a name="l00661"></a>00661         __TBB_ASSERT( ref_count_==<span class="keywordtype">int</span>(ref_count_), <span class="stringliteral">"integer overflow error"</span>);
+<a name="l00662"></a>00662 <span class="preprocessor">#endif</span>
+<a name="l00663"></a>00663 <span class="preprocessor"></span>        <span class="keywordflow">return</span> int(prefix().ref_count);
+<a name="l00664"></a>00664     }
+<a name="l00665"></a>00665 
+<a name="l00667"></a>00667     <span class="keywordtype">bool</span> __TBB_EXPORTED_METHOD is_owned_by_current_thread() <span class="keyword">const</span>;
+<a name="l00668"></a>00668 
+<a name="l00669"></a>00669     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00670"></a>00670     <span class="comment">// Affinity</span>
+<a name="l00671"></a>00671     <span class="comment">//------------------------------------------------------------------------</span>
+<a name="l00672"></a>00672  
+<a name="l00674"></a>00674 
+<a name="l00675"></a><a class="code" href="a00204.html#d61bb32389d3857bf7511d69beaafb76">00675</a>     <span class="keyword">typedef</span> internal::affinity_id affinity_id;
+<a name="l00676"></a>00676 
+<a name="l00678"></a><a class="code" href="a00204.html#dca19d7a45487a7d67a0db517e2b57c9">00678</a>     <span class="keywordtype">void</span> set_affinity( affinity_id <span class="keywordtype">id</span> ) {prefix().affinity = id;}
+<a name="l00679"></a>00679 
+<a name="l00681"></a><a class="code" href="a00204.html#3a920a56b0bcf2801518fb45b2c9d2be">00681</a>     affinity_id affinity()<span class="keyword"> const </span>{<span class="keywordflow">return</span> prefix().affinity;}
+<a name="l00682"></a>00682 
+<a name="l00684"></a>00684 
+<a name="l00688"></a>00688     <span class="keyword">virtual</span> <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD note_affinity( affinity_id <span class="keywordtype">id</span> );
+<a name="l00689"></a>00689 
+<a name="l00690"></a>00690 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00692"></a>00692 <span class="preprocessor"></span>
+<a name="l00693"></a><a class="code" href="a00204.html#0f3fb4aac549ab642022450a4bd13326">00693</a> <span class="preprocessor">    bool cancel_group_execution () { return prefix().context-&gt;cancel_group_execution(); }</span>
+<a name="l00694"></a>00694 <span class="preprocessor"></span>
+<a name="l00696"></a><a class="code" href="a00204.html#025f18118c057c4c8db87ff2ce8df975">00696</a>     <span class="keywordtype">bool</span> is_cancelled ()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> prefix().context-&gt;is_group_execution_cancelled(); }
+<a name="l00697"></a>00697 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_TASK_GROUP_CONTEXT */</span>
+<a name="l00698"></a>00698 
+<a name="l00699"></a>00699 <span class="keyword">private</span>:
+<a name="l00700"></a>00700     <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00205.html">interface5::internal::task_base</a>;
+<a name="l00701"></a>00701     <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00207.html">task_list</a>;
+<a name="l00702"></a>00702     <span class="keyword">friend</span> <span class="keyword">class </span>internal::scheduler;
+<a name="l00703"></a>00703     <span class="keyword">friend</span> <span class="keyword">class </span>internal::allocate_root_proxy;
+<a name="l00704"></a>00704 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00705"></a>00705 <span class="preprocessor"></span>    <span class="keyword">friend</span> <span class="keyword">class </span>internal::allocate_root_with_context_proxy;
+<a name="l00706"></a>00706 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_TASK_GROUP_CONTEXT */</span>
+<a name="l00707"></a>00707     <span class="keyword">friend</span> <span class="keyword">class </span>internal::allocate_continuation_proxy;
+<a name="l00708"></a>00708     <span class="keyword">friend</span> <span class="keyword">class </span>internal::allocate_child_proxy;
+<a name="l00709"></a>00709     <span class="keyword">friend</span> <span class="keyword">class </span>internal::allocate_additional_child_of_proxy;
+<a name="l00710"></a>00710     
+<a name="l00712"></a>00712 
+<a name="l00713"></a>00713     internal::task_prefix&amp; prefix( internal::version_tag* = NULL )<span class="keyword"> const </span>{
+<a name="l00714"></a>00714         <span class="keywordflow">return</span> reinterpret_cast&lt;internal::task_prefix*&gt;(const_cast&lt;task*&gt;(<span class="keyword">this</span>))[-1];
+<a name="l00715"></a>00715     }
+<a name="l00716"></a>00716 }; <span class="comment">// class task</span>
+<a name="l00717"></a>00717 
+<a name="l00719"></a>00719 
+<a name="l00720"></a><a class="code" href="a00167.html">00720</a> <span class="keyword">class </span><a class="code" href="a00167.html">empty_task</a>: <span class="keyword">public</span> <a class="code" href="a00204.html">task</a> {
+<a name="l00721"></a>00721     <span class="comment">/*override*/</span> <a class="code" href="a00204.html">task</a>* execute() {
+<a name="l00722"></a>00722         <span class="keywordflow">return</span> NULL;
+<a name="l00723"></a>00723     }
+<a name="l00724"></a>00724 };
+<a name="l00725"></a>00725 
+<a name="l00727"></a>00727 
+<a name="l00729"></a><a class="code" href="a00207.html">00729</a> <span class="keyword">class </span><a class="code" href="a00207.html">task_list</a>: internal::no_copy {
+<a name="l00730"></a>00730 <span class="keyword">private</span>:
+<a name="l00731"></a>00731     <a class="code" href="a00204.html">task</a>* first;
+<a name="l00732"></a>00732     <a class="code" href="a00204.html">task</a>** next_ptr;
+<a name="l00733"></a>00733     <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00204.html">task</a>;
+<a name="l00734"></a>00734     <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00205.html">interface5::internal::task_base</a>;
+<a name="l00735"></a>00735 <span class="keyword">public</span>:
+<a name="l00737"></a><a class="code" href="a00207.html#416341c2047eaef50417b41eaf7e9de6">00737</a>     <a class="code" href="a00207.html">task_list</a>() : first(NULL), next_ptr(&amp;first) {}
+<a name="l00738"></a>00738 
+<a name="l00740"></a><a class="code" href="a00207.html#6d438f1499a02db1e59c24ab6043e5ba">00740</a>     ~<a class="code" href="a00207.html">task_list</a>() {}
+<a name="l00741"></a>00741 
+<a name="l00743"></a><a class="code" href="a00207.html#f3ac31e092814b90929f81bb30441959">00743</a>     <span class="keywordtype">bool</span> empty()<span class="keyword"> const </span>{<span class="keywordflow">return</span> !first;}
+<a name="l00744"></a>00744 
+<a name="l00746"></a><a class="code" href="a00207.html#4cd34756bc4763dafb8c84838a0124ff">00746</a>     <span class="keywordtype">void</span> push_back( <a class="code" href="a00204.html">task</a>&amp; <a class="code" href="a00204.html">task</a> ) {
+<a name="l00747"></a>00747         task.<a class="code" href="a00204.html#ac578940848e9dc8b4f1f2bcf17b627d">prefix</a>().next = NULL;
+<a name="l00748"></a>00748         *next_ptr = &amp;task;
+<a name="l00749"></a>00749         next_ptr = &amp;task.<a class="code" href="a00204.html#ac578940848e9dc8b4f1f2bcf17b627d">prefix</a>().next;
+<a name="l00750"></a>00750     }
+<a name="l00751"></a>00751 
+<a name="l00753"></a><a class="code" href="a00207.html#5fe85df5ed524418389d34051750347d">00753</a>     <a class="code" href="a00204.html">task</a>&amp; pop_front() {
+<a name="l00754"></a>00754         __TBB_ASSERT( !empty(), <span class="stringliteral">"attempt to pop item from empty task_list"</span> );
+<a name="l00755"></a>00755         <a class="code" href="a00204.html">task</a>* result = first;
+<a name="l00756"></a>00756         first = result-&gt;<a class="code" href="a00204.html#ac578940848e9dc8b4f1f2bcf17b627d">prefix</a>().next;
+<a name="l00757"></a>00757         <span class="keywordflow">if</span>( !first ) next_ptr = &amp;first;
+<a name="l00758"></a>00758         <span class="keywordflow">return</span> *result;
+<a name="l00759"></a>00759     }
+<a name="l00760"></a>00760 
+<a name="l00762"></a><a class="code" href="a00207.html#fce446ee13e025969945328f3ff59b95">00762</a>     <span class="keywordtype">void</span> clear() {
+<a name="l00763"></a>00763         first=NULL;
+<a name="l00764"></a>00764         next_ptr=&amp;first;
+<a name="l00765"></a>00765     }
+<a name="l00766"></a>00766 };
+<a name="l00767"></a>00767 
+<a name="l00768"></a>00768 <span class="keyword">inline</span> <span class="keywordtype">void</span> interface5::internal::task_base::spawn( task&amp; t ) {
+<a name="l00769"></a>00769     t.<a class="code" href="a00204.html#ac578940848e9dc8b4f1f2bcf17b627d">prefix</a>().owner-&gt;spawn( t, t.prefix().next );
+<a name="l00770"></a>00770 }
+<a name="l00771"></a>00771 
+<a name="l00772"></a>00772 <span class="keyword">inline</span> <span class="keywordtype">void</span> interface5::internal::task_base::spawn( task_list&amp; list ) {
+<a name="l00773"></a>00773     <span class="keywordflow">if</span>( task* t = list.first ) {
+<a name="l00774"></a>00774         t-&gt;prefix().owner-&gt;spawn( *t, *list.next_ptr );
+<a name="l00775"></a>00775         list.clear();
+<a name="l00776"></a>00776     }
+<a name="l00777"></a>00777 }
+<a name="l00778"></a>00778 
+<a name="l00779"></a><a class="code" href="a00204.html#c33c7edbaec67aa8a56f48986a9dc69f">00779</a> <span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="a00204.html#ce8ce689c26a4ddf343829bc3c73290a">task::spawn_root_and_wait</a>( <a class="code" href="a00207.html">task_list</a>&amp; root_list ) {
+<a name="l00780"></a>00780     <span class="keywordflow">if</span>( <a class="code" href="a00204.html">task</a>* t = root_list.<a class="code" href="a00207.html#78fcc389819ee34427d510f7d8cb8b1e">first</a> ) {
+<a name="l00781"></a>00781         t-&gt;prefix().owner-&gt;spawn_root_and_wait( *t, *root_list.<a class="code" href="a00207.html#21553a32bcd08f980aa28f61254307da">next_ptr</a> );
+<a name="l00782"></a>00782         root_list.<a class="code" href="a00207.html#fce446ee13e025969945328f3ff59b95">clear</a>();
+<a name="l00783"></a>00783     }
+<a name="l00784"></a>00784 }
+<a name="l00785"></a>00785 
+<a name="l00786"></a>00786 } <span class="comment">// namespace tbb</span>
+<a name="l00787"></a>00787 
+<a name="l00788"></a>00788 <span class="keyword">inline</span> <span class="keywordtype">void</span> *operator new( size_t bytes, <span class="keyword">const</span> tbb::internal::allocate_root_proxy&amp; ) {
+<a name="l00789"></a>00789     <span class="keywordflow">return</span> &amp;tbb::internal::allocate_root_proxy::allocate(bytes);
+<a name="l00790"></a>00790 }
+<a name="l00791"></a>00791 
+<a name="l00792"></a>00792 <span class="keyword">inline</span> <span class="keywordtype">void</span> operator delete( <span class="keywordtype">void</span>* task, <span class="keyword">const</span> tbb::internal::allocate_root_proxy&amp; ) {
+<a name="l00793"></a>00793     tbb::internal::allocate_root_proxy::free( *static_cast&lt;tbb::task*&gt;(task) );
+<a name="l00794"></a>00794 }
+<a name="l00795"></a>00795 
+<a name="l00796"></a>00796 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00797"></a>00797 <span class="preprocessor"></span><span class="keyword">inline</span> <span class="keywordtype">void</span> *operator new( size_t bytes, <span class="keyword">const</span> tbb::internal::allocate_root_with_context_proxy&amp; p ) {
+<a name="l00798"></a>00798     <span class="keywordflow">return</span> &amp;p.allocate(bytes);
+<a name="l00799"></a>00799 }
+<a name="l00800"></a>00800 
+<a name="l00801"></a>00801 <span class="keyword">inline</span> <span class="keywordtype">void</span> operator delete( <span class="keywordtype">void</span>* task, <span class="keyword">const</span> tbb::internal::allocate_root_with_context_proxy&amp; p ) {
+<a name="l00802"></a>00802     p.free( *static_cast&lt;tbb::task*&gt;(task) );
+<a name="l00803"></a>00803 }
+<a name="l00804"></a>00804 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_TASK_GROUP_CONTEXT */</span>
+<a name="l00805"></a>00805 
+<a name="l00806"></a>00806 <span class="keyword">inline</span> <span class="keywordtype">void</span> *operator new( size_t bytes, <span class="keyword">const</span> tbb::internal::allocate_continuation_proxy&amp; p ) {
+<a name="l00807"></a>00807     <span class="keywordflow">return</span> &amp;p.allocate(bytes);
+<a name="l00808"></a>00808 }
+<a name="l00809"></a>00809 
+<a name="l00810"></a>00810 <span class="keyword">inline</span> <span class="keywordtype">void</span> operator delete( <span class="keywordtype">void</span>* task, <span class="keyword">const</span> tbb::internal::allocate_continuation_proxy&amp; p ) {
+<a name="l00811"></a>00811     p.free( *static_cast&lt;tbb::task*&gt;(task) );
+<a name="l00812"></a>00812 }
+<a name="l00813"></a>00813 
+<a name="l00814"></a>00814 <span class="keyword">inline</span> <span class="keywordtype">void</span> *operator new( size_t bytes, <span class="keyword">const</span> tbb::internal::allocate_child_proxy&amp; p ) {
+<a name="l00815"></a>00815     <span class="keywordflow">return</span> &amp;p.allocate(bytes);
+<a name="l00816"></a>00816 }
+<a name="l00817"></a>00817 
+<a name="l00818"></a>00818 <span class="keyword">inline</span> <span class="keywordtype">void</span> operator delete( <span class="keywordtype">void</span>* task, <span class="keyword">const</span> tbb::internal::allocate_child_proxy&amp; p ) {
+<a name="l00819"></a>00819     p.free( *static_cast&lt;tbb::task*&gt;(task) );
+<a name="l00820"></a>00820 }
+<a name="l00821"></a>00821 
+<a name="l00822"></a>00822 <span class="keyword">inline</span> <span class="keywordtype">void</span> *operator new( size_t bytes, <span class="keyword">const</span> tbb::internal::allocate_additional_child_of_proxy&amp; p ) {
+<a name="l00823"></a>00823     <span class="keywordflow">return</span> &amp;p.allocate(bytes);
+<a name="l00824"></a>00824 }
+<a name="l00825"></a>00825 
+<a name="l00826"></a>00826 <span class="keyword">inline</span> <span class="keywordtype">void</span> operator delete( <span class="keywordtype">void</span>* task, <span class="keyword">const</span> tbb::internal::allocate_additional_child_of_proxy&amp; p ) {
+<a name="l00827"></a>00827     p.free( *static_cast&lt;tbb::task*&gt;(task) );
+<a name="l00828"></a>00828 }
+<a name="l00829"></a>00829 
+<a name="l00830"></a>00830 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_task_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
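
The task.h listing above defines the low-level blocking-style API (task::allocate_root, allocate_child, set_ref_count, spawn, spawn_and_wait_for_all, spawn_root_and_wait) together with the placement operator new/delete overloads at the bottom of the file. As a reading aid only -- this code is not part of the commit -- here is the familiar recursive-Fibonacci sketch of how those pieces fit together; FibTask and parallel_fib are illustrative names, and a real version would switch to serial computation below some cutoff instead of recursing all the way down.

    #include "tbb/task.h"

    // Blocking-style parent/child pattern against the interface listed above.
    class FibTask : public tbb::task {
    public:
        const long n;
        long* const sum;
        FibTask( long n_, long* sum_ ) : n(n_), sum(sum_) {}
        /*override*/ tbb::task* execute() {
            if( n < 2 ) {                       // a real version would use a larger serial cutoff
                *sum = n;
            } else {
                long x, y;
                // allocate_child() feeds the placement operator new overload
                // declared near the end of task.h.
                FibTask& a = *new( allocate_child() ) FibTask( n-1, &x );
                FibTask& b = *new( allocate_child() ) FibTask( n-2, &y );
                set_ref_count( 3 );             // two children + one for the wait
                spawn( b );                     // task_base::spawn, re-exported by task
                spawn_and_wait_for_all( a );    // blocks until both children finish
                *sum = x + y;
            }
            return NULL;
        }
    };

    long parallel_fib( long n ) {
        long sum;
        FibTask& root = *new( tbb::task::allocate_root() ) FibTask( n, &sum );
        tbb::task::spawn_root_and_wait( root ); // run the tree and reclaim the root
        return sum;
    }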
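
task_group_context, declared above under __TBB_TASK_GROUP_CONTEXT, is the object that task::is_cancelled() and task::cancel_group_execution() consult: a root allocated with allocate_root(ctx) and all of its descendants share one context, and cancelling the context asks the whole group to stop. A minimal sketch of that plumbing (again not part of the commit; PollingTask and run_with_cancellation are made-up names, and the slice count of 1000 is an arbitrary bound chosen for the example):

    #include "tbb/task.h"

    class PollingTask : public tbb::task {
        /*override*/ tbb::task* execute() {
            // Check the shared cancellation flag between slices of work; it is
            // raised when any thread calls cancel_group_execution() on the
            // context this task was allocated with.
            for( int i = 0; i < 1000 && !is_cancelled(); ++i ) {
                // ... one slice of work ...
            }
            return NULL;
        }
    };

    void run_with_cancellation() {
        tbb::task_group_context ctx;   // bound to the parent context by default
        PollingTask& root = *new( tbb::task::allocate_root(ctx) ) PollingTask;
        // Another thread may call ctx.cancel_group_execution() while this runs.
        tbb::task::spawn_root_and_wait( root );
        if( ctx.is_group_execution_cancelled() ) {
            // the group was asked to stop before finishing normally
        }
    }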
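
task_list, near the end of the listing, is an intrusive singly linked list threaded through task_prefix::next, and spawn_and_wait_for_all(task_list&) hands the whole batch to the scheduler in one call. A short sketch of that batching pattern (not part of the commit; Leaf, Parent and the fan-out of 4 are chosen only for illustration):

    #include "tbb/task.h"

    class Leaf : public tbb::task {
        /*override*/ tbb::task* execute() {
            // ... independent piece of work ...
            return NULL;
        }
    };

    class Parent : public tbb::task {
        /*override*/ tbb::task* execute() {
            const int k = 4;                  // arbitrary fan-out for the example
            tbb::task_list list;
            for( int i = 0; i < k; ++i )
                list.push_back( *new( allocate_child() ) Leaf );
            set_ref_count( k + 1 );           // k children + one for the wait
            spawn_and_wait_for_all( list );   // spawns the batch, then blocks
            return NULL;
        }
    };

    void run_batch() {
        Parent& root = *new( tbb::task::allocate_root() ) Parent;
        tbb::task::spawn_root_and_wait( root );
    }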
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00447.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00447.html
new file mode 100644 (file)
index 0000000..913b462
--- /dev/null
@@ -0,0 +1,270 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>task_group.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>task_group.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_task_group_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_task_group_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "task.h"</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#include "tbb_exception.h"</span>
+<a name="l00026"></a>00026 
+<a name="l00027"></a>00027 <span class="keyword">namespace </span>tbb {
+<a name="l00028"></a>00028 
+<a name="l00029"></a>00029 <span class="keyword">namespace </span>internal {
+<a name="l00030"></a>00030     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F&gt; <span class="keyword">class </span>task_handle_task;
+<a name="l00031"></a>00031 }
+<a name="l00032"></a>00032 
+<a name="l00033"></a>00033 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F&gt;
+<a name="l00034"></a>00034 <span class="keyword">class </span>task_handle : internal::no_assign {
+<a name="l00035"></a>00035     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> _F&gt; <span class="keyword">friend</span> <span class="keyword">class </span>internal::task_handle_task;
+<a name="l00036"></a>00036 
+<a name="l00037"></a>00037     <span class="keyword">static</span> <span class="keyword">const</span> intptr_t scheduled = 0x1;
+<a name="l00038"></a>00038 
+<a name="l00039"></a>00039     F my_func;
+<a name="l00040"></a>00040     intptr_t my_state;
+<a name="l00041"></a>00041 
+<a name="l00042"></a>00042     <span class="keywordtype">void</span> mark_scheduled () {
+<a name="l00043"></a>00043         <span class="comment">// The check here is intentionally lax to avoid the impact of interlocked operation</span>
+<a name="l00044"></a>00044         <span class="keywordflow">if</span> ( my_state &amp; scheduled )
+<a name="l00045"></a>00045             internal::throw_exception( internal::eid_invalid_multiple_scheduling );
+<a name="l00046"></a>00046         my_state |= scheduled;
+<a name="l00047"></a>00047     }
+<a name="l00048"></a>00048 <span class="keyword">public</span>:
+<a name="l00049"></a>00049     task_handle( <span class="keyword">const</span> F&amp; f ) : my_func(f), my_state(0) {}
+<a name="l00050"></a>00050 
+<a name="l00051"></a>00051     <span class="keywordtype">void</span> operator() ()<span class="keyword"> const </span>{ my_func(); }
+<a name="l00052"></a>00052 };
+<a name="l00053"></a>00053 
+<a name="l00054"></a>00054 <span class="keyword">enum</span> task_group_status {
+<a name="l00055"></a>00055     not_complete,
+<a name="l00056"></a>00056     complete,
+<a name="l00057"></a>00057     canceled
+<a name="l00058"></a>00058 };
+<a name="l00059"></a>00059 
+<a name="l00060"></a>00060 <span class="keyword">namespace </span>internal {
+<a name="l00061"></a>00061 
+<a name="l00062"></a>00062 <span class="comment">// Suppress gratuitous warnings from icc 11.0 when lambda expressions are used in instances of function_task.</span>
+<a name="l00063"></a>00063 <span class="comment">//#pragma warning(disable: 588)</span>
+<a name="l00064"></a>00064 
+<a name="l00065"></a>00065 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F&gt;
+<a name="l00066"></a>00066 <span class="keyword">class </span>function_task : <span class="keyword">public</span> task {
+<a name="l00067"></a>00067     F my_func;
+<a name="l00068"></a>00068     <span class="comment">/*override*/</span> <a class="code" href="a00204.html#2bce8ec6e44706e70128f5cf91b76e67">task</a>* execute() {
+<a name="l00069"></a>00069         my_func();
+<a name="l00070"></a>00070         <span class="keywordflow">return</span> NULL;
+<a name="l00071"></a>00071     }
+<a name="l00072"></a>00072 <span class="keyword">public</span>:
+<a name="l00073"></a>00073     function_task( <span class="keyword">const</span> F&amp; f ) : my_func(f) {}
+<a name="l00074"></a>00074 };
+<a name="l00075"></a>00075 
+<a name="l00076"></a>00076 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F&gt;
+<a name="l00077"></a>00077 <span class="keyword">class </span>task_handle_task : <span class="keyword">public</span> task {
+<a name="l00078"></a>00078     task_handle&lt;F&gt;&amp; my_handle;
+<a name="l00079"></a>00079     <span class="comment">/*override*/</span> <a class="code" href="a00204.html#2bce8ec6e44706e70128f5cf91b76e67">task</a>* execute() {
+<a name="l00080"></a>00080         my_handle();
+<a name="l00081"></a>00081         <span class="keywordflow">return</span> NULL;
+<a name="l00082"></a>00082     }
+<a name="l00083"></a>00083 <span class="keyword">public</span>:
+<a name="l00084"></a>00084     task_handle_task( task_handle&lt;F&gt;&amp; h ) : my_handle(h) { h.mark_scheduled(); }
+<a name="l00085"></a>00085 };
+<a name="l00086"></a>00086 
+<a name="l00087"></a>00087 <span class="keyword">class </span>task_group_base : internal::no_copy {
+<a name="l00088"></a>00088 <span class="keyword">protected</span>:
+<a name="l00089"></a>00089     empty_task* my_root;
+<a name="l00090"></a>00090     task_group_context my_context;
+<a name="l00091"></a>00091 
+<a name="l00092"></a>00092     task&amp; owner () { <span class="keywordflow">return</span> *my_root; }
+<a name="l00093"></a>00093 
+<a name="l00094"></a>00094     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F&gt;
+<a name="l00095"></a>00095     task_group_status internal_run_and_wait( F&amp; f ) {
+<a name="l00096"></a>00096         __TBB_TRY {
+<a name="l00097"></a>00097             <span class="keywordflow">if</span> ( !my_context.is_group_execution_cancelled() )
+<a name="l00098"></a>00098                 f();
+<a name="l00099"></a>00099         } __TBB_CATCH( ... ) {
+<a name="l00100"></a>00100             my_context.register_pending_exception();
+<a name="l00101"></a>00101         }
+<a name="l00102"></a>00102         <span class="keywordflow">return</span> wait();
+<a name="l00103"></a>00103     }
+<a name="l00104"></a>00104 
+<a name="l00105"></a>00105     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F, <span class="keyword">typename</span> Task&gt;
+<a name="l00106"></a>00106     <span class="keywordtype">void</span> internal_run( F&amp; f ) {
+<a name="l00107"></a>00107         owner().spawn( *<span class="keyword">new</span>( owner().allocate_additional_child_of(*my_root) ) Task(f) );
+<a name="l00108"></a>00108     }
+<a name="l00109"></a>00109 
+<a name="l00110"></a>00110 <span class="keyword">public</span>:
+<a name="l00111"></a>00111     task_group_base( uintptr_t traits = 0 )
+<a name="l00112"></a>00112         : my_context(task_group_context::bound, task_group_context::default_traits | traits)
+<a name="l00113"></a>00113     {
+<a name="l00114"></a>00114         my_root = <span class="keyword">new</span>( <a class="code" href="a00204.html#23acb0da0afd690da797f9f882027d34">task::allocate_root</a>(my_context) ) empty_task;
+<a name="l00115"></a>00115         my_root-&gt;set_ref_count(1);
+<a name="l00116"></a>00116     }
+<a name="l00117"></a>00117 
+<a name="l00118"></a>00118     ~task_group_base() {
+<a name="l00119"></a>00119         <span class="keywordflow">if</span>( my_root-&gt;ref_count() &gt; 1 ) {
+<a name="l00120"></a>00120             <span class="keywordtype">bool</span> stack_unwinding_in_progress = std::uncaught_exception();
+<a name="l00121"></a>00121             <span class="comment">// Always attempt to do proper cleanup to avoid inevitable memory corruption </span>
+<a name="l00122"></a>00122             <span class="comment">// in case of missing wait (for the sake of better testability &amp; debuggability)</span>
+<a name="l00123"></a>00123             <span class="keywordflow">if</span> ( !is_canceling() )
+<a name="l00124"></a>00124                 cancel();
+<a name="l00125"></a>00125             __TBB_TRY {
+<a name="l00126"></a>00126                 my_root-&gt;wait_for_all();
+<a name="l00127"></a>00127             } __TBB_CATCH (...) {
+<a name="l00128"></a>00128                 <a class="code" href="a00204.html#dfaacf92685e5f86393bf657b2853bf8">task::destroy</a>(*my_root);
+<a name="l00129"></a>00129                 __TBB_RETHROW();
+<a name="l00130"></a>00130             }
+<a name="l00131"></a>00131             <a class="code" href="a00204.html#dfaacf92685e5f86393bf657b2853bf8">task::destroy</a>(*my_root);
+<a name="l00132"></a>00132             <span class="keywordflow">if</span> ( !stack_unwinding_in_progress )
+<a name="l00133"></a>00133                 internal::throw_exception( internal::eid_missing_wait );
+<a name="l00134"></a>00134         }
+<a name="l00135"></a>00135         <span class="keywordflow">else</span> {
+<a name="l00136"></a>00136             <a class="code" href="a00204.html#dfaacf92685e5f86393bf657b2853bf8">task::destroy</a>(*my_root);
+<a name="l00137"></a>00137         }
+<a name="l00138"></a>00138     }
+<a name="l00139"></a>00139 
+<a name="l00140"></a>00140     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F&gt;
+<a name="l00141"></a>00141     <span class="keywordtype">void</span> run( task_handle&lt;F&gt;&amp; h ) {
+<a name="l00142"></a>00142         internal_run&lt; task_handle&lt;F&gt;, internal::task_handle_task&lt;F&gt; &gt;( h );
+<a name="l00143"></a>00143     }
+<a name="l00144"></a>00144 
+<a name="l00145"></a>00145     task_group_status wait() {
+<a name="l00146"></a>00146         __TBB_TRY {
+<a name="l00147"></a>00147             my_root-&gt;wait_for_all();
+<a name="l00148"></a>00148         } __TBB_CATCH( ... ) {
+<a name="l00149"></a>00149             my_context.reset();
+<a name="l00150"></a>00150             __TBB_RETHROW();
+<a name="l00151"></a>00151         }
+<a name="l00152"></a>00152         <span class="keywordflow">if</span> ( my_context.is_group_execution_cancelled() ) {
+<a name="l00153"></a>00153             my_context.reset();
+<a name="l00154"></a>00154             <span class="keywordflow">return</span> canceled;
+<a name="l00155"></a>00155         }
+<a name="l00156"></a>00156         <span class="keywordflow">return</span> complete;
+<a name="l00157"></a>00157     }
+<a name="l00158"></a>00158 
+<a name="l00159"></a>00159     <span class="keywordtype">bool</span> is_canceling() {
+<a name="l00160"></a>00160         <span class="keywordflow">return</span> my_context.is_group_execution_cancelled();
+<a name="l00161"></a>00161     }
+<a name="l00162"></a>00162 
+<a name="l00163"></a>00163     <span class="keywordtype">void</span> cancel() {
+<a name="l00164"></a>00164         my_context.cancel_group_execution();
+<a name="l00165"></a>00165     }
+<a name="l00166"></a>00166 }; <span class="comment">// class task_group_base</span>
+<a name="l00167"></a>00167 
+<a name="l00168"></a>00168 } <span class="comment">// namespace internal</span>
+<a name="l00169"></a>00169 
+<a name="l00170"></a>00170 <span class="keyword">class </span>task_group : <span class="keyword">public</span> internal::task_group_base {
+<a name="l00171"></a>00171 <span class="keyword">public</span>:
+<a name="l00172"></a>00172     task_group () : task_group_base( task_group_context::concurrent_wait ) {}
+<a name="l00173"></a>00173 
+<a name="l00174"></a>00174 <span class="preprocessor">#if TBB_DEPRECATED</span>
+<a name="l00175"></a>00175 <span class="preprocessor"></span>    ~task_group() __TBB_TRY {
+<a name="l00176"></a>00176         __TBB_ASSERT( my_root-&gt;ref_count() != 0, NULL );
+<a name="l00177"></a>00177         <span class="keywordflow">if</span>( my_root-&gt;ref_count() &gt; 1 )
+<a name="l00178"></a>00178             my_root-&gt;wait_for_all();
+<a name="l00179"></a>00179     }
+<a name="l00180"></a>00180 <span class="preprocessor">#if TBB_USE_EXCEPTIONS</span>
+<a name="l00181"></a>00181 <span class="preprocessor"></span>    <span class="keywordflow">catch</span> (...) {
+<a name="l00182"></a>00182         <span class="comment">// Have to destroy my_root here as the base class destructor won't be called</span>
+<a name="l00183"></a>00183         <a class="code" href="a00204.html#dfaacf92685e5f86393bf657b2853bf8">task::destroy</a>(*my_root);
+<a name="l00184"></a>00184         <span class="keywordflow">throw</span>;
+<a name="l00185"></a>00185     }
+<a name="l00186"></a>00186 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_EXCEPTIONS */</span>
+<a name="l00187"></a>00187 <span class="preprocessor">#endif </span><span class="comment">/* TBB_DEPRECATED */</span>
+<a name="l00188"></a>00188 
+<a name="l00189"></a>00189 <span class="preprocessor">#if __SUNPRO_CC</span>
+<a name="l00190"></a>00190 <span class="preprocessor"></span>    <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F&gt;
+<a name="l00191"></a>00191     <span class="keywordtype">void</span> run( task_handle&lt;F&gt;&amp; h ) {
+<a name="l00192"></a>00192         internal_run&lt; task_handle&lt;F&gt;, internal::task_handle_task&lt;F&gt; &gt;( h );
+<a name="l00193"></a>00193     }
+<a name="l00194"></a>00194 <span class="preprocessor">#else</span>
+<a name="l00195"></a>00195 <span class="preprocessor"></span>    <span class="keyword">using</span> task_group_base::run;
+<a name="l00196"></a>00196 <span class="preprocessor">#endif</span>
+<a name="l00197"></a>00197 <span class="preprocessor"></span>
+<a name="l00198"></a>00198     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F&gt;
+<a name="l00199"></a>00199     <span class="keywordtype">void</span> run( <span class="keyword">const</span> F&amp; f ) {
+<a name="l00200"></a>00200         internal_run&lt; const F, internal::function_task&lt;F&gt; &gt;( f );
+<a name="l00201"></a>00201     }
+<a name="l00202"></a>00202 
+<a name="l00203"></a>00203     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F&gt;
+<a name="l00204"></a>00204     task_group_status run_and_wait( <span class="keyword">const</span> F&amp; f ) {
+<a name="l00205"></a>00205         <span class="keywordflow">return</span> internal_run_and_wait&lt;const F&gt;( f );
+<a name="l00206"></a>00206     }
+<a name="l00207"></a>00207 
+<a name="l00208"></a>00208     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F&gt;
+<a name="l00209"></a>00209     task_group_status run_and_wait( task_handle&lt;F&gt;&amp; h ) {
+<a name="l00210"></a>00210       <span class="keywordflow">return</span> internal_run_and_wait&lt; task_handle&lt;F&gt; &gt;( h );
+<a name="l00211"></a>00211     }
+<a name="l00212"></a>00212 }; <span class="comment">// class task_group</span>
+<a name="l00213"></a>00213 
+<a name="l00214"></a>00214 <span class="keyword">class </span>structured_task_group : <span class="keyword">public</span> internal::task_group_base {
+<a name="l00215"></a>00215 <span class="keyword">public</span>:
+<a name="l00216"></a>00216     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> F&gt;
+<a name="l00217"></a>00217     task_group_status run_and_wait ( task_handle&lt;F&gt;&amp; h ) {
+<a name="l00218"></a>00218         <span class="keywordflow">return</span> internal_run_and_wait&lt; task_handle&lt;F&gt; &gt;( h );
+<a name="l00219"></a>00219     }
+<a name="l00220"></a>00220 
+<a name="l00221"></a>00221     task_group_status wait() {
+<a name="l00222"></a>00222         task_group_status res = task_group_base::wait();
+<a name="l00223"></a>00223         my_root-&gt;set_ref_count(1);
+<a name="l00224"></a>00224         <span class="keywordflow">return</span> res;
+<a name="l00225"></a>00225     }
+<a name="l00226"></a>00226 }; <span class="comment">// class structured_task_group</span>
+<a name="l00227"></a>00227 
+<a name="l00228"></a>00228 <span class="keyword">inline</span> 
+<a name="l00229"></a>00229 <span class="keywordtype">bool</span> is_current_task_group_canceling() {
+<a name="l00230"></a>00230     <span class="keywordflow">return</span> <a class="code" href="a00204.html#bd43e8d6249738efafd12d6a4c72c5e3">task::self</a>().<a class="code" href="a00204.html#025f18118c057c4c8db87ff2ce8df975">is_cancelled</a>();
+<a name="l00231"></a>00231 }
+<a name="l00232"></a>00232 
+<a name="l00233"></a>00233 <span class="keyword">template</span>&lt;<span class="keyword">class</span> F&gt;
+<a name="l00234"></a>00234 task_handle&lt;F&gt; make_task( <span class="keyword">const</span> F&amp; f ) {
+<a name="l00235"></a>00235     <span class="keywordflow">return</span> task_handle&lt;F&gt;( f );
+<a name="l00236"></a>00236 }
+<a name="l00237"></a>00237 
+<a name="l00238"></a>00238 } <span class="comment">// namespace tbb</span>
+<a name="l00239"></a>00239 
+<a name="l00240"></a>00240 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_task_group_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
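
Editor's note: for reference, a minimal sketch of how the task_group class defined in the listing above is typically driven; the Work functor is hypothetical and not part of TBB.

    #include "tbb/task_group.h"

    // Hypothetical functor, for illustration only.
    struct Work {
        int id;
        Work( int i ) : id(i) {}
        void operator()() const { /* ...do the work for this id... */ }
    };

    int main() {
        tbb::task_group g;
        g.run( Work(1) );                       // spawned asynchronously via function_task
        g.run( Work(2) );
        tbb::task_group_status s = g.wait();    // joins both; complete or canceled
        return s == tbb::complete ? 0 : 1;
    }

Calling wait() (or run_and_wait()) before the task_group goes out of scope matters: as shown in the destructor above, outstanding tasks are cancelled and eid_missing_wait is thrown when the group is destroyed without a wait.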
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00454.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00454.html
new file mode 100644 (file)
index 0000000..c462d98
--- /dev/null
@@ -0,0 +1,99 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>task_scheduler_init.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>task_scheduler_init.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_task_scheduler_init_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_task_scheduler_init_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "tbb_stddef.h"</span>
+<a name="l00025"></a>00025 
+<a name="l00026"></a>00026 <span class="keyword">namespace </span>tbb {
+<a name="l00027"></a>00027 
+<a name="l00028"></a>00028 <span class="keyword">typedef</span> std::size_t stack_size_type;
+<a name="l00029"></a>00029 
+<a name="l00031"></a>00031 <span class="keyword">namespace </span>internal {
+<a name="l00033"></a>00033 
+<a name="l00034"></a>00034     <span class="keyword">class </span>scheduler;
+<a name="l00035"></a>00035 } <span class="comment">// namespace internal</span>
+<a name="l00037"></a>00037 <span class="comment"></span>
+<a name="l00039"></a>00039 
+<a name="l00042"></a><a class="code" href="a00208.html">00042</a> <span class="keyword">class </span><a class="code" href="a00208.html">task_scheduler_init</a>: internal::no_copy {
+<a name="l00044"></a>00044     internal::scheduler* my_scheduler;
+<a name="l00045"></a>00045 <span class="keyword">public</span>:
+<a name="l00046"></a>00046 
+<a name="l00048"></a><a class="code" href="a00208.html#8f5988e2b0fbb2d533fcbb7f2583743f">00048</a>     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">int</span> <a class="code" href="a00208.html#8f5988e2b0fbb2d533fcbb7f2583743f">automatic</a> = -1;
+<a name="l00049"></a>00049 
+<a name="l00051"></a><a class="code" href="a00208.html#e6c860f1e559026ff3ef4599c0d6c514">00051</a>     <span class="keyword">static</span> <span class="keyword">const</span> <span class="keywordtype">int</span> <a class="code" href="a00208.html#e6c860f1e559026ff3ef4599c0d6c514">deferred</a> = -2;
+<a name="l00052"></a>00052 
+<a name="l00054"></a>00054 
+<a name="l00062"></a>00062     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD <a class="code" href="a00208.html#d476053cc712e572554823492a5229ce">initialize</a>( <span class="keywordtype">int</span> number_of_threads=<a class="code" href="a00208.html#8f5988e2b0fbb2d533fcbb7f2583743f">automatic</a> );
+<a name="l00063"></a>00063 
+<a name="l00065"></a>00065 
+<a name="l00066"></a>00066     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD <a class="code" href="a00208.html#d476053cc712e572554823492a5229ce">initialize</a>( <span class="keywordtype">int</span> number_of_threads, stack_size_type thread_stack_size );
+<a name="l00067"></a>00067 
+<a name="l00069"></a>00069     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD <a class="code" href="a00208.html#f73257e04cb7fb9bd5be2b635d9016f1">terminate</a>();
+<a name="l00070"></a>00070 
+<a name="l00072"></a><a class="code" href="a00208.html#421600bf9bf9338bcf937063f2ff0e90">00072</a>     <a class="code" href="a00208.html#421600bf9bf9338bcf937063f2ff0e90">task_scheduler_init</a>( <span class="keywordtype">int</span> number_of_threads=<a class="code" href="a00208.html#8f5988e2b0fbb2d533fcbb7f2583743f">automatic</a>, stack_size_type thread_stack_size=0 ) : my_scheduler(NULL)  {
+<a name="l00073"></a>00073         <a class="code" href="a00208.html#d476053cc712e572554823492a5229ce">initialize</a>( number_of_threads, thread_stack_size );
+<a name="l00074"></a>00074     }
+<a name="l00075"></a>00075 
+<a name="l00077"></a><a class="code" href="a00208.html#4da6c86292d80c703a66c1f6f5299488">00077</a>     <a class="code" href="a00208.html#4da6c86292d80c703a66c1f6f5299488">~task_scheduler_init</a>() {
+<a name="l00078"></a>00078         <span class="keywordflow">if</span>( my_scheduler ) 
+<a name="l00079"></a>00079             <a class="code" href="a00208.html#f73257e04cb7fb9bd5be2b635d9016f1">terminate</a>();
+<a name="l00080"></a>00080         internal::poison_pointer( my_scheduler );
+<a name="l00081"></a>00081     }
+<a name="l00083"></a>00083 
+<a name="l00090"></a>00090     <span class="keyword">static</span> <span class="keywordtype">int</span> __TBB_EXPORTED_FUNC <a class="code" href="a00208.html#ba00714c33a41a3c2216f48613971cab">default_num_threads</a> ();
+<a name="l00091"></a>00091 
+<a name="l00093"></a><a class="code" href="a00208.html#12752282977029f23416642bc03e8b74">00093</a>     <span class="keywordtype">bool</span> <a class="code" href="a00208.html#12752282977029f23416642bc03e8b74">is_active</a>()<span class="keyword"> const </span>{ <span class="keywordflow">return</span> my_scheduler != NULL; }
+<a name="l00094"></a>00094 };
+<a name="l00095"></a>00095 
+<a name="l00096"></a>00096 } <span class="comment">// namespace tbb</span>
+<a name="l00097"></a>00097 
+<a name="l00098"></a>00098 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_task_scheduler_init_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
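
Editor's note: a short sketch of the deferred-initialization pattern supported by the task_scheduler_init class above; the thread cap of 4 is an arbitrary value chosen for illustration.

    #include "tbb/task_scheduler_init.h"

    int main() {
        // Defer scheduler creation until the thread count is known.
        tbb::task_scheduler_init init( tbb::task_scheduler_init::deferred );
        int n = tbb::task_scheduler_init::default_num_threads();
        init.initialize( n > 4 ? 4 : n );   // cap the worker pool (arbitrary choice)
        // ... parallel work here ...
        return init.is_active() ? 0 : 1;    // destructor terminates the scheduler
    }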
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00455.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00455.html
new file mode 100644 (file)
index 0000000..0d5a4d3
--- /dev/null
@@ -0,0 +1,90 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>task_scheduler_observer.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>task_scheduler_observer.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_task_scheduler_observer_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_task_scheduler_observer_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "atomic.h"</span>
+<a name="l00025"></a>00025 
+<a name="l00026"></a>00026 <span class="preprocessor">#if __TBB_SCHEDULER_OBSERVER</span>
+<a name="l00027"></a>00027 <span class="preprocessor"></span>
+<a name="l00028"></a>00028 <span class="keyword">namespace </span>tbb {
+<a name="l00029"></a>00029 
+<a name="l00030"></a>00030 <span class="keyword">namespace </span>internal {
+<a name="l00031"></a>00031 
+<a name="l00032"></a>00032 <span class="keyword">class </span>observer_proxy;
+<a name="l00033"></a>00033 
+<a name="l00034"></a>00034 <span class="keyword">class </span>task_scheduler_observer_v3 {
+<a name="l00035"></a>00035     <span class="keyword">friend</span> <span class="keyword">class </span>observer_proxy;
+<a name="l00036"></a>00036     observer_proxy* my_proxy;
+<a name="l00037"></a>00037     atomic&lt;intptr_t&gt; my_busy_count;
+<a name="l00038"></a>00038 <span class="keyword">public</span>:
+<a name="l00040"></a>00040     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD observe( <span class="keywordtype">bool</span> state=<span class="keyword">true</span> );
+<a name="l00041"></a>00041 
+<a name="l00043"></a>00043     <span class="keywordtype">bool</span> is_observing()<span class="keyword"> const </span>{<span class="keywordflow">return</span> my_proxy!=NULL;}
+<a name="l00044"></a>00044 
+<a name="l00046"></a>00046     task_scheduler_observer_v3() : my_proxy(NULL) {my_busy_count=0;}
+<a name="l00047"></a>00047 
+<a name="l00049"></a>00049     <span class="keyword">virtual</span> <span class="keywordtype">void</span> on_scheduler_entry( <span class="keywordtype">bool</span> <span class="comment">/*is_worker*/</span> ) {} 
+<a name="l00050"></a>00050 
+<a name="l00052"></a>00052     <span class="keyword">virtual</span> <span class="keywordtype">void</span> on_scheduler_exit( <span class="keywordtype">bool</span> <span class="comment">/*is_worker*/</span> ) {}
+<a name="l00053"></a>00053 
+<a name="l00055"></a>00055     <span class="keyword">virtual</span> ~task_scheduler_observer_v3() {observe(<span class="keyword">false</span>);}
+<a name="l00056"></a>00056 };
+<a name="l00057"></a>00057 
+<a name="l00058"></a>00058 } <span class="comment">// namespace internal</span>
+<a name="l00059"></a>00059 
+<a name="l00060"></a>00060 <span class="keyword">typedef</span> internal::task_scheduler_observer_v3 task_scheduler_observer;
+<a name="l00061"></a>00061 
+<a name="l00062"></a>00062 } <span class="comment">// namespace tbb</span>
+<a name="l00063"></a>00063 
+<a name="l00064"></a>00064 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_SCHEDULER_OBSERVER */</span>
+<a name="l00065"></a>00065 
+<a name="l00066"></a>00066 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_task_scheduler_observer_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
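
Editor's note: the observer interface above is only compiled when __TBB_SCHEDULER_OBSERVER is set; under that assumption, a hypothetical subclass might look like the sketch below.

    #include "tbb/task_scheduler_observer.h"
    #include <cstdio>

    // Hypothetical observer that logs threads joining/leaving the scheduler.
    class LoggingObserver : public tbb::task_scheduler_observer {
    public:
        LoggingObserver() { observe(true); }    // start observing immediately
        /*override*/ void on_scheduler_entry( bool is_worker ) {
            std::printf( "scheduler entry (worker=%d)\n", int(is_worker) );
        }
        /*override*/ void on_scheduler_exit( bool is_worker ) {
            std::printf( "scheduler exit (worker=%d)\n", int(is_worker) );
        }
    };

The base destructor calls observe(false), so the callbacks stop when the observer goes out of scope.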
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00457.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00457.html
new file mode 100644 (file)
index 0000000..0f5074f
--- /dev/null
@@ -0,0 +1,94 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_tbb_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_tbb_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00032"></a>00032 <span class="preprocessor">#include "aligned_space.h"</span>
+<a name="l00033"></a>00033 <span class="preprocessor">#include "atomic.h"</span>
+<a name="l00034"></a>00034 <span class="preprocessor">#include "blocked_range.h"</span>
+<a name="l00035"></a>00035 <span class="preprocessor">#include "blocked_range2d.h"</span>
+<a name="l00036"></a>00036 <span class="preprocessor">#include "blocked_range3d.h"</span>
+<a name="l00037"></a>00037 <span class="preprocessor">#include "cache_aligned_allocator.h"</span>
+<a name="l00038"></a>00038 <span class="preprocessor">#include "combinable.h"</span>
+<a name="l00039"></a>00039 <span class="preprocessor">#include "concurrent_unordered_map.h"</span>
+<a name="l00040"></a>00040 <span class="preprocessor">#include "concurrent_hash_map.h"</span>
+<a name="l00041"></a>00041 <span class="preprocessor">#include "concurrent_queue.h"</span>
+<a name="l00042"></a>00042 <span class="preprocessor">#include "concurrent_vector.h"</span>
+<a name="l00043"></a>00043 <span class="preprocessor">#include "critical_section.h"</span>
+<a name="l00044"></a>00044 <span class="preprocessor">#include "enumerable_thread_specific.h"</span>
+<a name="l00045"></a>00045 <span class="preprocessor">#include "mutex.h"</span>
+<a name="l00046"></a>00046 <span class="preprocessor">#include "null_mutex.h"</span>
+<a name="l00047"></a>00047 <span class="preprocessor">#include "null_rw_mutex.h"</span>
+<a name="l00048"></a>00048 <span class="preprocessor">#include "parallel_do.h"</span>
+<a name="l00049"></a>00049 <span class="preprocessor">#include "parallel_for.h"</span>
+<a name="l00050"></a>00050 <span class="preprocessor">#include "parallel_for_each.h"</span>
+<a name="l00051"></a>00051 <span class="preprocessor">#include "parallel_invoke.h"</span>
+<a name="l00052"></a>00052 <span class="preprocessor">#include "parallel_reduce.h"</span>
+<a name="l00053"></a>00053 <span class="preprocessor">#include "parallel_scan.h"</span>
+<a name="l00054"></a>00054 <span class="preprocessor">#include "parallel_sort.h"</span>
+<a name="l00055"></a>00055 <span class="preprocessor">#include "partitioner.h"</span>
+<a name="l00056"></a>00056 <span class="preprocessor">#include "pipeline.h"</span>
+<a name="l00057"></a>00057 <span class="preprocessor">#include "queuing_mutex.h"</span>
+<a name="l00058"></a>00058 <span class="preprocessor">#include "queuing_rw_mutex.h"</span>
+<a name="l00059"></a>00059 <span class="preprocessor">#include "reader_writer_lock.h"</span>
+<a name="l00060"></a>00060 <span class="preprocessor">#include "recursive_mutex.h"</span>
+<a name="l00061"></a>00061 <span class="preprocessor">#include "spin_mutex.h"</span>
+<a name="l00062"></a>00062 <span class="preprocessor">#include "spin_rw_mutex.h"</span>
+<a name="l00063"></a>00063 <span class="preprocessor">#include "task.h"</span>
+<a name="l00064"></a>00064 <span class="preprocessor">#include "task_group.h"</span>
+<a name="l00065"></a>00065 <span class="preprocessor">#include "task_scheduler_init.h"</span>
+<a name="l00066"></a>00066 <span class="preprocessor">#include "task_scheduler_observer.h"</span>
+<a name="l00067"></a>00067 <span class="preprocessor">#include "tbb_allocator.h"</span>
+<a name="l00068"></a>00068 <span class="preprocessor">#include "tbb_exception.h"</span>
+<a name="l00069"></a>00069 <span class="preprocessor">#include "tbb_thread.h"</span>
+<a name="l00070"></a>00070 <span class="preprocessor">#include "tick_count.h"</span>
+<a name="l00071"></a>00071 
+<a name="l00072"></a>00072 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_tbb_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
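
Editor's note: since tbb.h simply aggregates every public header listed above, one include is enough for small programs. A hypothetical example using parallel_for and blocked_range, both pulled in through tbb.h:

    #include "tbb/tbb.h"   // aggregates all public TBB headers

    // Hypothetical body functor, for illustration only.
    struct Doubler {
        int* data;
        Doubler( int* d ) : data(d) {}
        void operator()( const tbb::blocked_range<int>& r ) const {
            for( int i = r.begin(); i != r.end(); ++i )
                data[i] *= 2;
        }
    };

    int main() {
        int a[100];
        for( int i = 0; i < 100; ++i ) a[i] = i;
        tbb::parallel_for( tbb::blocked_range<int>(0, 100), Doubler(a) );
        return 0;
    }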
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00458.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00458.html
new file mode 100644 (file)
index 0000000..feebad0
--- /dev/null
@@ -0,0 +1,212 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb_allocator.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb_allocator.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_tbb_allocator_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_tbb_allocator_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "tbb_stddef.h"</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#include &lt;new&gt;</span>
+<a name="l00026"></a>00026 
+<a name="l00027"></a>00027 <span class="preprocessor">#if !TBB_USE_EXCEPTIONS &amp;&amp; _MSC_VER</span>
+<a name="l00028"></a>00028 <span class="preprocessor"></span>    <span class="comment">// Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers</span>
+<a name="l00029"></a>00029 <span class="preprocessor">    #pragma warning (push)</span>
+<a name="l00030"></a>00030 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (disable: 4530)</span>
+<a name="l00031"></a>00031 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00032"></a>00032 <span class="preprocessor"></span>
+<a name="l00033"></a>00033 <span class="preprocessor">#include &lt;cstring&gt;</span>
+<a name="l00034"></a>00034 
+<a name="l00035"></a>00035 <span class="preprocessor">#if !TBB_USE_EXCEPTIONS &amp;&amp; _MSC_VER</span>
+<a name="l00036"></a>00036 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (pop)</span>
+<a name="l00037"></a>00037 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00038"></a>00038 <span class="preprocessor"></span>
+<a name="l00039"></a>00039 <span class="keyword">namespace </span>tbb {
+<a name="l00040"></a>00040 
+<a name="l00042"></a>00042 <span class="keyword">namespace </span>internal {
+<a name="l00043"></a>00043 
+<a name="l00045"></a>00045 
+<a name="l00046"></a>00046     <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC deallocate_via_handler_v3( <span class="keywordtype">void</span> *p );
+<a name="l00047"></a>00047 
+<a name="l00049"></a>00049 
+<a name="l00050"></a>00050     <span class="keywordtype">void</span>* __TBB_EXPORTED_FUNC allocate_via_handler_v3( size_t n );
+<a name="l00051"></a>00051 
+<a name="l00053"></a>00053     <span class="keywordtype">bool</span> __TBB_EXPORTED_FUNC is_malloc_used_v3();
+<a name="l00054"></a>00054 }
+<a name="l00056"></a>00056 
+<a name="l00057"></a>00057 <span class="preprocessor">#if _MSC_VER &amp;&amp; !defined(__INTEL_COMPILER)</span>
+<a name="l00058"></a>00058 <span class="preprocessor"></span>    <span class="comment">// Workaround for erroneous "unreferenced parameter" warning in method destroy.</span>
+<a name="l00059"></a>00059 <span class="preprocessor">    #pragma warning (push)</span>
+<a name="l00060"></a>00060 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (disable: 4100)</span>
+<a name="l00061"></a>00061 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00062"></a>00062 <span class="preprocessor"></span>
+<a name="l00064"></a>00064 
+<a name="l00069"></a>00069 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00070"></a><a class="code" href="a00209.html">00070</a> <span class="keyword">class </span><a class="code" href="a00209.html">tbb_allocator</a> {
+<a name="l00071"></a>00071 <span class="keyword">public</span>:
+<a name="l00072"></a>00072     <span class="keyword">typedef</span> <span class="keyword">typename</span> internal::allocator_type&lt;T&gt;::value_type value_type;
+<a name="l00073"></a>00073     <span class="keyword">typedef</span> value_type* pointer;
+<a name="l00074"></a>00074     <span class="keyword">typedef</span> <span class="keyword">const</span> value_type* const_pointer;
+<a name="l00075"></a>00075     <span class="keyword">typedef</span> value_type&amp; reference;
+<a name="l00076"></a>00076     <span class="keyword">typedef</span> <span class="keyword">const</span> value_type&amp; const_reference;
+<a name="l00077"></a>00077     <span class="keyword">typedef</span> size_t size_type;
+<a name="l00078"></a>00078     <span class="keyword">typedef</span> ptrdiff_t difference_type;
+<a name="l00079"></a>00079     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> U&gt; <span class="keyword">struct </span>rebind {
+<a name="l00080"></a>00080         <span class="keyword">typedef</span> <a class="code" href="a00209.html">tbb_allocator&lt;U&gt;</a> other;
+<a name="l00081"></a>00081     };
+<a name="l00082"></a>00082 
+<a name="l00084"></a><a class="code" href="a00209.html#09a7f81fb2c3055aaecf058b11538544">00084</a>     <span class="keyword">enum</span> <a class="code" href="a00209.html#09a7f81fb2c3055aaecf058b11538544">malloc_type</a> {
+<a name="l00085"></a>00085         scalable, 
+<a name="l00086"></a>00086         standard
+<a name="l00087"></a>00087     };
+<a name="l00088"></a>00088 
+<a name="l00089"></a>00089     <a class="code" href="a00209.html">tbb_allocator</a>() throw() {}
+<a name="l00090"></a>00090     tbb_allocator( <span class="keyword">const</span> tbb_allocator&amp; ) throw() {}
+<a name="l00091"></a>00091     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> U&gt; tbb_allocator(<span class="keyword">const</span> tbb_allocator&lt;U&gt;&amp;) throw() {}
+<a name="l00092"></a>00092 
+<a name="l00093"></a>00093     pointer address(reference x)<span class="keyword"> const </span>{<span class="keywordflow">return</span> &amp;x;}
+<a name="l00094"></a>00094     const_pointer address(const_reference x)<span class="keyword"> const </span>{<span class="keywordflow">return</span> &amp;x;}
+<a name="l00095"></a>00095     
+<a name="l00097"></a><a class="code" href="a00209.html#f6cb487b1bdce0b581f265a77dca6d53">00097</a>     pointer <a class="code" href="a00209.html#f6cb487b1bdce0b581f265a77dca6d53">allocate</a>( size_type n, <span class="keyword">const</span> <span class="keywordtype">void</span>* <span class="comment">/*hint*/</span> = 0) {
+<a name="l00098"></a>00098         <span class="keywordflow">return</span> pointer(internal::allocate_via_handler_v3( n * <span class="keyword">sizeof</span>(value_type) ));
+<a name="l00099"></a>00099     }
+<a name="l00100"></a>00100 
+<a name="l00102"></a><a class="code" href="a00209.html#fdd011fdf2f9ad07006dc7c0a7ec1da2">00102</a>     <span class="keywordtype">void</span> <a class="code" href="a00209.html#fdd011fdf2f9ad07006dc7c0a7ec1da2">deallocate</a>( pointer p, size_type ) {
+<a name="l00103"></a>00103         internal::deallocate_via_handler_v3(p);        
+<a name="l00104"></a>00104     }
+<a name="l00105"></a>00105 
+<a name="l00107"></a><a class="code" href="a00209.html#f059ca2c96243024f0d562ee3a87a3a5">00107</a>     size_type <a class="code" href="a00209.html#f059ca2c96243024f0d562ee3a87a3a5">max_size</a>() const throw() {
+<a name="l00108"></a>00108         size_type max = static_cast&lt;size_type&gt;(-1) / <span class="keyword">sizeof</span> (value_type);
+<a name="l00109"></a>00109         <span class="keywordflow">return</span> (max &gt; 0 ? max : 1);
+<a name="l00110"></a>00110     }
+<a name="l00111"></a>00111     
+<a name="l00113"></a><a class="code" href="a00209.html#ab228ab9e324ed041c2226e1d717df5f">00113</a>     <span class="keywordtype">void</span> <a class="code" href="a00209.html#ab228ab9e324ed041c2226e1d717df5f">construct</a>( pointer p, <span class="keyword">const</span> value_type&amp; value ) {::new((<span class="keywordtype">void</span>*)(p)) value_type(value);}
+<a name="l00114"></a>00114 
+<a name="l00116"></a><a class="code" href="a00209.html#ef133522bf55f05a605bee0763208281">00116</a>     <span class="keywordtype">void</span> <a class="code" href="a00209.html#ef133522bf55f05a605bee0763208281">destroy</a>( pointer p ) {p-&gt;~value_type();}
+<a name="l00117"></a>00117 
+<a name="l00119"></a><a class="code" href="a00209.html#78701e7454ef8e1a25b5acd364367080">00119</a>     <span class="keyword">static</span> <a class="code" href="a00209.html#09a7f81fb2c3055aaecf058b11538544">malloc_type</a> <a class="code" href="a00209.html#78701e7454ef8e1a25b5acd364367080">allocator_type</a>() {
+<a name="l00120"></a>00120         <span class="keywordflow">return</span> internal::is_malloc_used_v3() ? standard : scalable;
+<a name="l00121"></a>00121     }
+<a name="l00122"></a>00122 };
+<a name="l00123"></a>00123 
+<a name="l00124"></a>00124 <span class="preprocessor">#if _MSC_VER &amp;&amp; !defined(__INTEL_COMPILER)</span>
+<a name="l00125"></a>00125 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (pop)</span>
+<a name="l00126"></a>00126 <span class="preprocessor"></span><span class="preprocessor">#endif // warning 4100 is back</span>
+<a name="l00127"></a>00127 <span class="preprocessor"></span>
+<a name="l00129"></a>00129 
+<a name="l00130"></a>00130 <span class="keyword">template</span>&lt;&gt; 
+<a name="l00131"></a><a class="code" href="a00210.html">00131</a> <span class="keyword">class </span><a class="code" href="a00209.html">tbb_allocator</a>&lt;void&gt; {
+<a name="l00132"></a>00132 <span class="keyword">public</span>:
+<a name="l00133"></a>00133     <span class="keyword">typedef</span> <span class="keywordtype">void</span>* pointer;
+<a name="l00134"></a>00134     <span class="keyword">typedef</span> <span class="keyword">const</span> <span class="keywordtype">void</span>* const_pointer;
+<a name="l00135"></a>00135     <span class="keyword">typedef</span> <span class="keywordtype">void</span> value_type;
+<a name="l00136"></a>00136     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> U&gt; <span class="keyword">struct </span>rebind {
+<a name="l00137"></a>00137         <span class="keyword">typedef</span> <a class="code" href="a00209.html">tbb_allocator&lt;U&gt;</a> other;
+<a name="l00138"></a>00138     };
+<a name="l00139"></a>00139 };
+<a name="l00140"></a>00140 
+<a name="l00141"></a>00141 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00142"></a>00142 <span class="keyword">inline</span> <span class="keywordtype">bool</span> operator==( <span class="keyword">const</span> <a class="code" href="a00209.html">tbb_allocator&lt;T&gt;</a>&amp;, <span class="keyword">const</span> <a class="code" href="a00209.html">tbb_allocator&lt;U&gt;</a>&amp; ) {<span class="keywordflow">return</span> <span class="keyword">true</span>;}
+<a name="l00143"></a>00143 
+<a name="l00144"></a>00144 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00145"></a>00145 <span class="keyword">inline</span> <span class="keywordtype">bool</span> operator!=( <span class="keyword">const</span> tbb_allocator&lt;T&gt;&amp;, <span class="keyword">const</span> tbb_allocator&lt;U&gt;&amp; ) {<span class="keywordflow">return</span> <span class="keyword">false</span>;}
+<a name="l00146"></a>00146 
+<a name="l00148"></a>00148 
+<a name="l00153"></a>00153 <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T, <span class="keyword">template</span>&lt;<span class="keyword">typename</span> X&gt; <span class="keyword">class </span>Allocator = tbb_allocator&gt;
+<a name="l00154"></a><a class="code" href="a00218.html">00154</a> <span class="keyword">class </span><a class="code" href="a00218.html">zero_allocator</a> : <span class="keyword">public</span> Allocator&lt;T&gt;
+<a name="l00155"></a>00155 {
+<a name="l00156"></a>00156 <span class="keyword">public</span>:
+<a name="l00157"></a>00157     <span class="keyword">typedef</span> Allocator&lt;T&gt; base_allocator_type;
+<a name="l00158"></a>00158     <span class="keyword">typedef</span> <span class="keyword">typename</span> base_allocator_type::value_type value_type;
+<a name="l00159"></a>00159     <span class="keyword">typedef</span> <span class="keyword">typename</span> base_allocator_type::pointer pointer;
+<a name="l00160"></a>00160     <span class="keyword">typedef</span> <span class="keyword">typename</span> base_allocator_type::const_pointer const_pointer;
+<a name="l00161"></a>00161     <span class="keyword">typedef</span> <span class="keyword">typename</span> base_allocator_type::reference reference;
+<a name="l00162"></a>00162     <span class="keyword">typedef</span> <span class="keyword">typename</span> base_allocator_type::const_reference const_reference;
+<a name="l00163"></a>00163     <span class="keyword">typedef</span> <span class="keyword">typename</span> base_allocator_type::size_type size_type;
+<a name="l00164"></a>00164     <span class="keyword">typedef</span> <span class="keyword">typename</span> base_allocator_type::difference_type difference_type;
+<a name="l00165"></a>00165     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> U&gt; <span class="keyword">struct </span>rebind {
+<a name="l00166"></a>00166         <span class="keyword">typedef</span> <a class="code" href="a00218.html">zero_allocator&lt;U, Allocator&gt;</a> other;
+<a name="l00167"></a>00167     };
+<a name="l00168"></a>00168 
+<a name="l00169"></a>00169     <a class="code" href="a00218.html">zero_allocator</a>() <span class="keywordflow">throw</span>() { }
+<a name="l00170"></a>00170     <a class="code" href="a00218.html">zero_allocator</a>(<span class="keyword">const</span> <a class="code" href="a00218.html">zero_allocator</a> &amp;a) <span class="keywordflow">throw</span>() : base_allocator_type( a ) { }
+<a name="l00171"></a>00171     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> U&gt;
+<a name="l00172"></a>00172     <a class="code" href="a00218.html">zero_allocator</a>(<span class="keyword">const</span> <a class="code" href="a00218.html">zero_allocator&lt;U&gt;</a> &amp;a) <span class="keywordflow">throw</span>() : base_allocator_type( Allocator&lt;U&gt;( a ) ) { }
+<a name="l00173"></a>00173 
+<a name="l00174"></a>00174     pointer allocate(<span class="keyword">const</span> size_type n, <span class="keyword">const</span> <span class="keywordtype">void</span> *hint = 0 ) {
+<a name="l00175"></a>00175         pointer ptr = base_allocator_type::allocate( n, hint );
+<a name="l00176"></a>00176         std::memset( ptr, 0, n * <span class="keyword">sizeof</span>(value_type) );
+<a name="l00177"></a>00177         <span class="keywordflow">return</span> ptr;
+<a name="l00178"></a>00178     }
+<a name="l00179"></a>00179 };
+<a name="l00180"></a>00180 
+<a name="l00182"></a>00182 
+<a name="l00183"></a>00183 <span class="keyword">template</span>&lt;<span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt; <span class="keyword">class </span>Allocator&gt; 
+<a name="l00184"></a><a class="code" href="a00219.html">00184</a> <span class="keyword">class </span><a class="code" href="a00218.html">zero_allocator</a>&lt;void, Allocator&gt; : <span class="keyword">public</span> Allocator&lt;void&gt; {
+<a name="l00185"></a>00185 <span class="keyword">public</span>:
+<a name="l00186"></a>00186     <span class="keyword">typedef</span> Allocator&lt;void&gt; base_allocator_type;
+<a name="l00187"></a>00187     <span class="keyword">typedef</span> <span class="keyword">typename</span> base_allocator_type::value_type value_type;
+<a name="l00188"></a>00188     <span class="keyword">typedef</span> <span class="keyword">typename</span> base_allocator_type::pointer pointer;
+<a name="l00189"></a>00189     <span class="keyword">typedef</span> <span class="keyword">typename</span> base_allocator_type::const_pointer const_pointer;
+<a name="l00190"></a>00190     <span class="keyword">template</span>&lt;<span class="keyword">typename</span> U&gt; <span class="keyword">struct </span>rebind {
+<a name="l00191"></a>00191         <span class="keyword">typedef</span> <a class="code" href="a00218.html">zero_allocator&lt;U, Allocator&gt;</a> other;
+<a name="l00192"></a>00192     };
+<a name="l00193"></a>00193 };
+<a name="l00194"></a>00194 
+<a name="l00195"></a>00195 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T1, <span class="keyword">template</span>&lt;<span class="keyword">typename</span> X1&gt; <span class="keyword">class </span>B1, <span class="keyword">typename</span> T2, <span class="keyword">template</span>&lt;<span class="keyword">typename</span> X2&gt; <span class="keyword">class </span>B2&gt;
+<a name="l00196"></a>00196 <span class="keyword">inline</span> <span class="keywordtype">bool</span> operator==( <span class="keyword">const</span> <a class="code" href="a00218.html">zero_allocator&lt;T1,B1&gt;</a> &amp;a, <span class="keyword">const</span> <a class="code" href="a00218.html">zero_allocator&lt;T2,B2&gt;</a> &amp;b) {
+<a name="l00197"></a>00197     <span class="keywordflow">return</span> static_cast&lt; B1&lt;T1&gt; &gt;(a) == <span class="keyword">static_cast</span>&lt; B2&lt;T2&gt; &gt;(b);
+<a name="l00198"></a>00198 }
+<a name="l00199"></a>00199 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T1, <span class="keyword">template</span>&lt;<span class="keyword">typename</span> X1&gt; <span class="keyword">class </span>B1, <span class="keyword">typename</span> T2, <span class="keyword">template</span>&lt;<span class="keyword">typename</span> X2&gt; <span class="keyword">class </span>B2&gt;
+<a name="l00200"></a>00200 <span class="keyword">inline</span> <span class="keywordtype">bool</span> operator!=( <span class="keyword">const</span> zero_allocator&lt;T1,B1&gt; &amp;a, <span class="keyword">const</span> zero_allocator&lt;T2,B2&gt; &amp;b) {
+<a name="l00201"></a>00201     <span class="keywordflow">return</span> static_cast&lt; B1&lt;T1&gt; &gt;(a) != <span class="keyword">static_cast</span>&lt; B2&lt;T2&gt; &gt;(b);
+<a name="l00202"></a>00202 }
+<a name="l00203"></a>00203 
+<a name="l00204"></a>00204 } <span class="comment">// namespace tbb </span>
+<a name="l00205"></a>00205 
+<a name="l00206"></a>00206 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_tbb_allocator_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
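The tbb_allocator.h listing above defines two STL-compatible allocators: tbb::tbb_allocator, which routes requests through the TBB scalable allocator when the tbbmalloc library is available and falls back to standard malloc/free otherwise, and tbb::zero_allocator, which forwards to a base allocator (tbb_allocator by default) and zero-fills each block before returning it, as the allocate() override in the listing shows. A minimal usage sketch, not part of the patch itself; the containers and sizes are purely illustrative:

    // Sketch only: plugging the TBB allocators into standard containers.
    #include <vector>
    #include "tbb/tbb_allocator.h"

    int main() {
        // tbb_allocator decides at run time: scalable allocation if the
        // tbbmalloc library is loaded, otherwise plain std::malloc/std::free.
        std::vector<int, tbb::tbb_allocator<int> > v( 16, 42 );

        // zero_allocator calls the base allocator and then memsets the
        // returned block to zero before the vector constructs its elements.
        std::vector<double, tbb::zero_allocator<double> > z( 8 );

        return ( v.size() == 16 && z.size() == 8 ) ? 0 : 1;
    }

The zero-filling matters mostly for containers that may read storage before elements are formally constructed (tbb::concurrent_vector is the usual client); for a plain std::vector it is redundant but harmless.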
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00462.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00462.html
new file mode 100644 (file)
index 0000000..4b3fe64
--- /dev/null
@@ -0,0 +1,206 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb_config.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb_config.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_tbb_config_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_tbb_config_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00033"></a>00033 <span class="preprocessor">#ifndef TBB_USE_DEBUG</span>
+<a name="l00034"></a>00034 <span class="preprocessor"></span><span class="preprocessor">#ifdef TBB_DO_ASSERT</span>
+<a name="l00035"></a>00035 <span class="preprocessor"></span><span class="preprocessor">#define TBB_USE_DEBUG TBB_DO_ASSERT</span>
+<a name="l00036"></a>00036 <span class="preprocessor"></span><span class="preprocessor">#else</span>
+<a name="l00037"></a>00037 <span class="preprocessor"></span><span class="preprocessor">#define TBB_USE_DEBUG 0</span>
+<a name="l00038"></a>00038 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* TBB_DO_ASSERT */</span>
+<a name="l00039"></a>00039 <span class="preprocessor">#else</span>
+<a name="l00040"></a>00040 <span class="preprocessor"></span><span class="preprocessor">#define TBB_DO_ASSERT TBB_USE_DEBUG</span>
+<a name="l00041"></a>00041 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_DEBUG */</span>
+<a name="l00042"></a>00042 
+<a name="l00043"></a>00043 <span class="preprocessor">#ifndef TBB_USE_ASSERT</span>
+<a name="l00044"></a>00044 <span class="preprocessor"></span><span class="preprocessor">#ifdef TBB_DO_ASSERT</span>
+<a name="l00045"></a>00045 <span class="preprocessor"></span><span class="preprocessor">#define TBB_USE_ASSERT TBB_DO_ASSERT</span>
+<a name="l00046"></a>00046 <span class="preprocessor"></span><span class="preprocessor">#else </span>
+<a name="l00047"></a>00047 <span class="preprocessor"></span><span class="preprocessor">#define TBB_USE_ASSERT TBB_USE_DEBUG</span>
+<a name="l00048"></a>00048 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* TBB_DO_ASSERT */</span>
+<a name="l00049"></a>00049 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00050"></a>00050 
+<a name="l00051"></a>00051 <span class="preprocessor">#ifndef TBB_USE_THREADING_TOOLS</span>
+<a name="l00052"></a>00052 <span class="preprocessor"></span><span class="preprocessor">#ifdef TBB_DO_THREADING_TOOLS</span>
+<a name="l00053"></a>00053 <span class="preprocessor"></span><span class="preprocessor">#define TBB_USE_THREADING_TOOLS TBB_DO_THREADING_TOOLS</span>
+<a name="l00054"></a>00054 <span class="preprocessor"></span><span class="preprocessor">#else </span>
+<a name="l00055"></a>00055 <span class="preprocessor"></span><span class="preprocessor">#define TBB_USE_THREADING_TOOLS TBB_USE_DEBUG</span>
+<a name="l00056"></a>00056 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* TBB_DO_THREADING_TOOLS */</span>
+<a name="l00057"></a>00057 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_THREADING_TOOLS */</span>
+<a name="l00058"></a>00058 
+<a name="l00059"></a>00059 <span class="preprocessor">#ifndef TBB_USE_PERFORMANCE_WARNINGS</span>
+<a name="l00060"></a>00060 <span class="preprocessor"></span><span class="preprocessor">#ifdef TBB_PERFORMANCE_WARNINGS</span>
+<a name="l00061"></a>00061 <span class="preprocessor"></span><span class="preprocessor">#define TBB_USE_PERFORMANCE_WARNINGS TBB_PERFORMANCE_WARNINGS</span>
+<a name="l00062"></a>00062 <span class="preprocessor"></span><span class="preprocessor">#else </span>
+<a name="l00063"></a>00063 <span class="preprocessor"></span><span class="preprocessor">#define TBB_USE_PERFORMANCE_WARNINGS TBB_USE_DEBUG</span>
+<a name="l00064"></a>00064 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* TBB_PEFORMANCE_WARNINGS */</span>
+<a name="l00065"></a>00065 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_PERFORMANCE_WARNINGS */</span>
+<a name="l00066"></a>00066 
+<a name="l00067"></a>00067 <span class="preprocessor">#if !defined(__EXCEPTIONS) &amp;&amp; !defined(_CPPUNWIND) &amp;&amp; !defined(__SUNPRO_CC) || defined(_XBOX)</span>
+<a name="l00068"></a>00068 <span class="preprocessor"></span><span class="preprocessor">    #if TBB_USE_EXCEPTIONS</span>
+<a name="l00069"></a>00069 <span class="preprocessor"></span><span class="preprocessor">        #error Compilation settings do not support exception handling. Please do not set TBB_USE_EXCEPTIONS macro or set it to 0.</span>
+<a name="l00070"></a>00070 <span class="preprocessor"></span><span class="preprocessor">    #elif !defined(TBB_USE_EXCEPTIONS)</span>
+<a name="l00071"></a>00071 <span class="preprocessor"></span><span class="preprocessor">        #define TBB_USE_EXCEPTIONS 0</span>
+<a name="l00072"></a>00072 <span class="preprocessor"></span><span class="preprocessor">    #endif</span>
+<a name="l00073"></a>00073 <span class="preprocessor"></span><span class="preprocessor">#elif !defined(TBB_USE_EXCEPTIONS)</span>
+<a name="l00074"></a>00074 <span class="preprocessor"></span><span class="preprocessor">    #define TBB_USE_EXCEPTIONS 1</span>
+<a name="l00075"></a>00075 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00076"></a>00076 <span class="preprocessor"></span>
+<a name="l00077"></a>00077 <span class="preprocessor">#ifndef TBB_IMPLEMENT_CPP0X</span>
+<a name="l00078"></a>00078 <span class="preprocessor"></span>
+<a name="l00079"></a>00079 <span class="preprocessor">    #if __GNUC__==4 &amp;&amp; __GNUC_MINOR__&gt;=4 &amp;&amp; __GXX_EXPERIMENTAL_CXX0X__</span>
+<a name="l00080"></a>00080 <span class="preprocessor"></span><span class="preprocessor">        #define TBB_IMPLEMENT_CPP0X 0</span>
+<a name="l00081"></a>00081 <span class="preprocessor"></span><span class="preprocessor">    #else</span>
+<a name="l00082"></a>00082 <span class="preprocessor"></span><span class="preprocessor">        #define TBB_IMPLEMENT_CPP0X 1</span>
+<a name="l00083"></a>00083 <span class="preprocessor"></span><span class="preprocessor">    #endif</span>
+<a name="l00084"></a>00084 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* TBB_IMPLEMENT_CPP0X */</span>
+<a name="l00085"></a>00085 
+<a name="l00088"></a>00088 <span class="preprocessor">#ifndef __TBB_COUNT_TASK_NODES</span>
+<a name="l00089"></a>00089 <span class="preprocessor"></span><span class="preprocessor">    #define __TBB_COUNT_TASK_NODES TBB_USE_ASSERT</span>
+<a name="l00090"></a>00090 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00091"></a>00091 <span class="preprocessor"></span>
+<a name="l00092"></a>00092 <span class="preprocessor">#ifndef __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00093"></a>00093 <span class="preprocessor"></span><span class="preprocessor">    #define __TBB_TASK_GROUP_CONTEXT 1</span>
+<a name="l00094"></a>00094 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* __TBB_TASK_GROUP_CONTEXT */</span>
+<a name="l00095"></a>00095 
+<a name="l00096"></a>00096 <span class="preprocessor">#ifndef __TBB_SCHEDULER_OBSERVER</span>
+<a name="l00097"></a>00097 <span class="preprocessor"></span><span class="preprocessor">    #define __TBB_SCHEDULER_OBSERVER 1</span>
+<a name="l00098"></a>00098 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* __TBB_SCHEDULER_OBSERVER */</span>
+<a name="l00099"></a>00099 
+<a name="l00100"></a>00100 <span class="preprocessor">#ifndef __TBB_ARENA_PER_MASTER</span>
+<a name="l00101"></a>00101 <span class="preprocessor"></span><span class="preprocessor">    #define __TBB_ARENA_PER_MASTER 1</span>
+<a name="l00102"></a>00102 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* __TBB_ARENA_PER_MASTER */</span>
+<a name="l00103"></a>00103 
+<a name="l00104"></a>00104 <span class="preprocessor">#if !defined(__TBB_SURVIVE_THREAD_SWITCH) &amp;&amp; (_WIN32 || _WIN64 || __linux__)</span>
+<a name="l00105"></a>00105 <span class="preprocessor"></span><span class="preprocessor">    #define __TBB_SURVIVE_THREAD_SWITCH 1</span>
+<a name="l00106"></a>00106 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* __TBB_SURVIVE_THREAD_SWITCH */</span>
+<a name="l00107"></a>00107 
+<a name="l00108"></a>00108 
+<a name="l00109"></a>00109 <span class="comment">/* TODO: The following condition should be extended as soon as new compilers/runtimes </span>
+<a name="l00110"></a>00110 <span class="comment">         with std::exception_ptr support appear. */</span>
+<a name="l00111"></a>00111 <span class="preprocessor">#define __TBB_EXCEPTION_PTR_PRESENT  (_MSC_VER &gt;= 1600 || __GXX_EXPERIMENTAL_CXX0X__ &amp;&amp; (__GNUC__==4 &amp;&amp; __GNUC_MINOR__&gt;=4))</span>
+<a name="l00112"></a>00112 <span class="preprocessor"></span>
+<a name="l00113"></a>00113 
+<a name="l00114"></a>00114 <span class="preprocessor">#ifndef TBB_USE_CAPTURED_EXCEPTION</span>
+<a name="l00115"></a>00115 <span class="preprocessor"></span><span class="preprocessor">    #if __TBB_EXCEPTION_PTR_PRESENT</span>
+<a name="l00116"></a>00116 <span class="preprocessor"></span><span class="preprocessor">        #define TBB_USE_CAPTURED_EXCEPTION 0</span>
+<a name="l00117"></a>00117 <span class="preprocessor"></span><span class="preprocessor">    #else</span>
+<a name="l00118"></a>00118 <span class="preprocessor"></span><span class="preprocessor">        #define TBB_USE_CAPTURED_EXCEPTION 1</span>
+<a name="l00119"></a>00119 <span class="preprocessor"></span><span class="preprocessor">    #endif</span>
+<a name="l00120"></a>00120 <span class="preprocessor"></span><span class="preprocessor">#else </span><span class="comment">/* defined TBB_USE_CAPTURED_EXCEPTION */</span>
+<a name="l00121"></a>00121 <span class="preprocessor">    #if !TBB_USE_CAPTURED_EXCEPTION &amp;&amp; !__TBB_EXCEPTION_PTR_PRESENT</span>
+<a name="l00122"></a>00122 <span class="preprocessor"></span><span class="preprocessor">        #error Current runtime does not support std::exception_ptr. Set TBB_USE_CAPTURED_EXCEPTION and make sure that your code is ready to catch tbb::captured_exception.</span>
+<a name="l00123"></a>00123 <span class="preprocessor"></span><span class="preprocessor">    #endif</span>
+<a name="l00124"></a>00124 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* defined TBB_USE_CAPTURED_EXCEPTION */</span>
+<a name="l00125"></a>00125 
+<a name="l00126"></a>00126 
+<a name="l00127"></a>00127 <span class="preprocessor">#ifndef __TBB_DEFAULT_PARTITIONER</span>
+<a name="l00128"></a>00128 <span class="preprocessor"></span><span class="preprocessor">#if TBB_DEPRECATED</span>
+<a name="l00129"></a>00129 <span class="preprocessor"></span>
+<a name="l00130"></a>00130 <span class="preprocessor">#define __TBB_DEFAULT_PARTITIONER tbb::simple_partitioner</span>
+<a name="l00131"></a>00131 <span class="preprocessor"></span><span class="preprocessor">#else</span>
+<a name="l00132"></a>00132 <span class="preprocessor"></span>
+<a name="l00133"></a>00133 <span class="preprocessor">#define __TBB_DEFAULT_PARTITIONER tbb::auto_partitioner</span>
+<a name="l00134"></a>00134 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* TBB_DEFAULT_PARTITIONER */</span>
+<a name="l00135"></a>00135 <span class="preprocessor">#endif </span><span class="comment">/* !defined(__TBB_DEFAULT_PARTITIONER */</span>
+<a name="l00136"></a>00136 
+<a name="l00139"></a>00139 <span class="preprocessor">#if __GNUC__==4 &amp;&amp; __GNUC_MINOR__&gt;=4 &amp;&amp; !defined(__INTEL_COMPILER)</span>
+<a name="l00140"></a>00140 <span class="preprocessor"></span><span class="preprocessor">    #define __TBB_GCC_WARNING_SUPPRESSION_ENABLED 1</span>
+<a name="l00141"></a>00141 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00142"></a>00142 <span class="preprocessor"></span>
+<a name="l00149"></a>00149 <span class="preprocessor">#if _MSC_VER &amp;&amp; __INTEL_COMPILER &amp;&amp; (__INTEL_COMPILER&lt;1110 || __INTEL_COMPILER==1110 &amp;&amp; __INTEL_COMPILER_BUILD_DATE &lt; 20091012)</span>
+<a name="l00150"></a>00150 <span class="preprocessor"></span>
+<a name="l00153"></a>00153 <span class="preprocessor">    #define __TBB_DEFAULT_DTOR_THROW_SPEC_BROKEN 1</span>
+<a name="l00154"></a>00154 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00155"></a>00155 <span class="preprocessor"></span>
+<a name="l00156"></a>00156 <span class="preprocessor">#if defined(_MSC_VER) &amp;&amp; _MSC_VER &lt; 1500 &amp;&amp; !defined(__INTEL_COMPILER)</span>
+<a name="l00157"></a>00157 <span class="preprocessor"></span>
+<a name="l00159"></a>00159 <span class="preprocessor">    #define __TBB_TEMPLATE_FRIENDS_BROKEN 1</span>
+<a name="l00160"></a>00160 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00161"></a>00161 <span class="preprocessor"></span>
+<a name="l00162"></a>00162 <span class="preprocessor">#if __GLIBC__==2 &amp;&amp; __GLIBC_MINOR__==3 || __MINGW32__</span>
+<a name="l00164"></a>00164 <span class="preprocessor"></span>
+<a name="l00165"></a>00165 <span class="preprocessor">    #define __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN 1</span>
+<a name="l00166"></a>00166 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00167"></a>00167 <span class="preprocessor"></span>
+<a name="l00168"></a>00168 <span class="preprocessor">#if (_WIN32||_WIN64) &amp;&amp; __INTEL_COMPILER == 1110</span>
+<a name="l00169"></a>00169 <span class="preprocessor"></span>
+<a name="l00170"></a>00170 <span class="preprocessor">    #define __TBB_ICL_11_1_CODE_GEN_BROKEN 1</span>
+<a name="l00171"></a>00171 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00172"></a>00172 <span class="preprocessor"></span>
+<a name="l00173"></a>00173 <span class="preprocessor">#if __GNUC__==3 &amp;&amp; __GNUC_MINOR__==3 &amp;&amp; !defined(__INTEL_COMPILER)</span>
+<a name="l00174"></a>00174 <span class="preprocessor"></span>
+<a name="l00175"></a>00175 <span class="preprocessor">    #define __TBB_GCC_3_3_PROTECTED_BROKEN 1</span>
+<a name="l00176"></a>00176 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00177"></a>00177 <span class="preprocessor"></span>
+<a name="l00178"></a>00178 <span class="preprocessor">#if __MINGW32__ &amp;&amp; (__GNUC__&lt;4 || __GNUC__==4 &amp;&amp; __GNUC_MINOR__&lt;2)</span>
+<a name="l00179"></a>00179 <span class="preprocessor"></span>
+<a name="l00181"></a>00181 <span class="preprocessor">    #define __TBB_SSE_STACK_ALIGNMENT_BROKEN 1</span>
+<a name="l00182"></a>00182 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00183"></a>00183 <span class="preprocessor"></span>
+<a name="l00184"></a>00184 <span class="preprocessor">#if __FreeBSD__</span>
+<a name="l00185"></a>00185 <span class="preprocessor"></span>
+<a name="l00187"></a>00187 <span class="preprocessor">    #define __TBB_PRIO_INHERIT_BROKEN 1</span>
+<a name="l00188"></a>00188 <span class="preprocessor"></span>
+<a name="l00191"></a>00191 <span class="preprocessor">    #define __TBB_PLACEMENT_NEW_EXCEPTION_SAFETY_BROKEN 1</span>
+<a name="l00192"></a>00192 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* __FreeBSD__ */</span>
+<a name="l00193"></a>00193 
+<a name="l00194"></a>00194 <span class="preprocessor">#if (__linux__ || __APPLE__) &amp;&amp; __i386__ &amp;&amp; defined(__INTEL_COMPILER)</span>
+<a name="l00195"></a>00195 <span class="preprocessor"></span>
+<a name="l00197"></a>00197 <span class="preprocessor">    #define __TBB_ICC_ASM_VOLATILE_BROKEN 1</span>
+<a name="l00198"></a>00198 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00199"></a>00199 <span class="preprocessor"></span>
+<a name="l00200"></a>00200 <span class="preprocessor">#if __LRB__ || TARGET_ARCH_MIC || TARGET_ARCH_LRB</span>
+<a name="l00201"></a>00201 <span class="preprocessor"></span><span class="preprocessor">#include "tbb_config_lrb.h"</span>
+<a name="l00202"></a>00202 <span class="preprocessor">#endif</span>
+<a name="l00203"></a>00203 <span class="preprocessor"></span>
+<a name="l00204"></a>00204 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_tbb_config_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
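tbb_config.h is the central configuration header: the other public TBB headers reach it through tbb_stddef.h (tbb_exception.h below, for instance, tests TBB_USE_EXCEPTIONS right after including tbb_stddef.h). Because every definition above is wrapped in an #ifndef guard, user code may pre-define any of the TBB_USE_* macros before the first TBB include to override the derived defaults. A small sketch, with illustrative output strings only:

    // Sketch only: pre-define configuration knobs before any TBB include;
    // tbb_config.h keeps whatever is already defined.
    #define TBB_USE_DEBUG 0   // 1 would also switch on TBB_USE_ASSERT,
                              // TBB_USE_THREADING_TOOLS and
                              // TBB_USE_PERFORMANCE_WARNINGS by default
    #include "tbb/tbb_config.h"
    #include <cstdio>

    int main() {
        // After the include, all three macros are defined to 0 or 1 and can
        // be tested directly.
        std::printf( "TBB_USE_DEBUG=%d TBB_USE_ASSERT=%d TBB_USE_EXCEPTIONS=%d\n",
                     (int)TBB_USE_DEBUG, (int)TBB_USE_ASSERT, (int)TBB_USE_EXCEPTIONS );
    #if !TBB_USE_EXCEPTIONS
        std::printf( "this build will not use C++ exceptions for TBB error reporting\n" );
    #endif
        return 0;
    }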
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00463.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00463.html
new file mode 100644 (file)
index 0000000..6449fe7
--- /dev/null
@@ -0,0 +1,68 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb_config_lrb.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb_config_lrb.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_tbb_config_lrb_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_tbb_config_lrb_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#if ! (__LRB__ || TARGET_ARCH_MIC || TARGET_ARCH_LRB)</span>
+<a name="l00025"></a>00025 <span class="preprocessor"></span><span class="preprocessor">    #error tbb_config_lrb.h should be included only when building for LRB platform</span>
+<a name="l00026"></a>00026 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00027"></a>00027 <span class="preprocessor"></span>
+<a name="l00028"></a>00028 <span class="preprocessor">#if TARGET_ARCH_MIC || TARGET_ARCH_LRB || __TBB_MIC_NATIVE</span>
+<a name="l00029"></a>00029 <span class="preprocessor"></span>
+<a name="l00030"></a>00030 <span class="preprocessor">#define __TBB_full_memory_fence __TBB_release_consistency_helper</span>
+<a name="l00031"></a>00031 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_Pause(x) _mm_delay_32(x)</span>
+<a name="l00032"></a>00032 <span class="preprocessor"></span>
+<a name="l00033"></a>00033 <span class="preprocessor">#if !__FreeBSD__</span>
+<a name="l00034"></a>00034 <span class="preprocessor"></span><span class="preprocessor">    #error LRB compiler does not define __FreeBSD__ anymore. Check for the __TBB_XXX_BROKEN defined under __FreeBSD__</span>
+<a name="l00035"></a>00035 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* !__FreeBSD__ */</span>
+<a name="l00036"></a>00036 <span class="preprocessor">#endif </span><span class="comment">/* TARGET_ARCH_MIC */</span>
+<a name="l00037"></a>00037 
+<a name="l00038"></a>00038 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_tbb_config_lrb_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00464.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00464.html
new file mode 100644 (file)
index 0000000..52485f1
--- /dev/null
@@ -0,0 +1,318 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb_exception.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb_exception.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_exception_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_exception_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "tbb_stddef.h"</span>
+<a name="l00025"></a>00025 
+<a name="l00026"></a>00026 <span class="preprocessor">#if !TBB_USE_EXCEPTIONS &amp;&amp; _MSC_VER</span>
+<a name="l00027"></a>00027 <span class="preprocessor"></span>    <span class="comment">// Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers</span>
+<a name="l00028"></a>00028 <span class="preprocessor">    #pragma warning (push)</span>
+<a name="l00029"></a>00029 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (disable: 4530)</span>
+<a name="l00030"></a>00030 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00031"></a>00031 <span class="preprocessor"></span>
+<a name="l00032"></a>00032 <span class="preprocessor">#include &lt;stdexcept&gt;</span>
+<a name="l00033"></a>00033 <span class="preprocessor">#include &lt;string&gt;</span> <span class="comment">// required to construct std exception classes</span>
+<a name="l00034"></a>00034 
+<a name="l00035"></a>00035 <span class="preprocessor">#if !TBB_USE_EXCEPTIONS &amp;&amp; _MSC_VER</span>
+<a name="l00036"></a>00036 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (pop)</span>
+<a name="l00037"></a>00037 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00038"></a>00038 <span class="preprocessor"></span>
+<a name="l00039"></a>00039 <span class="keyword">namespace </span>tbb {
+<a name="l00040"></a>00040 
+<a name="l00042"></a><a class="code" href="a00151.html">00042</a> <span class="keyword">class </span><a class="code" href="a00151.html">bad_last_alloc</a> : <span class="keyword">public</span> std::bad_alloc {
+<a name="l00043"></a>00043 <span class="keyword">public</span>:
+<a name="l00044"></a>00044     <span class="comment">/*override*/</span> <span class="keyword">const</span> <span class="keywordtype">char</span>* what() <span class="keyword">const</span> <span class="keywordflow">throw</span>();
+<a name="l00045"></a>00045 <span class="preprocessor">#if __TBB_DEFAULT_DTOR_THROW_SPEC_BROKEN</span>
+<a name="l00046"></a>00046 <span class="preprocessor"></span>    <span class="comment">/*override*/</span> ~<a class="code" href="a00151.html">bad_last_alloc</a>() <span class="keywordflow">throw</span>() {}
+<a name="l00047"></a>00047 <span class="preprocessor">#endif</span>
+<a name="l00048"></a>00048 <span class="preprocessor"></span>};
+<a name="l00049"></a>00049 
+<a name="l00051"></a><a class="code" href="a00173.html">00051</a> <span class="keyword">class </span><a class="code" href="a00173.html">improper_lock</a> : <span class="keyword">public</span> std::exception {
+<a name="l00052"></a>00052 <span class="keyword">public</span>:
+<a name="l00053"></a>00053     <span class="comment">/*override*/</span> <span class="keyword">const</span> <span class="keywordtype">char</span>* what() <span class="keyword">const</span> <span class="keywordflow">throw</span>();
+<a name="l00054"></a>00054 };
+<a name="l00055"></a>00055 
+<a name="l00057"></a><a class="code" href="a00175.html">00057</a> <span class="keyword">class </span><a class="code" href="a00175.html">missing_wait</a> : <span class="keyword">public</span> std::exception {
+<a name="l00058"></a>00058 <span class="keyword">public</span>:
+<a name="l00059"></a>00059     <span class="comment">/*override*/</span> <span class="keyword">const</span> <span class="keywordtype">char</span>* what() <span class="keyword">const</span> <span class="keywordflow">throw</span>();
+<a name="l00060"></a>00060 };
+<a name="l00061"></a>00061 
+<a name="l00063"></a><a class="code" href="a00174.html">00063</a> <span class="keyword">class </span><a class="code" href="a00174.html">invalid_multiple_scheduling</a> : <span class="keyword">public</span> std::exception {
+<a name="l00064"></a>00064 <span class="keyword">public</span>:
+<a name="l00065"></a>00065     <span class="comment">/*override*/</span> <span class="keyword">const</span> <span class="keywordtype">char</span>* what() <span class="keyword">const</span> <span class="keywordflow">throw</span>();
+<a name="l00066"></a>00066 };
+<a name="l00067"></a>00067 
+<a name="l00068"></a>00068 <span class="keyword">namespace </span>internal {
+<a name="l00070"></a>00070 <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC throw_bad_last_alloc_exception_v4();
+<a name="l00071"></a>00071 
+<a name="l00072"></a>00072 <span class="keyword">enum</span> exception_id {
+<a name="l00073"></a>00073     eid_bad_alloc = 1,
+<a name="l00074"></a>00074     eid_bad_last_alloc,
+<a name="l00075"></a>00075     eid_nonpositive_step,
+<a name="l00076"></a>00076     eid_out_of_range,
+<a name="l00077"></a>00077     eid_segment_range_error,
+<a name="l00078"></a>00078     eid_index_range_error,
+<a name="l00079"></a>00079     eid_missing_wait,
+<a name="l00080"></a>00080     eid_invalid_multiple_scheduling,
+<a name="l00081"></a>00081     eid_improper_lock,
+<a name="l00082"></a>00082     eid_possible_deadlock,
+<a name="l00083"></a>00083     eid_operation_not_permitted,
+<a name="l00084"></a>00084     eid_condvar_wait_failed,
+<a name="l00085"></a>00085     eid_invalid_load_factor,
+<a name="l00086"></a>00086     eid_reserved, <span class="comment">// free slot for backward compatibility, can be reused.</span>
+<a name="l00087"></a>00087     eid_invalid_swap,
+<a name="l00088"></a>00088     eid_reservation_length_error,
+<a name="l00089"></a>00089     eid_invalid_key,
+<a name="l00091"></a>00091 
+<a name="l00093"></a>00093     eid_max
+<a name="l00094"></a>00094 };
+<a name="l00095"></a>00095 
+<a name="l00097"></a>00097 
+<a name="l00099"></a>00099 <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC throw_exception_v4 ( exception_id );
+<a name="l00100"></a>00100 
+<a name="l00102"></a>00102 <span class="keyword">inline</span> <span class="keywordtype">void</span> throw_exception ( exception_id eid ) { throw_exception_v4(eid); }
+<a name="l00103"></a>00103 
+<a name="l00104"></a>00104 } <span class="comment">// namespace internal</span>
+<a name="l00105"></a>00105 } <span class="comment">// namespace tbb</span>
+<a name="l00106"></a>00106 
+<a name="l00107"></a>00107 <span class="preprocessor">#if __TBB_TASK_GROUP_CONTEXT</span>
+<a name="l00108"></a>00108 <span class="preprocessor"></span><span class="preprocessor">#include "tbb_allocator.h"</span>
+<a name="l00109"></a>00109 <span class="preprocessor">#include &lt;exception&gt;</span>
+<a name="l00110"></a>00110 <span class="preprocessor">#include &lt;typeinfo&gt;</span>
+<a name="l00111"></a>00111 <span class="preprocessor">#include &lt;new&gt;</span>
+<a name="l00112"></a>00112 
+<a name="l00113"></a>00113 <span class="keyword">namespace </span>tbb {
+<a name="l00114"></a>00114 
+<a name="l00116"></a>00116 
+<a name="l00136"></a><a class="code" href="a00211.html">00136</a> <span class="keyword">class </span><a class="code" href="a00211.html">tbb_exception</a> : <span class="keyword">public</span> std::exception
+<a name="l00137"></a>00137 {
+<a name="l00141"></a>00141     <span class="keywordtype">void</span>* operator new ( size_t );
+<a name="l00142"></a>00142 
+<a name="l00143"></a>00143 <span class="keyword">public</span>:
+<a name="l00145"></a>00145 
+<a name="l00146"></a>00146     <span class="keyword">virtual</span> <a class="code" href="a00211.html">tbb_exception</a>* <a class="code" href="a00211.html#3e3482bf264d4ca4dde046cd9c02c766">move</a> () <span class="keywordflow">throw</span>() = 0;
+<a name="l00147"></a>00147     
+<a name="l00149"></a>00149 
+<a name="l00151"></a>00151     <span class="keyword">virtual</span> <span class="keywordtype">void</span> <a class="code" href="a00211.html#66c94938eca8bf88b76f3eccaaf215d8">destroy</a> () <span class="keywordflow">throw</span>() = 0;
+<a name="l00152"></a>00152 
+<a name="l00154"></a>00154 
+<a name="l00158"></a>00158     <span class="keyword">virtual</span> <span class="keywordtype">void</span> <a class="code" href="a00211.html#8588e07fa49692f4d734e4f2e4f048f4">throw_self</a> () = 0;
+<a name="l00159"></a>00159 
+<a name="l00161"></a>00161     <span class="keyword">virtual</span> <span class="keyword">const</span> <span class="keywordtype">char</span>* <a class="code" href="a00211.html#d00f6497e552fee978a02bfcbebf46e2">name</a>() <span class="keyword">const</span> <span class="keywordflow">throw</span>() = 0;
+<a name="l00162"></a>00162 
+<a name="l00164"></a>00164     <span class="keyword">virtual</span> <span class="keyword">const</span> <span class="keywordtype">char</span>* <a class="code" href="a00211.html#e8157689ecb66bc6c72d3618bf3cc371">what</a>() <span class="keyword">const</span> <span class="keywordflow">throw</span>() = 0;
+<a name="l00165"></a>00165 
+<a name="l00172"></a><a class="code" href="a00211.html#3f2da7f3d8a6e4c1df522af1213afb5a">00172</a>     <span class="keywordtype">void</span> <a class="code" href="a00211.html#3f2da7f3d8a6e4c1df522af1213afb5a">operator delete </a>( <span class="keywordtype">void</span>* p ) {
+<a name="l00173"></a>00173         internal::deallocate_via_handler_v3(p);
+<a name="l00174"></a>00174     }
+<a name="l00175"></a>00175 };
+<a name="l00176"></a>00176 
+<a name="l00178"></a>00178 
+<a name="l00182"></a><a class="code" href="a00157.html">00182</a> <span class="keyword">class </span><a class="code" href="a00157.html">captured_exception</a> : <span class="keyword">public</span> <a class="code" href="a00211.html">tbb_exception</a>
+<a name="l00183"></a>00183 {
+<a name="l00184"></a>00184 <span class="keyword">public</span>:
+<a name="l00185"></a>00185     <a class="code" href="a00157.html">captured_exception</a> ( <span class="keyword">const</span> <a class="code" href="a00157.html">captured_exception</a>&amp; src )
+<a name="l00186"></a>00186         : <a class="code" href="a00211.html">tbb_exception</a>(src), my_dynamic(<span class="keyword">false</span>)
+<a name="l00187"></a>00187     {
+<a name="l00188"></a>00188         set(src.<a class="code" href="a00157.html#6a57f9aa0bce72392492d109b9e1d941">my_exception_name</a>, src.<a class="code" href="a00157.html#788adf4e55cdad7abc178a48a1d47e69">my_exception_info</a>);
+<a name="l00189"></a>00189     }
+<a name="l00190"></a>00190 
+<a name="l00191"></a>00191     <a class="code" href="a00157.html">captured_exception</a> ( <span class="keyword">const</span> <span class="keywordtype">char</span>* name_, <span class="keyword">const</span> <span class="keywordtype">char</span>* info )
+<a name="l00192"></a>00192         : my_dynamic(<span class="keyword">false</span>)
+<a name="l00193"></a>00193     {
+<a name="l00194"></a>00194         set(name_, info);
+<a name="l00195"></a>00195     }
+<a name="l00196"></a>00196 
+<a name="l00197"></a>00197     __TBB_EXPORTED_METHOD ~<a class="code" href="a00157.html">captured_exception</a> () <span class="keywordflow">throw</span>() {
+<a name="l00198"></a>00198         clear();
+<a name="l00199"></a>00199     }
+<a name="l00200"></a>00200 
+<a name="l00201"></a>00201     <a class="code" href="a00157.html">captured_exception</a>&amp; operator= ( <span class="keyword">const</span> <a class="code" href="a00157.html">captured_exception</a>&amp; src ) {
+<a name="l00202"></a>00202         <span class="keywordflow">if</span> ( <span class="keyword">this</span> != &amp;src ) {
+<a name="l00203"></a>00203             clear();
+<a name="l00204"></a>00204             set(src.<a class="code" href="a00157.html#6a57f9aa0bce72392492d109b9e1d941">my_exception_name</a>, src.<a class="code" href="a00157.html#788adf4e55cdad7abc178a48a1d47e69">my_exception_info</a>);
+<a name="l00205"></a>00205         }
+<a name="l00206"></a>00206         <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00207"></a>00207     }
+<a name="l00208"></a>00208 
+<a name="l00209"></a>00209     <span class="comment">/*override*/</span> 
+<a name="l00210"></a>00210     <a class="code" href="a00157.html">captured_exception</a>* __TBB_EXPORTED_METHOD <a class="code" href="a00157.html#837a50b8f6a800bda225c39d1699643f">move</a> () <span class="keywordflow">throw</span>();
+<a name="l00211"></a>00211 
+<a name="l00212"></a>00212     <span class="comment">/*override*/</span> 
+<a name="l00213"></a>00213     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD <a class="code" href="a00157.html#93d875d3555502ff6f18513525de204c">destroy</a> () <span class="keywordflow">throw</span>();
+<a name="l00214"></a>00214 
+<a name="l00215"></a>00215     <span class="comment">/*override*/</span> 
+<a name="l00216"></a><a class="code" href="a00157.html#2dd1be66ab32fa27e0ddef5707fa67ef">00216</a>     <span class="keywordtype">void</span> <a class="code" href="a00157.html#2dd1be66ab32fa27e0ddef5707fa67ef">throw_self</a> () { __TBB_THROW(*<span class="keyword">this</span>); }
+<a name="l00217"></a>00217 
+<a name="l00218"></a>00218     <span class="comment">/*override*/</span> 
+<a name="l00219"></a>00219     <span class="keyword">const</span> <span class="keywordtype">char</span>* __TBB_EXPORTED_METHOD <a class="code" href="a00157.html#5af82fd677449c5ca727fa1d7e16f9f5">name</a>() const throw();
+<a name="l00220"></a>00220 
+<a name="l00221"></a>00221     <span class="comment">/*override*/</span> 
+<a name="l00222"></a>00222     const <span class="keywordtype">char</span>* __TBB_EXPORTED_METHOD <a class="code" href="a00157.html#6b5988ef74a1fe2a58998d110b3633e0">what</a>() const throw();
+<a name="l00223"></a>00223 
+<a name="l00224"></a>00224     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD set ( const <span class="keywordtype">char</span>* <a class="code" href="a00157.html#5af82fd677449c5ca727fa1d7e16f9f5">name</a>, const <span class="keywordtype">char</span>* info ) throw();
+<a name="l00225"></a>00225     <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD clear () throw();
+<a name="l00226"></a>00226 
+<a name="l00227"></a>00227 private:
+<a name="l00229"></a>00229     <a class="code" href="a00157.html">captured_exception</a>() {}
+<a name="l00230"></a>00230 
+<a name="l00232"></a>00232     <span class="keyword">static</span> captured_exception* allocate ( <span class="keyword">const</span> <span class="keywordtype">char</span>* <a class="code" href="a00157.html#5af82fd677449c5ca727fa1d7e16f9f5">name</a>, <span class="keyword">const</span> <span class="keywordtype">char</span>* info );
+<a name="l00233"></a>00233 
+<a name="l00234"></a>00234     <span class="keywordtype">bool</span> my_dynamic;
+<a name="l00235"></a>00235     <span class="keyword">const</span> <span class="keywordtype">char</span>* my_exception_name;
+<a name="l00236"></a>00236     <span class="keyword">const</span> <span class="keywordtype">char</span>* my_exception_info;
+<a name="l00237"></a>00237 };
+<a name="l00238"></a>00238 
+<a name="l00240"></a>00240 
+<a name="l00244"></a>00244 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> ExceptionData&gt;
+<a name="l00245"></a><a class="code" href="a00176.html">00245</a> <span class="keyword">class </span><a class="code" href="a00176.html">movable_exception</a> : <span class="keyword">public</span> <a class="code" href="a00211.html">tbb_exception</a>
+<a name="l00246"></a>00246 {
+<a name="l00247"></a>00247     <span class="keyword">typedef</span> <a class="code" href="a00176.html">movable_exception&lt;ExceptionData&gt;</a> <a class="code" href="a00176.html">self_type</a>;
+<a name="l00248"></a>00248 
+<a name="l00249"></a>00249 <span class="keyword">public</span>:
+<a name="l00250"></a>00250     <a class="code" href="a00176.html">movable_exception</a> ( <span class="keyword">const</span> ExceptionData&amp; data_ ) 
+<a name="l00251"></a>00251         : <a class="code" href="a00176.html#a8c0ae2089ae784b28907cf748b89416">my_exception_data</a>(data_)
+<a name="l00252"></a>00252         , my_dynamic(<span class="keyword">false</span>)
+<a name="l00253"></a>00253         , my_exception_name(
+<a name="l00254"></a>00254 #<span class="keywordflow">if</span> TBB_USE_EXCEPTIONS
+<a name="l00255"></a>00255         <span class="keyword">typeid</span>(<a class="code" href="a00176.html">self_type</a>).<a class="code" href="a00176.html#bc5f5c4739b17ac5211ac58226c2f5a5">name</a>()
+<a name="l00256"></a>00256 #<span class="keywordflow">else</span> <span class="comment">/* !TBB_USE_EXCEPTIONS */</span>
+<a name="l00257"></a>00257         <span class="stringliteral">"movable_exception"</span>
+<a name="l00258"></a>00258 #endif <span class="comment">/* !TBB_USE_EXCEPTIONS */</span>
+<a name="l00259"></a>00259         )
+<a name="l00260"></a>00260     {}
+<a name="l00261"></a>00261 
+<a name="l00262"></a>00262     <a class="code" href="a00176.html">movable_exception</a> ( <span class="keyword">const</span> <a class="code" href="a00176.html">movable_exception</a>&amp; src ) <span class="keywordflow">throw</span> () 
+<a name="l00263"></a>00263         : <a class="code" href="a00211.html">tbb_exception</a>(src)
+<a name="l00264"></a>00264         , <a class="code" href="a00176.html#a8c0ae2089ae784b28907cf748b89416">my_exception_data</a>(src.<a class="code" href="a00176.html#a8c0ae2089ae784b28907cf748b89416">my_exception_data</a>)
+<a name="l00265"></a>00265         , my_dynamic(<span class="keyword">false</span>)
+<a name="l00266"></a>00266         , my_exception_name(src.<a class="code" href="a00176.html#5f5843b501829ff824c9c8d28c8283eb">my_exception_name</a>)
+<a name="l00267"></a>00267     {}
+<a name="l00268"></a>00268 
+<a name="l00269"></a>00269     ~<a class="code" href="a00176.html">movable_exception</a> () <span class="keywordflow">throw</span>() {}
+<a name="l00270"></a>00270 
+<a name="l00271"></a>00271     <span class="keyword">const</span> <a class="code" href="a00176.html">movable_exception</a>&amp; operator= ( <span class="keyword">const</span> <a class="code" href="a00176.html">movable_exception</a>&amp; src ) {
+<a name="l00272"></a>00272         <span class="keywordflow">if</span> ( <span class="keyword">this</span> != &amp;src ) {
+<a name="l00273"></a>00273             <a class="code" href="a00176.html#a8c0ae2089ae784b28907cf748b89416">my_exception_data</a> = src.<a class="code" href="a00176.html#a8c0ae2089ae784b28907cf748b89416">my_exception_data</a>;
+<a name="l00274"></a>00274             my_exception_name = src.<a class="code" href="a00176.html#5f5843b501829ff824c9c8d28c8283eb">my_exception_name</a>;
+<a name="l00275"></a>00275         }
+<a name="l00276"></a>00276         <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00277"></a>00277     }
+<a name="l00278"></a>00278 
+<a name="l00279"></a>00279     ExceptionData&amp; data () <span class="keywordflow">throw</span>() { <span class="keywordflow">return</span> <a class="code" href="a00176.html#a8c0ae2089ae784b28907cf748b89416">my_exception_data</a>; }
+<a name="l00280"></a>00280 
+<a name="l00281"></a>00281     <span class="keyword">const</span> ExceptionData&amp; data () <span class="keyword">const</span> <span class="keywordflow">throw</span>() { <span class="keywordflow">return</span> <a class="code" href="a00176.html#a8c0ae2089ae784b28907cf748b89416">my_exception_data</a>; }
+<a name="l00282"></a>00282 
+<a name="l00283"></a><a class="code" href="a00176.html#bc5f5c4739b17ac5211ac58226c2f5a5">00283</a>     <span class="comment">/*override*/</span> <span class="keyword">const</span> <span class="keywordtype">char</span>* <a class="code" href="a00176.html#bc5f5c4739b17ac5211ac58226c2f5a5">name</a> () const throw() { <span class="keywordflow">return</span> my_exception_name; }
+<a name="l00284"></a>00284 
+<a name="l00285"></a><a class="code" href="a00176.html#b33a89bccf0c63106f1270c7bfaaf54f">00285</a>     <span class="comment">/*override*/</span> <span class="keyword">const</span> <span class="keywordtype">char</span>* <a class="code" href="a00176.html#b33a89bccf0c63106f1270c7bfaaf54f">what</a> () const throw() { <span class="keywordflow">return</span> <span class="stringliteral">"tbb::movable_exception"</span>; }
+<a name="l00286"></a>00286 
+<a name="l00287"></a>00287     <span class="comment">/*override*/</span> 
+<a name="l00288"></a><a class="code" href="a00176.html#1aea0ad179d6f0481fe7f3495f66adf9">00288</a>     <a class="code" href="a00176.html">movable_exception</a>* <a class="code" href="a00176.html#1aea0ad179d6f0481fe7f3495f66adf9">move</a> () throw() {
+<a name="l00289"></a>00289         <span class="keywordtype">void</span>* e = internal::allocate_via_handler_v3(<span class="keyword">sizeof</span>(<a class="code" href="a00176.html">movable_exception</a>));
+<a name="l00290"></a>00290         <span class="keywordflow">if</span> ( e ) {
+<a name="l00291"></a>00291             ::new (e) movable_exception(*<span class="keyword">this</span>);
+<a name="l00292"></a>00292             ((movable_exception*)e)-&gt;my_dynamic = <span class="keyword">true</span>;
+<a name="l00293"></a>00293         }
+<a name="l00294"></a>00294         <span class="keywordflow">return</span> (movable_exception*)e;
+<a name="l00295"></a>00295     }
+<a name="l00296"></a>00296     <span class="comment">/*override*/</span> 
+<a name="l00297"></a><a class="code" href="a00176.html#7a46873119d9f85a7b0009c13e41a258">00297</a>     <span class="keywordtype">void</span> <a class="code" href="a00176.html#7a46873119d9f85a7b0009c13e41a258">destroy</a> () throw() {
+<a name="l00298"></a>00298         __TBB_ASSERT ( my_dynamic, <span class="stringliteral">"Method destroy can be called only on dynamically allocated movable_exceptions"</span> );
+<a name="l00299"></a>00299         <span class="keywordflow">if</span> ( my_dynamic ) {
+<a name="l00300"></a>00300             this-&gt;~<a class="code" href="a00176.html">movable_exception</a>();
+<a name="l00301"></a>00301             internal::deallocate_via_handler_v3(<span class="keyword">this</span>);
+<a name="l00302"></a>00302         }
+<a name="l00303"></a>00303     }
+<a name="l00304"></a>00304     <span class="comment">/*override*/</span> 
+<a name="l00305"></a><a class="code" href="a00176.html#17cffba35811c92b7e65d63506b69602">00305</a>     <span class="keywordtype">void</span> <a class="code" href="a00176.html#17cffba35811c92b7e65d63506b69602">throw_self</a> () { __TBB_THROW( *<span class="keyword">this</span> ); }
+<a name="l00306"></a>00306 
+<a name="l00307"></a>00307 <span class="keyword">protected</span>:
+<a name="l00309"></a><a class="code" href="a00176.html#a8c0ae2089ae784b28907cf748b89416">00309</a>     ExceptionData  <a class="code" href="a00176.html#a8c0ae2089ae784b28907cf748b89416">my_exception_data</a>;
+<a name="l00310"></a>00310 
+<a name="l00311"></a>00311 <span class="keyword">private</span>:
+<a name="l00313"></a>00313     <span class="keywordtype">bool</span> my_dynamic;
+<a name="l00314"></a>00314 
+<a name="l00316"></a>00316 
+<a name="l00317"></a>00317     <span class="keyword">const</span> <span class="keywordtype">char</span>* my_exception_name;
+<a name="l00318"></a>00318 };
+<a name="l00319"></a>00319 
+<a name="l00320"></a>00320 <span class="preprocessor">#if !TBB_USE_CAPTURED_EXCEPTION</span>
+<a name="l00321"></a>00321 <span class="preprocessor"></span><span class="keyword">namespace </span>internal {
+<a name="l00322"></a>00322 
+<a name="l00324"></a>00324 
+<a name="l00326"></a><a class="code" href="a00212.html">00326</a> <span class="keyword">class </span><a class="code" href="a00212.html">tbb_exception_ptr</a> {
+<a name="l00327"></a>00327     std::exception_ptr  my_ptr;
+<a name="l00328"></a>00328 
+<a name="l00329"></a>00329 <span class="keyword">public</span>:
+<a name="l00330"></a>00330     <span class="keyword">static</span> <a class="code" href="a00212.html">tbb_exception_ptr</a>* allocate ();
+<a name="l00331"></a>00331     <span class="keyword">static</span> <a class="code" href="a00212.html">tbb_exception_ptr</a>* allocate ( <span class="keyword">const</span> <a class="code" href="a00211.html">tbb_exception</a>&amp; tag );
+<a name="l00333"></a>00333     <span class="keyword">static</span> <a class="code" href="a00212.html">tbb_exception_ptr</a>* allocate ( <a class="code" href="a00157.html">captured_exception</a>&amp; src );
+<a name="l00334"></a>00334     
+<a name="l00336"></a>00336 
+<a name="l00337"></a>00337     <span class="keywordtype">void</span> <a class="code" href="a00212.html#921875bbacd2c8a5f324c7da7a415262">destroy</a> () <span class="keywordflow">throw</span>();
+<a name="l00338"></a>00338 
+<a name="l00340"></a><a class="code" href="a00212.html#292832fd5c523e3d8081a22247840a1d">00340</a>     <span class="keywordtype">void</span> <a class="code" href="a00212.html#292832fd5c523e3d8081a22247840a1d">throw_self</a> () { std::rethrow_exception(my_ptr); }
+<a name="l00341"></a>00341 
+<a name="l00342"></a>00342 <span class="keyword">private</span>:
+<a name="l00343"></a>00343     <a class="code" href="a00212.html">tbb_exception_ptr</a> ( <span class="keyword">const</span> std::exception_ptr&amp; src ) : my_ptr(src) {}
+<a name="l00344"></a>00344     tbb_exception_ptr ( <span class="keyword">const</span> <a class="code" href="a00157.html">captured_exception</a>&amp; src ) : my_ptr(std::copy_exception(src)) {}
+<a name="l00345"></a>00345 }; <span class="comment">// class tbb::internal::tbb_exception_ptr</span>
+<a name="l00346"></a>00346 
+<a name="l00347"></a>00347 } <span class="comment">// namespace internal</span>
+<a name="l00348"></a>00348 <span class="preprocessor">#endif </span><span class="comment">/* !TBB_USE_CAPTURED_EXCEPTION */</span>
+<a name="l00349"></a>00349 
+<a name="l00350"></a>00350 } <span class="comment">// namespace tbb</span>
+<a name="l00351"></a>00351 
+<a name="l00352"></a>00352 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_TASK_GROUP_CONTEXT */</span>
+<a name="l00353"></a>00353 
+<a name="l00354"></a>00354 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_exception_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00465.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00465.html
new file mode 100644 (file)
index 0000000..da2c183
--- /dev/null
@@ -0,0 +1,692 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb_machine.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb_machine.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_machine_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_machine_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "tbb_stddef.h"</span>
+<a name="l00025"></a>00025 
+<a name="l00026"></a>00026 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00027"></a>00027 <span class="preprocessor"></span>
+<a name="l00028"></a>00028 <span class="preprocessor">#ifdef _MANAGED</span>
+<a name="l00029"></a>00029 <span class="preprocessor"></span><span class="preprocessor">#pragma managed(push, off)</span>
+<a name="l00030"></a>00030 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00031"></a>00031 <span class="preprocessor"></span>
+<a name="l00032"></a>00032 <span class="preprocessor">#if __MINGW64__</span>
+<a name="l00033"></a>00033 <span class="preprocessor"></span><span class="preprocessor">#include "machine/linux_intel64.h"</span>
+<a name="l00034"></a>00034 <span class="keyword">extern</span> <span class="stringliteral">"C"</span> __declspec(dllimport) int __stdcall SwitchToThread( <span class="keywordtype">void</span> );
+<a name="l00035"></a>00035 <span class="preprocessor">#define __TBB_Yield()  SwitchToThread()</span>
+<a name="l00036"></a>00036 <span class="preprocessor"></span><span class="preprocessor">#elif __MINGW32__</span>
+<a name="l00037"></a>00037 <span class="preprocessor"></span><span class="preprocessor">#include "machine/linux_ia32.h"</span>
+<a name="l00038"></a>00038 <span class="keyword">extern</span> <span class="stringliteral">"C"</span> __declspec(dllimport) int __stdcall SwitchToThread( <span class="keywordtype">void</span> );
+<a name="l00039"></a>00039 <span class="preprocessor">#define __TBB_Yield()  SwitchToThread()</span>
+<a name="l00040"></a>00040 <span class="preprocessor"></span><span class="preprocessor">#elif defined(_M_IX86)</span>
+<a name="l00041"></a>00041 <span class="preprocessor"></span><span class="preprocessor">#include "machine/windows_ia32.h"</span>
+<a name="l00042"></a>00042 <span class="preprocessor">#elif defined(_M_AMD64) </span>
+<a name="l00043"></a>00043 <span class="preprocessor"></span><span class="preprocessor">#include "machine/windows_intel64.h"</span>
+<a name="l00044"></a>00044 <span class="preprocessor">#elif _XBOX </span>
+<a name="l00045"></a>00045 <span class="preprocessor"></span><span class="preprocessor">#include "machine/xbox360_ppc.h"</span>
+<a name="l00046"></a>00046 <span class="preprocessor">#endif</span>
+<a name="l00047"></a>00047 <span class="preprocessor"></span>
+<a name="l00048"></a>00048 <span class="preprocessor">#ifdef _MANAGED</span>
+<a name="l00049"></a>00049 <span class="preprocessor"></span><span class="preprocessor">#pragma managed(pop)</span>
+<a name="l00050"></a>00050 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00051"></a>00051 <span class="preprocessor"></span>
+<a name="l00052"></a>00052 <span class="preprocessor">#elif __linux__ || __FreeBSD__</span>
+<a name="l00053"></a>00053 <span class="preprocessor"></span>
+<a name="l00054"></a>00054 <span class="preprocessor">#if __i386__</span>
+<a name="l00055"></a>00055 <span class="preprocessor"></span><span class="preprocessor">#include "machine/linux_ia32.h"</span>
+<a name="l00056"></a>00056 <span class="preprocessor">#elif __x86_64__</span>
+<a name="l00057"></a>00057 <span class="preprocessor"></span><span class="preprocessor">#include "machine/linux_intel64.h"</span>
+<a name="l00058"></a>00058 <span class="preprocessor">#elif __ia64__</span>
+<a name="l00059"></a>00059 <span class="preprocessor"></span><span class="preprocessor">#include "machine/linux_ia64.h"</span>
+<a name="l00060"></a>00060 <span class="preprocessor">#elif __powerpc__</span>
+<a name="l00061"></a>00061 <span class="preprocessor"></span><span class="preprocessor">#include "machine/mac_ppc.h"</span>
+<a name="l00062"></a>00062 <span class="preprocessor">#endif</span>
+<a name="l00063"></a>00063 <span class="preprocessor"></span><span class="preprocessor">#include "machine/linux_common.h"</span>
+<a name="l00064"></a>00064 
+<a name="l00065"></a>00065 <span class="preprocessor">#elif __APPLE__</span>
+<a name="l00066"></a>00066 <span class="preprocessor"></span>
+<a name="l00067"></a>00067 <span class="preprocessor">#if __i386__</span>
+<a name="l00068"></a>00068 <span class="preprocessor"></span><span class="preprocessor">#include "machine/linux_ia32.h"</span>
+<a name="l00069"></a>00069 <span class="preprocessor">#elif __x86_64__</span>
+<a name="l00070"></a>00070 <span class="preprocessor"></span><span class="preprocessor">#include "machine/linux_intel64.h"</span>
+<a name="l00071"></a>00071 <span class="preprocessor">#elif __POWERPC__</span>
+<a name="l00072"></a>00072 <span class="preprocessor"></span><span class="preprocessor">#include "machine/mac_ppc.h"</span>
+<a name="l00073"></a>00073 <span class="preprocessor">#endif</span>
+<a name="l00074"></a>00074 <span class="preprocessor"></span><span class="preprocessor">#include "machine/macos_common.h"</span>
+<a name="l00075"></a>00075 
+<a name="l00076"></a>00076 <span class="preprocessor">#elif _AIX</span>
+<a name="l00077"></a>00077 <span class="preprocessor"></span>
+<a name="l00078"></a>00078 <span class="preprocessor">#include "machine/ibm_aix51.h"</span>
+<a name="l00079"></a>00079 
+<a name="l00080"></a>00080 <span class="preprocessor">#elif __sun || __SUNPRO_CC</span>
+<a name="l00081"></a>00081 <span class="preprocessor"></span>
+<a name="l00082"></a>00082 <span class="preprocessor">#define __asm__ asm </span>
+<a name="l00083"></a>00083 <span class="preprocessor"></span><span class="preprocessor">#define __volatile__ volatile</span>
+<a name="l00084"></a>00084 <span class="preprocessor"></span><span class="preprocessor">#if __i386  || __i386__</span>
+<a name="l00085"></a>00085 <span class="preprocessor"></span><span class="preprocessor">#include "machine/linux_ia32.h"</span>
+<a name="l00086"></a>00086 <span class="preprocessor">#elif __x86_64__</span>
+<a name="l00087"></a>00087 <span class="preprocessor"></span><span class="preprocessor">#include "machine/linux_intel64.h"</span>
+<a name="l00088"></a>00088 <span class="preprocessor">#elif __sparc</span>
+<a name="l00089"></a>00089 <span class="preprocessor"></span><span class="preprocessor">#include "machine/sunos_sparc.h"</span>
+<a name="l00090"></a>00090 <span class="preprocessor">#endif</span>
+<a name="l00091"></a>00091 <span class="preprocessor"></span><span class="preprocessor">#include &lt;sched.h&gt;</span>
+<a name="l00092"></a>00092 <span class="preprocessor">#define __TBB_Yield() sched_yield()</span>
+<a name="l00093"></a>00093 <span class="preprocessor"></span>
+<a name="l00094"></a>00094 <span class="preprocessor">#endif</span>
+<a name="l00095"></a>00095 <span class="preprocessor"></span>
+<a name="l00097"></a>00097 
+<a name="l00109"></a>00109 <span class="preprocessor">#if    !defined(__TBB_CompareAndSwap4) \</span>
+<a name="l00110"></a>00110 <span class="preprocessor">    || !defined(__TBB_CompareAndSwap8) \</span>
+<a name="l00111"></a>00111 <span class="preprocessor">    || !defined(__TBB_Yield)           \</span>
+<a name="l00112"></a>00112 <span class="preprocessor">    || !defined(__TBB_full_memory_fence)    \</span>
+<a name="l00113"></a>00113 <span class="preprocessor">    || !defined(__TBB_release_consistency_helper)</span>
+<a name="l00114"></a>00114 <span class="preprocessor"></span><span class="preprocessor">#error Minimal requirements for tbb_machine.h not satisfied; platform is not supported.</span>
+<a name="l00115"></a>00115 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00116"></a>00116 <span class="preprocessor"></span>
+<a name="l00117"></a>00117 <span class="preprocessor">#ifndef __TBB_Pause</span>
+<a name="l00118"></a>00118 <span class="preprocessor"></span>    <span class="keyword">inline</span> <span class="keywordtype">void</span> __TBB_Pause(int32_t) {
+<a name="l00119"></a>00119         __TBB_Yield();
+<a name="l00120"></a>00120     }
+<a name="l00121"></a>00121 <span class="preprocessor">#endif</span>
+<a name="l00122"></a>00122 <span class="preprocessor"></span>
+<a name="l00123"></a>00123 <span class="keyword">namespace </span>tbb {
+<a name="l00124"></a>00124 <span class="keyword">namespace </span>internal {
+<a name="l00125"></a>00125 
+<a name="l00127"></a>00127 
+<a name="l00128"></a><a class="code" href="a00149.html">00128</a> <span class="keyword">class </span><a class="code" href="a00149.html">atomic_backoff</a> : no_copy {
+<a name="l00130"></a>00130 
+<a name="l00132"></a>00132     <span class="keyword">static</span> <span class="keyword">const</span> int32_t LOOPS_BEFORE_YIELD = 16;
+<a name="l00133"></a>00133     int32_t count;
+<a name="l00134"></a>00134 <span class="keyword">public</span>:
+<a name="l00135"></a>00135     <a class="code" href="a00149.html">atomic_backoff</a>() : count(1) {}
+<a name="l00136"></a>00136 
+<a name="l00138"></a><a class="code" href="a00149.html#a174ea93e3bd3d5cce82389c2f28d037">00138</a>     <span class="keywordtype">void</span> <a class="code" href="a00149.html#a174ea93e3bd3d5cce82389c2f28d037">pause</a>() {
+<a name="l00139"></a>00139         <span class="keywordflow">if</span>( count&lt;=LOOPS_BEFORE_YIELD ) {
+<a name="l00140"></a>00140             __TBB_Pause(count);
+<a name="l00141"></a>00141             <span class="comment">// Pause twice as long the next time.</span>
+<a name="l00142"></a>00142             count*=2;
+<a name="l00143"></a>00143         } <span class="keywordflow">else</span> {
+<a name="l00144"></a>00144             <span class="comment">// Pause is so long that we might as well yield CPU to scheduler.</span>
+<a name="l00145"></a>00145             __TBB_Yield();
+<a name="l00146"></a>00146         }
+<a name="l00147"></a>00147     }
+<a name="l00148"></a>00148 
+<a name="l00149"></a>00149     <span class="comment">// pause for a few times and then return false immediately.</span>
+<a name="l00150"></a>00150     <span class="keywordtype">bool</span> bounded_pause() {
+<a name="l00151"></a>00151         <span class="keywordflow">if</span>( count&lt;=LOOPS_BEFORE_YIELD ) {
+<a name="l00152"></a>00152             __TBB_Pause(count);
+<a name="l00153"></a>00153             <span class="comment">// Pause twice as long the next time.</span>
+<a name="l00154"></a>00154             count*=2;
+<a name="l00155"></a>00155             <span class="keywordflow">return</span> <span class="keyword">true</span>;
+<a name="l00156"></a>00156         } <span class="keywordflow">else</span> {
+<a name="l00157"></a>00157             <span class="keywordflow">return</span> <span class="keyword">false</span>;
+<a name="l00158"></a>00158         }
+<a name="l00159"></a>00159     }
+<a name="l00160"></a>00160 
+<a name="l00161"></a>00161     <span class="keywordtype">void</span> reset() {
+<a name="l00162"></a>00162         count = 1;
+<a name="l00163"></a>00163     }
+<a name="l00164"></a>00164 };
+<a name="l00165"></a>00165 
+<a name="l00167"></a>00167 
+<a name="l00168"></a>00168 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00169"></a>00169 <span class="keywordtype">void</span> spin_wait_while_eq( <span class="keyword">const</span> <span class="keyword">volatile</span> T&amp; location, U value ) {
+<a name="l00170"></a>00170     atomic_backoff backoff;
+<a name="l00171"></a>00171     <span class="keywordflow">while</span>( location==value ) backoff.pause();
+<a name="l00172"></a>00172 }
+<a name="l00173"></a>00173 
+<a name="l00175"></a>00175 
+<a name="l00176"></a>00176 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt;
+<a name="l00177"></a>00177 <span class="keywordtype">void</span> spin_wait_until_eq( <span class="keyword">const</span> <span class="keyword">volatile</span> T&amp; location, <span class="keyword">const</span> U value ) {
+<a name="l00178"></a>00178     atomic_backoff backoff;
+<a name="l00179"></a>00179     <span class="keywordflow">while</span>( location!=value ) backoff.pause();
+<a name="l00180"></a>00180 }
+<a name="l00181"></a>00181 
+<a name="l00182"></a>00182 <span class="comment">// T should be unsigned, otherwise sign propagation will break correctness of bit manipulations.</span>
+<a name="l00183"></a>00183 <span class="comment">// S should be either 1 or 2, for the mask calculation to work correctly.</span>
+<a name="l00184"></a>00184 <span class="comment">// Together, these rules limit applicability of Masked CAS to unsigned char and unsigned short.</span>
+<a name="l00185"></a>00185 <span class="keyword">template</span>&lt;size_t S, <span class="keyword">typename</span> T&gt;
+<a name="l00186"></a>00186 <span class="keyword">inline</span> T __TBB_MaskedCompareAndSwap (<span class="keyword">volatile</span> T *ptr, T value, T comparand ) {
+<a name="l00187"></a>00187     <span class="keyword">volatile</span> uint32_t * base = (uint32_t*)( (uintptr_t)ptr &amp; ~(uintptr_t)0x3 );
+<a name="l00188"></a>00188 <span class="preprocessor">#if __TBB_BIG_ENDIAN</span>
+<a name="l00189"></a>00189 <span class="preprocessor"></span>    <span class="keyword">const</span> uint8_t bitoffset = uint8_t( 8*( 4-S - (uintptr_t(ptr) &amp; 0x3) ) );
+<a name="l00190"></a>00190 <span class="preprocessor">#else</span>
+<a name="l00191"></a>00191 <span class="preprocessor"></span>    <span class="keyword">const</span> uint8_t bitoffset = uint8_t( 8*((uintptr_t)ptr &amp; 0x3) );
+<a name="l00192"></a>00192 <span class="preprocessor">#endif</span>
+<a name="l00193"></a>00193 <span class="preprocessor"></span>    <span class="keyword">const</span> uint32_t mask = ( (1&lt;&lt;(S*8)) - 1 )&lt;&lt;bitoffset;
+<a name="l00194"></a>00194     atomic_backoff b;
+<a name="l00195"></a>00195     uint32_t result;
+<a name="l00196"></a>00196     <span class="keywordflow">for</span>(;;) {
+<a name="l00197"></a>00197         result = *base; <span class="comment">// reload the base value which might change during the pause</span>
+<a name="l00198"></a>00198         uint32_t old_value = ( result &amp; ~mask ) | ( comparand &lt;&lt; bitoffset );
+<a name="l00199"></a>00199         uint32_t new_value = ( result &amp; ~mask ) | ( value &lt;&lt; bitoffset );
+<a name="l00200"></a>00200         <span class="comment">// __TBB_CompareAndSwap4 presumed to have full fence. </span>
+<a name="l00201"></a>00201         result = __TBB_CompareAndSwap4( base, new_value, old_value );
+<a name="l00202"></a>00202         <span class="keywordflow">if</span>(  result==old_value               <span class="comment">// CAS succeeded</span>
+<a name="l00203"></a>00203           || ((result^old_value)&amp;mask)!=0 )  <span class="comment">// CAS failed and the bits of interest have changed</span>
+<a name="l00204"></a>00204             <span class="keywordflow">break</span>;
+<a name="l00205"></a>00205         <span class="keywordflow">else</span>                                 <span class="comment">// CAS failed but the bits of interest left unchanged</span>
+<a name="l00206"></a>00206             b.pause();
+<a name="l00207"></a>00207     }
+<a name="l00208"></a>00208     <span class="keywordflow">return</span> T((result &amp; mask) &gt;&gt; bitoffset);
+<a name="l00209"></a>00209 }
+<a name="l00210"></a>00210 
+<a name="l00211"></a>00211 <span class="keyword">template</span>&lt;size_t S, <span class="keyword">typename</span> T&gt;
+<a name="l00212"></a>00212 <span class="keyword">inline</span> T __TBB_CompareAndSwapGeneric (<span class="keyword">volatile</span> <span class="keywordtype">void</span> *ptr, T value, T comparand ) { 
+<a name="l00213"></a>00213     <span class="keywordflow">return</span> __TBB_CompareAndSwapW((T *)ptr,value,comparand);
+<a name="l00214"></a>00214 }
+<a name="l00215"></a>00215 
+<a name="l00216"></a>00216 <span class="keyword">template</span>&lt;&gt;
+<a name="l00217"></a>00217 <span class="keyword">inline</span> uint8_t __TBB_CompareAndSwapGeneric &lt;1,uint8_t&gt; (<span class="keyword">volatile</span> <span class="keywordtype">void</span> *ptr, uint8_t value, uint8_t comparand ) {
+<a name="l00218"></a>00218 #ifdef __TBB_CompareAndSwap1
+<a name="l00219"></a>00219     <span class="keywordflow">return</span> __TBB_CompareAndSwap1(ptr,value,comparand);
+<a name="l00220"></a>00220 <span class="preprocessor">#else</span>
+<a name="l00221"></a>00221 <span class="preprocessor"></span>    <span class="keywordflow">return</span> __TBB_MaskedCompareAndSwap&lt;1,uint8_t&gt;((<span class="keyword">volatile</span> uint8_t *)ptr,value,comparand);
+<a name="l00222"></a>00222 <span class="preprocessor">#endif</span>
+<a name="l00223"></a>00223 <span class="preprocessor"></span>}
+<a name="l00224"></a>00224 
+<a name="l00225"></a>00225 <span class="keyword">template</span>&lt;&gt;
+<a name="l00226"></a>00226 <span class="keyword">inline</span> uint16_t __TBB_CompareAndSwapGeneric &lt;2,uint16_t&gt; (<span class="keyword">volatile</span> <span class="keywordtype">void</span> *ptr, uint16_t value, uint16_t comparand ) {
+<a name="l00227"></a>00227 #ifdef __TBB_CompareAndSwap2
+<a name="l00228"></a>00228     <span class="keywordflow">return</span> __TBB_CompareAndSwap2(ptr,value,comparand);
+<a name="l00229"></a>00229 <span class="preprocessor">#else</span>
+<a name="l00230"></a>00230 <span class="preprocessor"></span>    <span class="keywordflow">return</span> __TBB_MaskedCompareAndSwap&lt;2,uint16_t&gt;((<span class="keyword">volatile</span> uint16_t *)ptr,value,comparand);
+<a name="l00231"></a>00231 <span class="preprocessor">#endif</span>
+<a name="l00232"></a>00232 <span class="preprocessor"></span>}
+<a name="l00233"></a>00233 
+<a name="l00234"></a>00234 <span class="keyword">template</span>&lt;&gt;
+<a name="l00235"></a>00235 <span class="keyword">inline</span> uint32_t __TBB_CompareAndSwapGeneric &lt;4,uint32_t&gt; (<span class="keyword">volatile</span> <span class="keywordtype">void</span> *ptr, uint32_t value, uint32_t comparand ) { 
+<a name="l00236"></a>00236     <span class="keywordflow">return</span> __TBB_CompareAndSwap4(ptr,value,comparand);
+<a name="l00237"></a>00237 }
+<a name="l00238"></a>00238 
+<a name="l00239"></a>00239 <span class="keyword">template</span>&lt;&gt;
+<a name="l00240"></a>00240 <span class="keyword">inline</span> uint64_t __TBB_CompareAndSwapGeneric &lt;8,uint64_t&gt; (<span class="keyword">volatile</span> <span class="keywordtype">void</span> *ptr, uint64_t value, uint64_t comparand ) { 
+<a name="l00241"></a>00241     <span class="keywordflow">return</span> __TBB_CompareAndSwap8(ptr,value,comparand);
+<a name="l00242"></a>00242 }
+<a name="l00243"></a>00243 
+<a name="l00244"></a>00244 <span class="keyword">template</span>&lt;size_t S, <span class="keyword">typename</span> T&gt;
+<a name="l00245"></a>00245 <span class="keyword">inline</span> T __TBB_FetchAndAddGeneric (<span class="keyword">volatile</span> <span class="keywordtype">void</span> *ptr, T addend) {
+<a name="l00246"></a>00246     atomic_backoff b;
+<a name="l00247"></a>00247     T result;
+<a name="l00248"></a>00248     <span class="keywordflow">for</span>(;;) {
+<a name="l00249"></a>00249         result = *reinterpret_cast&lt;volatile T *&gt;(ptr);
+<a name="l00250"></a>00250         <span class="comment">// __TBB_CompareAndSwapGeneric presumed to have full fence. </span>
+<a name="l00251"></a>00251         <span class="keywordflow">if</span>( __TBB_CompareAndSwapGeneric&lt;S,T&gt; ( ptr, result+addend, result )==result ) 
+<a name="l00252"></a>00252             <span class="keywordflow">break</span>;
+<a name="l00253"></a>00253         b.pause();
+<a name="l00254"></a>00254     }
+<a name="l00255"></a>00255     <span class="keywordflow">return</span> result;
+<a name="l00256"></a>00256 }
+<a name="l00257"></a>00257 
+<a name="l00258"></a>00258 <span class="keyword">template</span>&lt;size_t S, <span class="keyword">typename</span> T&gt;
+<a name="l00259"></a>00259 <span class="keyword">inline</span> T __TBB_FetchAndStoreGeneric (<span class="keyword">volatile</span> <span class="keywordtype">void</span> *ptr, T value) {
+<a name="l00260"></a>00260     atomic_backoff b;
+<a name="l00261"></a>00261     T result;
+<a name="l00262"></a>00262     <span class="keywordflow">for</span>(;;) {
+<a name="l00263"></a>00263         result = *reinterpret_cast&lt;volatile T *&gt;(ptr);
+<a name="l00264"></a>00264         <span class="comment">// __TBB_CompareAndSwapGeneric presumed to have full fence.</span>
+<a name="l00265"></a>00265         <span class="keywordflow">if</span>( __TBB_CompareAndSwapGeneric&lt;S,T&gt; ( ptr, value, result )==result ) 
+<a name="l00266"></a>00266             <span class="keywordflow">break</span>;
+<a name="l00267"></a>00267         b.pause();
+<a name="l00268"></a>00268     }
+<a name="l00269"></a>00269     <span class="keywordflow">return</span> result;
+<a name="l00270"></a>00270 }
+<a name="l00271"></a>00271 
+<a name="l00272"></a>00272 <span class="comment">// Macro __TBB_TypeWithAlignmentAtLeastAsStrict(T) should be a type with alignment at least as </span>
+<a name="l00273"></a>00273 <span class="comment">// strict as type T.  Type type should have a trivial default constructor and destructor, so that</span>
+<a name="l00274"></a>00274 <span class="comment">// arrays of that type can be declared without initializers.  </span>
+<a name="l00275"></a>00275 <span class="comment">// It is correct (but perhaps a waste of space) if __TBB_TypeWithAlignmentAtLeastAsStrict(T) expands</span>
+<a name="l00276"></a>00276 <span class="comment">// to a type bigger than T.</span>
+<a name="l00277"></a>00277 <span class="comment">// The default definition here works on machines where integers are naturally aligned and the</span>
+<a name="l00278"></a>00278 <span class="comment">// strictest alignment is 16.</span>
+<a name="l00279"></a>00279 <span class="preprocessor">#ifndef __TBB_TypeWithAlignmentAtLeastAsStrict</span>
+<a name="l00280"></a>00280 <span class="preprocessor"></span>
+<a name="l00281"></a>00281 <span class="preprocessor">#if __GNUC__ || __SUNPRO_CC</span>
+<a name="l00282"></a>00282 <span class="preprocessor"></span><span class="keyword">struct </span>__TBB_machine_type_with_strictest_alignment {
+<a name="l00283"></a>00283     <span class="keywordtype">int</span> member[4];
+<a name="l00284"></a>00284 } __attribute__((aligned(16)));
+<a name="l00285"></a>00285 <span class="preprocessor">#elif _MSC_VER</span>
+<a name="l00286"></a>00286 <span class="preprocessor"></span>__declspec(align(16)) struct __TBB_machine_type_with_strictest_alignment {
+<a name="l00287"></a>00287     <span class="keywordtype">int</span> member[4];
+<a name="l00288"></a>00288 };
+<a name="l00289"></a>00289 <span class="preprocessor">#else</span>
+<a name="l00290"></a>00290 <span class="preprocessor"></span><span class="preprocessor">#error Must define __TBB_TypeWithAlignmentAtLeastAsStrict(T) or __TBB_machine_type_with_strictest_alignment</span>
+<a name="l00291"></a>00291 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00292"></a>00292 <span class="preprocessor"></span>
+<a name="l00293"></a>00293 <span class="keyword">template</span>&lt;size_t N&gt; <span class="keyword">struct </span>type_with_alignment {__TBB_machine_type_with_strictest_alignment member;};
+<a name="l00294"></a>00294 <span class="keyword">template</span>&lt;&gt; <span class="keyword">struct </span>type_with_alignment&lt;1&gt; { <span class="keywordtype">char</span> member; };
+<a name="l00295"></a>00295 <span class="keyword">template</span>&lt;&gt; <span class="keyword">struct </span>type_with_alignment&lt;2&gt; { uint16_t member; };
+<a name="l00296"></a>00296 <span class="keyword">template</span>&lt;&gt; <span class="keyword">struct </span>type_with_alignment&lt;4&gt; { uint32_t member; };
+<a name="l00297"></a>00297 <span class="keyword">template</span>&lt;&gt; <span class="keyword">struct </span>type_with_alignment&lt;8&gt; { uint64_t member; };
+<a name="l00298"></a>00298 
+<a name="l00299"></a>00299 <span class="preprocessor">#if _MSC_VER||defined(__GNUC__)&amp;&amp;__GNUC__==3 &amp;&amp; __GNUC_MINOR__&lt;=2  </span>
+<a name="l00301"></a>00301 <span class="preprocessor"></span>
+<a name="l00303"></a>00303 <span class="preprocessor">template&lt;size_t Size, typename T&gt; </span>
+<a name="l00304"></a><a class="code" href="a00217.html">00304</a> <span class="preprocessor"></span><span class="keyword">struct </span><a class="code" href="a00217.html">work_around_alignment_bug</a> {
+<a name="l00305"></a>00305 <span class="preprocessor">#if _MSC_VER</span>
+<a name="l00306"></a>00306 <span class="preprocessor"></span>    <span class="keyword">static</span> <span class="keyword">const</span> size_t alignment = __alignof(T);
+<a name="l00307"></a>00307 <span class="preprocessor">#else</span>
+<a name="l00308"></a>00308 <span class="preprocessor"></span>    <span class="keyword">static</span> <span class="keyword">const</span> size_t alignment = __alignof__(T);
+<a name="l00309"></a>00309 <span class="preprocessor">#endif</span>
+<a name="l00310"></a>00310 <span class="preprocessor"></span>};
+<a name="l00311"></a>00311 <span class="preprocessor">#define __TBB_TypeWithAlignmentAtLeastAsStrict(T) tbb::internal::type_with_alignment&lt;tbb::internal::work_around_alignment_bug&lt;sizeof(T),T&gt;::alignment&gt;</span>
+<a name="l00312"></a>00312 <span class="preprocessor"></span><span class="preprocessor">#elif __GNUC__ || __SUNPRO_CC</span>
+<a name="l00313"></a>00313 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_TypeWithAlignmentAtLeastAsStrict(T) tbb::internal::type_with_alignment&lt;__alignof__(T)&gt;</span>
+<a name="l00314"></a>00314 <span class="preprocessor"></span><span class="preprocessor">#else</span>
+<a name="l00315"></a>00315 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_TypeWithAlignmentAtLeastAsStrict(T) __TBB_machine_type_with_strictest_alignment</span>
+<a name="l00316"></a>00316 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00317"></a>00317 <span class="preprocessor"></span><span class="preprocessor">#endif  </span><span class="comment">/* ____TBB_TypeWithAlignmentAtLeastAsStrict */</span>
+<a name="l00318"></a>00318 
+<a name="l00319"></a>00319 <span class="comment">// Template class here is to avoid instantiation of the static data for modules that don't use it</span>
+<a name="l00320"></a>00320 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00321"></a>00321 <span class="keyword">struct </span>reverse {
+<a name="l00322"></a>00322     <span class="keyword">static</span> <span class="keyword">const</span> T byte_table[256];
+<a name="l00323"></a>00323 };
+<a name="l00324"></a>00324 <span class="comment">// An efficient implementation of the reverse function utilizes a 2^8 lookup table holding the bit-reversed</span>
+<a name="l00325"></a>00325 <span class="comment">// values of [0..2^8 - 1]. Those values can also be computed on the fly at a slightly higher cost.</span>
+<a name="l00326"></a>00326 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00327"></a>00327 <span class="keyword">const</span> T reverse&lt;T&gt;::byte_table[256] = {
+<a name="l00328"></a>00328     0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0,
+<a name="l00329"></a>00329     0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8,
+<a name="l00330"></a>00330     0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,
+<a name="l00331"></a>00331     0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC,
+<a name="l00332"></a>00332     0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2,
+<a name="l00333"></a>00333     0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,
+<a name="l00334"></a>00334     0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6,
+<a name="l00335"></a>00335     0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE,
+<a name="l00336"></a>00336     0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,
+<a name="l00337"></a>00337     0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9,
+<a name="l00338"></a>00338     0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5,
+<a name="l00339"></a>00339     0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,
+<a name="l00340"></a>00340     0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3,
+<a name="l00341"></a>00341     0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB,
+<a name="l00342"></a>00342     0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,
+<a name="l00343"></a>00343     0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF
+<a name="l00344"></a>00344 };
+<a name="l00345"></a>00345 
+<a name="l00346"></a>00346 } <span class="comment">// namespace internal</span>
+<a name="l00347"></a>00347 } <span class="comment">// namespace tbb</span>
+<a name="l00348"></a>00348 
+<a name="l00349"></a>00349 <span class="preprocessor">#ifndef __TBB_CompareAndSwap1</span>
+<a name="l00350"></a>00350 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_CompareAndSwap1 tbb::internal::__TBB_CompareAndSwapGeneric&lt;1,uint8_t&gt;</span>
+<a name="l00351"></a>00351 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00352"></a>00352 <span class="preprocessor"></span>
+<a name="l00353"></a>00353 <span class="preprocessor">#ifndef __TBB_CompareAndSwap2 </span>
+<a name="l00354"></a>00354 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_CompareAndSwap2 tbb::internal::__TBB_CompareAndSwapGeneric&lt;2,uint16_t&gt;</span>
+<a name="l00355"></a>00355 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00356"></a>00356 <span class="preprocessor"></span>
+<a name="l00357"></a>00357 <span class="preprocessor">#ifndef __TBB_CompareAndSwapW</span>
+<a name="l00358"></a>00358 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_CompareAndSwapW tbb::internal::__TBB_CompareAndSwapGeneric&lt;sizeof(ptrdiff_t),ptrdiff_t&gt;</span>
+<a name="l00359"></a>00359 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00360"></a>00360 <span class="preprocessor"></span>
+<a name="l00361"></a>00361 <span class="preprocessor">#ifndef __TBB_FetchAndAdd1</span>
+<a name="l00362"></a>00362 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndAdd1 tbb::internal::__TBB_FetchAndAddGeneric&lt;1,uint8_t&gt;</span>
+<a name="l00363"></a>00363 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00364"></a>00364 <span class="preprocessor"></span>
+<a name="l00365"></a>00365 <span class="preprocessor">#ifndef __TBB_FetchAndAdd2</span>
+<a name="l00366"></a>00366 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndAdd2 tbb::internal::__TBB_FetchAndAddGeneric&lt;2,uint16_t&gt;</span>
+<a name="l00367"></a>00367 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00368"></a>00368 <span class="preprocessor"></span>
+<a name="l00369"></a>00369 <span class="preprocessor">#ifndef __TBB_FetchAndAdd4</span>
+<a name="l00370"></a>00370 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndAdd4 tbb::internal::__TBB_FetchAndAddGeneric&lt;4,uint32_t&gt;</span>
+<a name="l00371"></a>00371 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00372"></a>00372 <span class="preprocessor"></span>
+<a name="l00373"></a>00373 <span class="preprocessor">#ifndef __TBB_FetchAndAdd8</span>
+<a name="l00374"></a>00374 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndAdd8 tbb::internal::__TBB_FetchAndAddGeneric&lt;8,uint64_t&gt;</span>
+<a name="l00375"></a>00375 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00376"></a>00376 <span class="preprocessor"></span>
+<a name="l00377"></a>00377 <span class="preprocessor">#ifndef __TBB_FetchAndAddW</span>
+<a name="l00378"></a>00378 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndAddW tbb::internal::__TBB_FetchAndAddGeneric&lt;sizeof(ptrdiff_t),ptrdiff_t&gt;</span>
+<a name="l00379"></a>00379 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00380"></a>00380 <span class="preprocessor"></span>
+<a name="l00381"></a>00381 <span class="preprocessor">#ifndef __TBB_FetchAndStore1</span>
+<a name="l00382"></a>00382 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndStore1 tbb::internal::__TBB_FetchAndStoreGeneric&lt;1,uint8_t&gt;</span>
+<a name="l00383"></a>00383 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00384"></a>00384 <span class="preprocessor"></span>
+<a name="l00385"></a>00385 <span class="preprocessor">#ifndef __TBB_FetchAndStore2</span>
+<a name="l00386"></a>00386 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndStore2 tbb::internal::__TBB_FetchAndStoreGeneric&lt;2,uint16_t&gt;</span>
+<a name="l00387"></a>00387 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00388"></a>00388 <span class="preprocessor"></span>
+<a name="l00389"></a>00389 <span class="preprocessor">#ifndef __TBB_FetchAndStore4</span>
+<a name="l00390"></a>00390 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndStore4 tbb::internal::__TBB_FetchAndStoreGeneric&lt;4,uint32_t&gt;</span>
+<a name="l00391"></a>00391 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00392"></a>00392 <span class="preprocessor"></span>
+<a name="l00393"></a>00393 <span class="preprocessor">#ifndef __TBB_FetchAndStore8</span>
+<a name="l00394"></a>00394 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndStore8 tbb::internal::__TBB_FetchAndStoreGeneric&lt;8,uint64_t&gt;</span>
+<a name="l00395"></a>00395 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00396"></a>00396 <span class="preprocessor"></span>
+<a name="l00397"></a>00397 <span class="preprocessor">#ifndef __TBB_FetchAndStoreW</span>
+<a name="l00398"></a>00398 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndStoreW tbb::internal::__TBB_FetchAndStoreGeneric&lt;sizeof(ptrdiff_t),ptrdiff_t&gt;</span>
+<a name="l00399"></a>00399 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00400"></a>00400 <span class="preprocessor"></span>
+<a name="l00401"></a>00401 <span class="preprocessor">#if __TBB_DECL_FENCED_ATOMICS</span>
+<a name="l00402"></a>00402 <span class="preprocessor"></span>
+<a name="l00403"></a>00403 <span class="preprocessor">#ifndef __TBB_CompareAndSwap1__TBB_full_fence</span>
+<a name="l00404"></a>00404 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_CompareAndSwap1__TBB_full_fence __TBB_CompareAndSwap1</span>
+<a name="l00405"></a>00405 <span class="preprocessor"></span><span class="preprocessor">#endif </span>
+<a name="l00406"></a>00406 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_CompareAndSwap1acquire</span>
+<a name="l00407"></a>00407 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_CompareAndSwap1acquire __TBB_CompareAndSwap1__TBB_full_fence</span>
+<a name="l00408"></a>00408 <span class="preprocessor"></span><span class="preprocessor">#endif </span>
+<a name="l00409"></a>00409 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_CompareAndSwap1release</span>
+<a name="l00410"></a>00410 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_CompareAndSwap1release __TBB_CompareAndSwap1__TBB_full_fence</span>
+<a name="l00411"></a>00411 <span class="preprocessor"></span><span class="preprocessor">#endif </span>
+<a name="l00412"></a>00412 <span class="preprocessor"></span>
+<a name="l00413"></a>00413 <span class="preprocessor">#ifndef __TBB_CompareAndSwap2__TBB_full_fence</span>
+<a name="l00414"></a>00414 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_CompareAndSwap2__TBB_full_fence __TBB_CompareAndSwap2</span>
+<a name="l00415"></a>00415 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00416"></a>00416 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_CompareAndSwap2acquire</span>
+<a name="l00417"></a>00417 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_CompareAndSwap2acquire __TBB_CompareAndSwap2__TBB_full_fence</span>
+<a name="l00418"></a>00418 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00419"></a>00419 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_CompareAndSwap2release</span>
+<a name="l00420"></a>00420 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_CompareAndSwap2release __TBB_CompareAndSwap2__TBB_full_fence</span>
+<a name="l00421"></a>00421 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00422"></a>00422 <span class="preprocessor"></span>
+<a name="l00423"></a>00423 <span class="preprocessor">#ifndef __TBB_CompareAndSwap4__TBB_full_fence</span>
+<a name="l00424"></a>00424 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_CompareAndSwap4__TBB_full_fence __TBB_CompareAndSwap4</span>
+<a name="l00425"></a>00425 <span class="preprocessor"></span><span class="preprocessor">#endif </span>
+<a name="l00426"></a>00426 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_CompareAndSwap4acquire</span>
+<a name="l00427"></a>00427 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_CompareAndSwap4acquire __TBB_CompareAndSwap4__TBB_full_fence</span>
+<a name="l00428"></a>00428 <span class="preprocessor"></span><span class="preprocessor">#endif </span>
+<a name="l00429"></a>00429 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_CompareAndSwap4release</span>
+<a name="l00430"></a>00430 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_CompareAndSwap4release __TBB_CompareAndSwap4__TBB_full_fence</span>
+<a name="l00431"></a>00431 <span class="preprocessor"></span><span class="preprocessor">#endif </span>
+<a name="l00432"></a>00432 <span class="preprocessor"></span>
+<a name="l00433"></a>00433 <span class="preprocessor">#ifndef __TBB_CompareAndSwap8__TBB_full_fence</span>
+<a name="l00434"></a>00434 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_CompareAndSwap8__TBB_full_fence __TBB_CompareAndSwap8</span>
+<a name="l00435"></a>00435 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00436"></a>00436 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_CompareAndSwap8acquire</span>
+<a name="l00437"></a>00437 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_CompareAndSwap8acquire __TBB_CompareAndSwap8__TBB_full_fence</span>
+<a name="l00438"></a>00438 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00439"></a>00439 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_CompareAndSwap8release</span>
+<a name="l00440"></a>00440 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_CompareAndSwap8release __TBB_CompareAndSwap8__TBB_full_fence</span>
+<a name="l00441"></a>00441 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00442"></a>00442 <span class="preprocessor"></span>
+<a name="l00443"></a>00443 <span class="preprocessor">#ifndef __TBB_FetchAndAdd1__TBB_full_fence</span>
+<a name="l00444"></a>00444 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndAdd1__TBB_full_fence __TBB_FetchAndAdd1</span>
+<a name="l00445"></a>00445 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00446"></a>00446 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_FetchAndAdd1acquire</span>
+<a name="l00447"></a>00447 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndAdd1acquire __TBB_FetchAndAdd1__TBB_full_fence</span>
+<a name="l00448"></a>00448 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00449"></a>00449 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_FetchAndAdd1release</span>
+<a name="l00450"></a>00450 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndAdd1release __TBB_FetchAndAdd1__TBB_full_fence</span>
+<a name="l00451"></a>00451 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00452"></a>00452 <span class="preprocessor"></span>
+<a name="l00453"></a>00453 <span class="preprocessor">#ifndef __TBB_FetchAndAdd2__TBB_full_fence</span>
+<a name="l00454"></a>00454 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndAdd2__TBB_full_fence __TBB_FetchAndAdd2</span>
+<a name="l00455"></a>00455 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00456"></a>00456 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_FetchAndAdd2acquire</span>
+<a name="l00457"></a>00457 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndAdd2acquire __TBB_FetchAndAdd2__TBB_full_fence</span>
+<a name="l00458"></a>00458 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00459"></a>00459 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_FetchAndAdd2release</span>
+<a name="l00460"></a>00460 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndAdd2release __TBB_FetchAndAdd2__TBB_full_fence</span>
+<a name="l00461"></a>00461 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00462"></a>00462 <span class="preprocessor"></span>
+<a name="l00463"></a>00463 <span class="preprocessor">#ifndef __TBB_FetchAndAdd4__TBB_full_fence</span>
+<a name="l00464"></a>00464 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndAdd4__TBB_full_fence __TBB_FetchAndAdd4</span>
+<a name="l00465"></a>00465 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00466"></a>00466 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_FetchAndAdd4acquire</span>
+<a name="l00467"></a>00467 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndAdd4acquire __TBB_FetchAndAdd4__TBB_full_fence</span>
+<a name="l00468"></a>00468 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00469"></a>00469 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_FetchAndAdd4release</span>
+<a name="l00470"></a>00470 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndAdd4release __TBB_FetchAndAdd4__TBB_full_fence</span>
+<a name="l00471"></a>00471 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00472"></a>00472 <span class="preprocessor"></span>
+<a name="l00473"></a>00473 <span class="preprocessor">#ifndef __TBB_FetchAndAdd8__TBB_full_fence</span>
+<a name="l00474"></a>00474 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndAdd8__TBB_full_fence __TBB_FetchAndAdd8</span>
+<a name="l00475"></a>00475 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00476"></a>00476 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_FetchAndAdd8acquire</span>
+<a name="l00477"></a>00477 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndAdd8acquire __TBB_FetchAndAdd8__TBB_full_fence</span>
+<a name="l00478"></a>00478 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00479"></a>00479 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_FetchAndAdd8release</span>
+<a name="l00480"></a>00480 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndAdd8release __TBB_FetchAndAdd8__TBB_full_fence</span>
+<a name="l00481"></a>00481 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00482"></a>00482 <span class="preprocessor"></span>
+<a name="l00483"></a>00483 <span class="preprocessor">#ifndef __TBB_FetchAndStore1__TBB_full_fence</span>
+<a name="l00484"></a>00484 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndStore1__TBB_full_fence __TBB_FetchAndStore1</span>
+<a name="l00485"></a>00485 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00486"></a>00486 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_FetchAndStore1acquire</span>
+<a name="l00487"></a>00487 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndStore1acquire __TBB_FetchAndStore1__TBB_full_fence</span>
+<a name="l00488"></a>00488 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00489"></a>00489 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_FetchAndStore1release</span>
+<a name="l00490"></a>00490 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndStore1release __TBB_FetchAndStore1__TBB_full_fence</span>
+<a name="l00491"></a>00491 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00492"></a>00492 <span class="preprocessor"></span>
+<a name="l00493"></a>00493 <span class="preprocessor">#ifndef __TBB_FetchAndStore2__TBB_full_fence</span>
+<a name="l00494"></a>00494 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndStore2__TBB_full_fence __TBB_FetchAndStore2</span>
+<a name="l00495"></a>00495 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00496"></a>00496 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_FetchAndStore2acquire</span>
+<a name="l00497"></a>00497 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndStore2acquire __TBB_FetchAndStore2__TBB_full_fence</span>
+<a name="l00498"></a>00498 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00499"></a>00499 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_FetchAndStore2release</span>
+<a name="l00500"></a>00500 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndStore2release __TBB_FetchAndStore2__TBB_full_fence</span>
+<a name="l00501"></a>00501 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00502"></a>00502 <span class="preprocessor"></span>
+<a name="l00503"></a>00503 <span class="preprocessor">#ifndef __TBB_FetchAndStore4__TBB_full_fence</span>
+<a name="l00504"></a>00504 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndStore4__TBB_full_fence __TBB_FetchAndStore4</span>
+<a name="l00505"></a>00505 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00506"></a>00506 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_FetchAndStore4acquire</span>
+<a name="l00507"></a>00507 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndStore4acquire __TBB_FetchAndStore4__TBB_full_fence</span>
+<a name="l00508"></a>00508 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00509"></a>00509 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_FetchAndStore4release</span>
+<a name="l00510"></a>00510 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndStore4release __TBB_FetchAndStore4__TBB_full_fence</span>
+<a name="l00511"></a>00511 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00512"></a>00512 <span class="preprocessor"></span>
+<a name="l00513"></a>00513 <span class="preprocessor">#ifndef __TBB_FetchAndStore8__TBB_full_fence</span>
+<a name="l00514"></a>00514 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndStore8__TBB_full_fence __TBB_FetchAndStore8</span>
+<a name="l00515"></a>00515 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00516"></a>00516 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_FetchAndStore8acquire</span>
+<a name="l00517"></a>00517 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndStore8acquire __TBB_FetchAndStore8__TBB_full_fence</span>
+<a name="l00518"></a>00518 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00519"></a>00519 <span class="preprocessor"></span><span class="preprocessor">#ifndef __TBB_FetchAndStore8release</span>
+<a name="l00520"></a>00520 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndStore8release __TBB_FetchAndStore8__TBB_full_fence</span>
+<a name="l00521"></a>00521 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00522"></a>00522 <span class="preprocessor"></span>
+<a name="l00523"></a>00523 <span class="preprocessor">#endif // __TBB_DECL_FENCED_ATOMICS</span>
+<a name="l00524"></a>00524 <span class="preprocessor"></span>
+<a name="l00525"></a>00525 <span class="comment">// Special atomic functions</span>
+<a name="l00526"></a>00526 <span class="preprocessor">#ifndef __TBB_FetchAndAddWrelease</span>
+<a name="l00527"></a>00527 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndAddWrelease __TBB_FetchAndAddW</span>
+<a name="l00528"></a>00528 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00529"></a>00529 <span class="preprocessor"></span>
+<a name="l00530"></a>00530 <span class="preprocessor">#ifndef __TBB_FetchAndIncrementWacquire</span>
+<a name="l00531"></a>00531 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndIncrementWacquire(P) __TBB_FetchAndAddW(P,1)</span>
+<a name="l00532"></a>00532 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00533"></a>00533 <span class="preprocessor"></span>
+<a name="l00534"></a>00534 <span class="preprocessor">#ifndef __TBB_FetchAndDecrementWrelease</span>
+<a name="l00535"></a>00535 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_FetchAndDecrementWrelease(P) __TBB_FetchAndAddW(P,(-1))</span>
+<a name="l00536"></a>00536 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00537"></a>00537 <span class="preprocessor"></span>
+<a name="l00538"></a>00538 <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T, size_t S&gt;
+<a name="l00539"></a>00539 <span class="keyword">struct </span>__TBB_machine_load_store {
+<a name="l00540"></a>00540     <span class="keyword">static</span> <span class="keyword">inline</span> T load_with_acquire(<span class="keyword">const</span> <span class="keyword">volatile</span> T&amp; location) {
+<a name="l00541"></a>00541         T to_return = location;
+<a name="l00542"></a>00542         __TBB_release_consistency_helper();
+<a name="l00543"></a>00543         <span class="keywordflow">return</span> to_return;
+<a name="l00544"></a>00544     }
+<a name="l00545"></a>00545 
+<a name="l00546"></a>00546     <span class="keyword">static</span> <span class="keyword">inline</span> <span class="keywordtype">void</span> store_with_release(<span class="keyword">volatile</span> T &amp;location, T value) {
+<a name="l00547"></a>00547         __TBB_release_consistency_helper();
+<a name="l00548"></a>00548         location = value;
+<a name="l00549"></a>00549     }
+<a name="l00550"></a>00550 };
+<a name="l00551"></a>00551 
+<a name="l00552"></a>00552 <span class="preprocessor">#if __TBB_WORDSIZE==4</span>
+<a name="l00553"></a>00553 <span class="preprocessor"></span><span class="preprocessor">#if _MSC_VER</span>
+<a name="l00554"></a>00554 <span class="preprocessor"></span><span class="keyword">using</span> tbb::internal::int64_t;
+<a name="l00555"></a>00555 <span class="preprocessor">#endif</span>
+<a name="l00556"></a>00556 <span class="preprocessor"></span><span class="comment">// On 32-bit platforms, there should be definition of __TBB_Store8 and __TBB_Load8</span>
+<a name="l00557"></a>00557 <span class="preprocessor">#ifndef __TBB_Store8</span>
+<a name="l00558"></a>00558 <span class="preprocessor"></span><span class="keyword">inline</span> <span class="keywordtype">void</span> __TBB_Store8 (<span class="keyword">volatile</span> <span class="keywordtype">void</span> *ptr, int64_t value) {
+<a name="l00559"></a>00559     <span class="keywordflow">for</span>(;;) {
+<a name="l00560"></a>00560         int64_t result = *(int64_t *)ptr;
+<a name="l00561"></a>00561         <span class="keywordflow">if</span>( __TBB_CompareAndSwap8(ptr,value,result)==result ) <span class="keywordflow">break</span>;
+<a name="l00562"></a>00562     }
+<a name="l00563"></a>00563 }
+<a name="l00564"></a>00564 <span class="preprocessor">#endif</span>
+<a name="l00565"></a>00565 <span class="preprocessor"></span>
+<a name="l00566"></a>00566 <span class="preprocessor">#ifndef __TBB_Load8</span>
+<a name="l00567"></a>00567 <span class="preprocessor"></span><span class="keyword">inline</span> int64_t __TBB_Load8 (<span class="keyword">const</span> <span class="keyword">volatile</span> <span class="keywordtype">void</span> *ptr) {
+<a name="l00568"></a>00568     <span class="keyword">const</span> int64_t anyvalue = 3264; <span class="comment">// Could be anything, just the same for comparand and new value</span>
+<a name="l00569"></a>00569     <span class="keywordflow">return</span> __TBB_CompareAndSwap8(const_cast&lt;volatile void *&gt;(ptr),anyvalue,anyvalue);
+<a name="l00570"></a>00570 }
+<a name="l00571"></a>00571 <span class="preprocessor">#endif</span>
+<a name="l00572"></a>00572 <span class="preprocessor"></span>
+<a name="l00573"></a>00573 <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;
+<a name="l00574"></a>00574 <span class="keyword">struct </span>__TBB_machine_load_store&lt;T,8&gt; {
+<a name="l00575"></a>00575     <span class="keyword">static</span> <span class="keyword">inline</span> T load_with_acquire(<span class="keyword">const</span> <span class="keyword">volatile</span> T&amp; location) {
+<a name="l00576"></a>00576         T to_return = (T)__TBB_Load8((<span class="keyword">const</span> <span class="keyword">volatile</span> <span class="keywordtype">void</span>*)&amp;location);
+<a name="l00577"></a>00577         __TBB_release_consistency_helper();
+<a name="l00578"></a>00578         <span class="keywordflow">return</span> to_return;
+<a name="l00579"></a>00579     }
+<a name="l00580"></a>00580 
+<a name="l00581"></a>00581     <span class="keyword">static</span> <span class="keyword">inline</span> <span class="keywordtype">void</span> store_with_release(<span class="keyword">volatile</span> T&amp; location, T value) {
+<a name="l00582"></a>00582         __TBB_release_consistency_helper();
+<a name="l00583"></a>00583         __TBB_Store8((<span class="keyword">volatile</span> <span class="keywordtype">void</span> *)&amp;location,(int64_t)value);
+<a name="l00584"></a>00584     }
+<a name="l00585"></a>00585 };
+<a name="l00586"></a>00586 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_WORDSIZE==4 */</span>
+<a name="l00587"></a>00587 
+<a name="l00588"></a>00588 <span class="preprocessor">#ifndef __TBB_load_with_acquire</span>
+<a name="l00589"></a>00589 <span class="preprocessor"></span><span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00590"></a>00590 <span class="keyword">inline</span> T __TBB_load_with_acquire(<span class="keyword">const</span> <span class="keyword">volatile</span> T &amp;location) {
+<a name="l00591"></a>00591     <span class="keywordflow">return</span> __TBB_machine_load_store&lt;T,sizeof(T)&gt;::load_with_acquire(location);
+<a name="l00592"></a>00592 }
+<a name="l00593"></a>00593 <span class="preprocessor">#endif</span>
+<a name="l00594"></a>00594 <span class="preprocessor"></span>
+<a name="l00595"></a>00595 <span class="preprocessor">#ifndef __TBB_store_with_release</span>
+<a name="l00596"></a>00596 <span class="preprocessor"></span><span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> V&gt;
+<a name="l00597"></a>00597 <span class="keyword">inline</span> <span class="keywordtype">void</span> __TBB_store_with_release(<span class="keyword">volatile</span> T&amp; location, V value) {
+<a name="l00598"></a>00598     __TBB_machine_load_store&lt;T,sizeof(T)&gt;::store_with_release(location,T(value));
+<a name="l00599"></a>00599 }
+<a name="l00601"></a>00601 <span class="keyword">inline</span> <span class="keywordtype">void</span> __TBB_store_with_release(<span class="keyword">volatile</span> size_t&amp; location, size_t value) {
+<a name="l00602"></a>00602     __TBB_machine_load_store&lt;size_t,sizeof(size_t)&gt;::store_with_release(location,value);
+<a name="l00603"></a>00603 }
+<a name="l00604"></a>00604 <span class="preprocessor">#endif</span>
+<a name="l00605"></a>00605 <span class="preprocessor"></span>
+<a name="l00606"></a>00606 <span class="preprocessor">#ifndef __TBB_Log2</span>
+<a name="l00607"></a>00607 <span class="preprocessor"></span><span class="keyword">inline</span> intptr_t __TBB_Log2( uintptr_t x ) {
+<a name="l00608"></a>00608     <span class="keywordflow">if</span>( x==0 ) <span class="keywordflow">return</span> -1;
+<a name="l00609"></a>00609     intptr_t result = 0;
+<a name="l00610"></a>00610     uintptr_t tmp;
+<a name="l00611"></a>00611 <span class="preprocessor">#if __TBB_WORDSIZE&gt;=8</span>
+<a name="l00612"></a>00612 <span class="preprocessor"></span>    <span class="keywordflow">if</span>( (tmp = x&gt;&gt;32) ) { x=tmp; result += 32; }
+<a name="l00613"></a>00613 <span class="preprocessor">#endif</span>
+<a name="l00614"></a>00614 <span class="preprocessor"></span>    <span class="keywordflow">if</span>( (tmp = x&gt;&gt;16) ) { x=tmp; result += 16; }
+<a name="l00615"></a>00615     <span class="keywordflow">if</span>( (tmp = x&gt;&gt;8) )  { x=tmp; result += 8; }
+<a name="l00616"></a>00616     <span class="keywordflow">if</span>( (tmp = x&gt;&gt;4) )  { x=tmp; result += 4; }
+<a name="l00617"></a>00617     <span class="keywordflow">if</span>( (tmp = x&gt;&gt;2) )  { x=tmp; result += 2; }
+<a name="l00618"></a>00618     <span class="keywordflow">return</span> (x&amp;2)? result+1: result;
+<a name="l00619"></a>00619 }
+<a name="l00620"></a>00620 <span class="preprocessor">#endif</span>
+<a name="l00621"></a>00621 <span class="preprocessor"></span>
+<a name="l00622"></a>00622 <span class="preprocessor">#ifndef __TBB_AtomicOR</span>
+<a name="l00623"></a>00623 <span class="preprocessor"></span><span class="keyword">inline</span> <span class="keywordtype">void</span> __TBB_AtomicOR( <span class="keyword">volatile</span> <span class="keywordtype">void</span> *operand, uintptr_t addend ) {
+<a name="l00624"></a>00624     <a class="code" href="a00149.html">tbb::internal::atomic_backoff</a> b;
+<a name="l00625"></a>00625     <span class="keywordflow">for</span>(;;) {
+<a name="l00626"></a>00626         uintptr_t tmp = *(<span class="keyword">volatile</span> uintptr_t *)operand;
+<a name="l00627"></a>00627         uintptr_t result = __TBB_CompareAndSwapW(operand, tmp|addend, tmp);
+<a name="l00628"></a>00628         <span class="keywordflow">if</span>( result==tmp ) <span class="keywordflow">break</span>;
+<a name="l00629"></a>00629         b.<a class="code" href="a00149.html#a174ea93e3bd3d5cce82389c2f28d037">pause</a>();
+<a name="l00630"></a>00630     }
+<a name="l00631"></a>00631 }
+<a name="l00632"></a>00632 <span class="preprocessor">#endif</span>
+<a name="l00633"></a>00633 <span class="preprocessor"></span>
+<a name="l00634"></a>00634 <span class="preprocessor">#ifndef __TBB_AtomicAND</span>
+<a name="l00635"></a>00635 <span class="preprocessor"></span><span class="keyword">inline</span> <span class="keywordtype">void</span> __TBB_AtomicAND( <span class="keyword">volatile</span> <span class="keywordtype">void</span> *operand, uintptr_t addend ) {
+<a name="l00636"></a>00636     <a class="code" href="a00149.html">tbb::internal::atomic_backoff</a> b;
+<a name="l00637"></a>00637     <span class="keywordflow">for</span>(;;) {
+<a name="l00638"></a>00638         uintptr_t tmp = *(<span class="keyword">volatile</span> uintptr_t *)operand;
+<a name="l00639"></a>00639         uintptr_t result = __TBB_CompareAndSwapW(operand, tmp&amp;addend, tmp);
+<a name="l00640"></a>00640         <span class="keywordflow">if</span>( result==tmp ) <span class="keywordflow">break</span>;
+<a name="l00641"></a>00641         b.<a class="code" href="a00149.html#a174ea93e3bd3d5cce82389c2f28d037">pause</a>();
+<a name="l00642"></a>00642     }
+<a name="l00643"></a>00643 }
+<a name="l00644"></a>00644 <span class="preprocessor">#endif</span>
+<a name="l00645"></a>00645 <span class="preprocessor"></span>
+<a name="l00646"></a>00646 <span class="preprocessor">#ifndef __TBB_TryLockByte</span>
+<a name="l00647"></a>00647 <span class="preprocessor"></span><span class="keyword">inline</span> <span class="keywordtype">bool</span> __TBB_TryLockByte( <span class="keywordtype">unsigned</span> <span class="keywordtype">char</span> &amp;flag ) {
+<a name="l00648"></a>00648     <span class="keywordflow">return</span> __TBB_CompareAndSwap1(&amp;flag,1,0)==0;
+<a name="l00649"></a>00649 }
+<a name="l00650"></a>00650 <span class="preprocessor">#endif</span>
+<a name="l00651"></a>00651 <span class="preprocessor"></span>
+<a name="l00652"></a>00652 <span class="preprocessor">#ifndef __TBB_LockByte</span>
+<a name="l00653"></a>00653 <span class="preprocessor"></span><span class="keyword">inline</span> uintptr_t __TBB_LockByte( <span class="keywordtype">unsigned</span> <span class="keywordtype">char</span>&amp; flag ) {
+<a name="l00654"></a>00654     <span class="keywordflow">if</span> ( !__TBB_TryLockByte(flag) ) {
+<a name="l00655"></a>00655         <a class="code" href="a00149.html">tbb::internal::atomic_backoff</a> b;
+<a name="l00656"></a>00656         <span class="keywordflow">do</span> {
+<a name="l00657"></a>00657             b.<a class="code" href="a00149.html#a174ea93e3bd3d5cce82389c2f28d037">pause</a>();
+<a name="l00658"></a>00658         } <span class="keywordflow">while</span> ( !__TBB_TryLockByte(flag) );
+<a name="l00659"></a>00659     }
+<a name="l00660"></a>00660     <span class="keywordflow">return</span> 0;
+<a name="l00661"></a>00661 }
+<a name="l00662"></a>00662 <span class="preprocessor">#endif</span>
+<a name="l00663"></a>00663 <span class="preprocessor"></span>
+<a name="l00664"></a>00664 <span class="preprocessor">#ifndef __TBB_ReverseByte</span>
+<a name="l00665"></a>00665 <span class="preprocessor"></span><span class="keyword">inline</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">char</span> __TBB_ReverseByte(<span class="keywordtype">unsigned</span> <span class="keywordtype">char</span> src) {
+<a name="l00666"></a>00666     <span class="keywordflow">return</span> tbb::internal::reverse&lt;unsigned char&gt;::byte_table[src];
+<a name="l00667"></a>00667 }
+<a name="l00668"></a>00668 <span class="preprocessor">#endif</span>
+<a name="l00669"></a>00669 <span class="preprocessor"></span>
+<a name="l00670"></a>00670 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00671"></a>00671 T __TBB_ReverseBits(T src)
+<a name="l00672"></a>00672 {
+<a name="l00673"></a>00673     T dst;
+<a name="l00674"></a>00674     <span class="keywordtype">unsigned</span> <span class="keywordtype">char</span> *original = (<span class="keywordtype">unsigned</span> <span class="keywordtype">char</span> *) &amp;src;
+<a name="l00675"></a>00675     <span class="keywordtype">unsigned</span> <span class="keywordtype">char</span> *reversed = (<span class="keywordtype">unsigned</span> <span class="keywordtype">char</span> *) &amp;dst;
+<a name="l00676"></a>00676 
+<a name="l00677"></a>00677     <span class="keywordflow">for</span>( <span class="keywordtype">int</span> i = <span class="keyword">sizeof</span>(T)-1; i &gt;= 0; i-- )
+<a name="l00678"></a>00678         reversed[i] = __TBB_ReverseByte( original[<span class="keyword">sizeof</span>(T)-i-1] );
+<a name="l00679"></a>00679 
+<a name="l00680"></a>00680     <span class="keywordflow">return</span> dst;
+<a name="l00681"></a>00681 }
+<a name="l00682"></a>00682 
+<a name="l00683"></a>00683 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_machine_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
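
The byte-lock helpers at the end of this tbb_machine.h listing (__TBB_TryLockByte and __TBB_LockByte) implement a plain test-and-set spin lock on top of the compare-and-swap macros, with tbb::internal::atomic_backoff absorbing contention. As a rough illustration only, the standalone C++ sketch below mirrors that pattern with std::atomic in place of the platform-specific __TBB_CompareAndSwap1 and with std::this_thread::yield() as a stand-in for the backoff object; the names try_lock_byte, lock_byte and unlock_byte are illustrative, not part of TBB.

// Standalone sketch (not TBB code): the same test-and-set spin-lock pattern
// as __TBB_TryLockByte/__TBB_LockByte, expressed with std::atomic.
#include <atomic>
#include <cassert>
#include <thread>

static std::atomic<unsigned char> lock_flag{0};

// Like __TBB_TryLockByte: acquire the lock only if the byte is currently 0.
bool try_lock_byte() {
    unsigned char expected = 0;
    return lock_flag.compare_exchange_strong(expected, 1, std::memory_order_acquire);
}

// Like __TBB_LockByte: spin until the lock is acquired; yield() stands in
// for tbb::internal::atomic_backoff::pause().
void lock_byte() {
    while (!try_lock_byte())
        std::this_thread::yield();
}

// Release the lock with release semantics, analogous to __TBB_store_with_release.
void unlock_byte() {
    lock_flag.store(0, std::memory_order_release);
}

int main() {
    int counter = 0;
    auto work = [&counter] {
        for (int i = 0; i < 100000; ++i) { lock_byte(); ++counter; unlock_byte(); }
    };
    std::thread t(work);
    work();
    t.join();
    assert(counter == 200000);   // the lock serializes all increments
    return 0;
}
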
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00470.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00470.html
new file mode 100644 (file)
index 0000000..cf221d6
--- /dev/null
@@ -0,0 +1,125 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb_profiling.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb_profiling.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_profiling_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_profiling_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="comment">// Check if the tools support is enabled</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#if (_WIN32||_WIN64||__linux__) &amp;&amp; !__MINGW32__ &amp;&amp; TBB_USE_THREADING_TOOLS</span>
+<a name="l00026"></a>00026 <span class="preprocessor"></span>
+<a name="l00027"></a>00027 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00028"></a>00028 <span class="preprocessor"></span><span class="preprocessor">#include &lt;stdlib.h&gt;</span>  <span class="comment">/* mbstowcs_s */</span>
+<a name="l00029"></a>00029 <span class="preprocessor">#endif</span>
+<a name="l00030"></a>00030 <span class="preprocessor"></span><span class="preprocessor">#include "tbb_stddef.h"</span>
+<a name="l00031"></a>00031 
+<a name="l00032"></a>00032 <span class="keyword">namespace </span>tbb {
+<a name="l00033"></a>00033     <span class="keyword">namespace </span>internal {
+<a name="l00034"></a>00034 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00035"></a>00035 <span class="preprocessor"></span>        <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC itt_set_sync_name_v3( <span class="keywordtype">void</span> *obj, <span class="keyword">const</span> <span class="keywordtype">wchar_t</span>* name ); 
+<a name="l00036"></a>00036         <span class="keyword">inline</span> size_t multibyte_to_widechar( <span class="keywordtype">wchar_t</span>* wcs, <span class="keyword">const</span> <span class="keywordtype">char</span>* mbs, size_t bufsize) {
+<a name="l00037"></a>00037 <span class="preprocessor">#if _MSC_VER&gt;=1400</span>
+<a name="l00038"></a>00038 <span class="preprocessor"></span>            size_t len;
+<a name="l00039"></a>00039             mbstowcs_s( &amp;len, wcs, bufsize, mbs, _TRUNCATE );
+<a name="l00040"></a>00040             <span class="keywordflow">return</span> len;   <span class="comment">// mbstowcs_s counts null terminator</span>
+<a name="l00041"></a>00041 <span class="preprocessor">#else</span>
+<a name="l00042"></a>00042 <span class="preprocessor"></span>            size_t len = mbstowcs( wcs, mbs, bufsize );
+<a name="l00043"></a>00043             <span class="keywordflow">if</span>(wcs &amp;&amp; len!=size_t(-1) )
+<a name="l00044"></a>00044                 wcs[len&lt;bufsize-1? len: bufsize-1] = wchar_t(<span class="charliteral">'\0'</span>);
+<a name="l00045"></a>00045             <span class="keywordflow">return</span> len+1; <span class="comment">// mbstowcs does not count null terminator</span>
+<a name="l00046"></a>00046 <span class="preprocessor">#endif</span>
+<a name="l00047"></a>00047 <span class="preprocessor"></span>        }
+<a name="l00048"></a>00048 <span class="preprocessor">#else</span>
+<a name="l00049"></a>00049 <span class="preprocessor"></span>        <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC itt_set_sync_name_v3( <span class="keywordtype">void</span> *obj, <span class="keyword">const</span> <span class="keywordtype">char</span>* name ); 
+<a name="l00050"></a>00050 <span class="preprocessor">#endif</span>
+<a name="l00051"></a>00051 <span class="preprocessor"></span>    } <span class="comment">// namespace internal</span>
+<a name="l00052"></a>00052 } <span class="comment">// namespace tbb</span>
+<a name="l00053"></a>00053 
+<a name="l00055"></a>00055 
+<a name="l00057"></a>00057 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00058"></a>00058 <span class="preprocessor"></span><span class="preprocessor">    #define __TBB_DEFINE_PROFILING_SET_NAME(sync_object_type)    \</span>
+<a name="l00059"></a>00059 <span class="preprocessor">        namespace profiling {                                                       \</span>
+<a name="l00060"></a>00060 <span class="preprocessor">            inline void set_name( sync_object_type&amp; obj, const wchar_t* name ) {    \</span>
+<a name="l00061"></a>00061 <span class="preprocessor">                tbb::internal::itt_set_sync_name_v3( &amp;obj, name );                  \</span>
+<a name="l00062"></a>00062 <span class="preprocessor">            }                                                                       \</span>
+<a name="l00063"></a>00063 <span class="preprocessor">            inline void set_name( sync_object_type&amp; obj, const char* name ) {       \</span>
+<a name="l00064"></a>00064 <span class="preprocessor">                size_t len = tbb::internal::multibyte_to_widechar(NULL, name, 0);   \</span>
+<a name="l00065"></a>00065 <span class="preprocessor">                wchar_t *wname = new wchar_t[len];                                  \</span>
+<a name="l00066"></a>00066 <span class="preprocessor">                tbb::internal::multibyte_to_widechar(wname, name, len);             \</span>
+<a name="l00067"></a>00067 <span class="preprocessor">                set_name( obj, wname );                                             \</span>
+<a name="l00068"></a>00068 <span class="preprocessor">                delete[] wname;                                                     \</span>
+<a name="l00069"></a>00069 <span class="preprocessor">            }                                                                       \</span>
+<a name="l00070"></a>00070 <span class="preprocessor">        }</span>
+<a name="l00071"></a>00071 <span class="preprocessor"></span><span class="preprocessor">#else </span><span class="comment">/* !WIN */</span>
+<a name="l00072"></a>00072 <span class="preprocessor">    #define __TBB_DEFINE_PROFILING_SET_NAME(sync_object_type)    \</span>
+<a name="l00073"></a>00073 <span class="preprocessor">        namespace profiling {                                                       \</span>
+<a name="l00074"></a>00074 <span class="preprocessor">            inline void set_name( sync_object_type&amp; obj, const char* name ) {       \</span>
+<a name="l00075"></a>00075 <span class="preprocessor">                tbb::internal::itt_set_sync_name_v3( &amp;obj, name );                  \</span>
+<a name="l00076"></a>00076 <span class="preprocessor">            }                                                                       \</span>
+<a name="l00077"></a>00077 <span class="preprocessor">        }</span>
+<a name="l00078"></a>00078 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* !WIN */</span>
+<a name="l00079"></a>00079 
+<a name="l00080"></a>00080 <span class="preprocessor">#else </span><span class="comment">/* no tools support */</span>
+<a name="l00081"></a>00081 
+<a name="l00082"></a>00082 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00083"></a>00083 <span class="preprocessor"></span><span class="preprocessor">    #define __TBB_DEFINE_PROFILING_SET_NAME(sync_object_type)    \</span>
+<a name="l00084"></a>00084 <span class="preprocessor">        namespace profiling {                                               \</span>
+<a name="l00085"></a>00085 <span class="preprocessor">            inline void set_name( sync_object_type&amp;, const wchar_t* ) {}    \</span>
+<a name="l00086"></a>00086 <span class="preprocessor">            inline void set_name( sync_object_type&amp;, const char* ) {}       \</span>
+<a name="l00087"></a>00087 <span class="preprocessor">        }</span>
+<a name="l00088"></a>00088 <span class="preprocessor"></span><span class="preprocessor">#else </span><span class="comment">/* !WIN */</span>
+<a name="l00089"></a>00089 <span class="preprocessor">    #define __TBB_DEFINE_PROFILING_SET_NAME(sync_object_type)    \</span>
+<a name="l00090"></a>00090 <span class="preprocessor">        namespace profiling {                                               \</span>
+<a name="l00091"></a>00091 <span class="preprocessor">            inline void set_name( sync_object_type&amp;, const char* ) {}       \</span>
+<a name="l00092"></a>00092 <span class="preprocessor">        }</span>
+<a name="l00093"></a>00093 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* !WIN */</span>
+<a name="l00094"></a>00094 
+<a name="l00095"></a>00095 <span class="preprocessor">#endif </span><span class="comment">/* no tools support */</span>
+<a name="l00096"></a>00096 
+<a name="l00097"></a>00097 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_profiling_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
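
The __TBB_DEFINE_PROFILING_SET_NAME macro in this tbb_profiling.h listing stamps out a profiling::set_name overload per synchronization type: when threading-tools support is enabled it forwards to itt_set_sync_name_v3 (converting char* to wchar_t* on Windows), and when it is disabled the overloads are empty inline functions that compile away. The standalone sketch below shows the disabled, non-Windows shape of that expansion; the macro name DEFINE_PROFILING_SET_NAME and the my_mutex type are hypothetical stand-ins, not TBB identifiers.

// Standalone sketch (hypothetical names): what the "no tools support",
// non-Windows branch of __TBB_DEFINE_PROFILING_SET_NAME boils down to --
// an empty inline set_name() per sync type, so call sites cost nothing.
#include <iostream>

struct my_mutex {};   // stand-in for a TBB synchronization object type

#define DEFINE_PROFILING_SET_NAME(sync_object_type)                  \
    namespace profiling {                                            \
        inline void set_name( sync_object_type&, const char* ) {}    \
    }

DEFINE_PROFILING_SET_NAME(my_mutex)

int main() {
    my_mutex m;
    profiling::set_name(m, "my_mutex");   // no-op here; would label the object for tracing tools when enabled
    std::cout << "set_name compiled to a no-op\n";
    return 0;
}
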
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00471.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00471.html
new file mode 100644 (file)
index 0000000..42b425c
--- /dev/null
@@ -0,0 +1,260 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb_stddef.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb_stddef.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_tbb_stddef_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_tbb_stddef_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="comment">// Marketing-driven product version</span>
+<a name="l00025"></a>00025 <span class="preprocessor">#define TBB_VERSION_MAJOR 3</span>
+<a name="l00026"></a>00026 <span class="preprocessor"></span><span class="preprocessor">#define TBB_VERSION_MINOR 0</span>
+<a name="l00027"></a>00027 <span class="preprocessor"></span>
+<a name="l00028"></a>00028 <span class="comment">// Engineering-focused interface version</span>
+<a name="l00029"></a>00029 <span class="preprocessor">#define TBB_INTERFACE_VERSION 5003</span>
+<a name="l00030"></a>00030 <span class="preprocessor"></span><span class="preprocessor">#define TBB_INTERFACE_VERSION_MAJOR TBB_INTERFACE_VERSION/1000</span>
+<a name="l00031"></a>00031 <span class="preprocessor"></span>
+<a name="l00032"></a>00032 <span class="comment">// The oldest major interface version still supported</span>
+<a name="l00033"></a>00033 <span class="comment">// To be used in SONAME, manifests, etc.</span>
+<a name="l00034"></a>00034 <span class="preprocessor">#define TBB_COMPATIBLE_INTERFACE_VERSION 2</span>
+<a name="l00035"></a>00035 <span class="preprocessor"></span>
+<a name="l00036"></a>00036 <span class="preprocessor">#define __TBB_STRING_AUX(x) #x</span>
+<a name="l00037"></a>00037 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_STRING(x) __TBB_STRING_AUX(x)</span>
+<a name="l00038"></a>00038 <span class="preprocessor"></span>
+<a name="l00039"></a>00039 <span class="comment">// We do not need defines below for resource processing on windows</span>
+<a name="l00040"></a>00040 <span class="preprocessor">#if !defined RC_INVOKED</span>
+<a name="l00041"></a>00041 <span class="preprocessor"></span>
+<a name="l00042"></a>00042 <span class="comment">// Define groups for Doxygen documentation</span>
+<a name="l00052"></a>00052 <span class="comment"></span><span class="comment">// Simple text that is displayed on the main page of Doxygen documentation.</span>
+<a name="l00094"></a>00094 <span class="comment"></span><span class="comment">// Define preprocessor symbols used to determine architecture</span>
+<a name="l00095"></a>00095 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00096"></a>00096 <span class="preprocessor"></span><span class="preprocessor">#   if defined(_M_AMD64)</span>
+<a name="l00097"></a>00097 <span class="preprocessor"></span><span class="preprocessor">#       define __TBB_x86_64 1</span>
+<a name="l00098"></a>00098 <span class="preprocessor"></span><span class="preprocessor">#   elif defined(_M_IA64)</span>
+<a name="l00099"></a>00099 <span class="preprocessor"></span><span class="preprocessor">#       define __TBB_ipf 1</span>
+<a name="l00100"></a>00100 <span class="preprocessor"></span><span class="preprocessor">#   elif defined(_M_IX86)||defined(__i386__) // the latter for MinGW support</span>
+<a name="l00101"></a>00101 <span class="preprocessor"></span><span class="preprocessor">#       define __TBB_x86_32 1</span>
+<a name="l00102"></a>00102 <span class="preprocessor"></span><span class="preprocessor">#   endif</span>
+<a name="l00103"></a>00103 <span class="preprocessor"></span><span class="preprocessor">#else </span><span class="comment">/* Assume generic Unix */</span>
+<a name="l00104"></a>00104 <span class="preprocessor">#   if !__linux__ &amp;&amp; !__APPLE__</span>
+<a name="l00105"></a>00105 <span class="preprocessor"></span><span class="preprocessor">#       define __TBB_generic_os 1</span>
+<a name="l00106"></a>00106 <span class="preprocessor"></span><span class="preprocessor">#   endif</span>
+<a name="l00107"></a>00107 <span class="preprocessor"></span><span class="preprocessor">#   if __x86_64__</span>
+<a name="l00108"></a>00108 <span class="preprocessor"></span><span class="preprocessor">#       define __TBB_x86_64 1</span>
+<a name="l00109"></a>00109 <span class="preprocessor"></span><span class="preprocessor">#   elif __ia64__</span>
+<a name="l00110"></a>00110 <span class="preprocessor"></span><span class="preprocessor">#       define __TBB_ipf 1</span>
+<a name="l00111"></a>00111 <span class="preprocessor"></span><span class="preprocessor">#   elif __i386__||__i386  // __i386 is for Sun OS</span>
+<a name="l00112"></a>00112 <span class="preprocessor"></span><span class="preprocessor">#       define __TBB_x86_32 1</span>
+<a name="l00113"></a>00113 <span class="preprocessor"></span><span class="preprocessor">#   else</span>
+<a name="l00114"></a>00114 <span class="preprocessor"></span><span class="preprocessor">#       define __TBB_generic_arch 1</span>
+<a name="l00115"></a>00115 <span class="preprocessor"></span><span class="preprocessor">#   endif</span>
+<a name="l00116"></a>00116 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00117"></a>00117 <span class="preprocessor"></span>
+<a name="l00118"></a>00118 <span class="preprocessor">#if _MSC_VER</span>
+<a name="l00119"></a>00119 <span class="preprocessor"></span><span class="comment">// define the parts of stdint.h that are needed, but put them inside tbb::internal</span>
+<a name="l00120"></a>00120 <span class="keyword">namespace </span>tbb {
+<a name="l00121"></a>00121 <span class="keyword">namespace </span>internal {
+<a name="l00122"></a>00122     <span class="keyword">typedef</span> __int8 int8_t;
+<a name="l00123"></a>00123     <span class="keyword">typedef</span> __int16 int16_t;
+<a name="l00124"></a>00124     <span class="keyword">typedef</span> __int32 int32_t;
+<a name="l00125"></a>00125     <span class="keyword">typedef</span> __int64 int64_t;
+<a name="l00126"></a>00126     <span class="keyword">typedef</span> <span class="keywordtype">unsigned</span> __int8 uint8_t;
+<a name="l00127"></a>00127     <span class="keyword">typedef</span> <span class="keywordtype">unsigned</span> __int16 uint16_t;
+<a name="l00128"></a>00128     <span class="keyword">typedef</span> <span class="keywordtype">unsigned</span> __int32 uint32_t;
+<a name="l00129"></a>00129     <span class="keyword">typedef</span> <span class="keywordtype">unsigned</span> __int64 uint64_t;
+<a name="l00130"></a>00130 } <span class="comment">// namespace internal</span>
+<a name="l00131"></a>00131 } <span class="comment">// namespace tbb</span>
+<a name="l00132"></a>00132 <span class="preprocessor">#else</span>
+<a name="l00133"></a>00133 <span class="preprocessor"></span><span class="preprocessor">#include &lt;stdint.h&gt;</span>
+<a name="l00134"></a>00134 <span class="preprocessor">#endif </span><span class="comment">/* _MSC_VER */</span>
+<a name="l00135"></a>00135 
+<a name="l00136"></a>00136 <span class="preprocessor">#if _MSC_VER &gt;=1400</span>
+<a name="l00137"></a>00137 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_EXPORTED_FUNC   __cdecl</span>
+<a name="l00138"></a>00138 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_EXPORTED_METHOD __thiscall</span>
+<a name="l00139"></a>00139 <span class="preprocessor"></span><span class="preprocessor">#else</span>
+<a name="l00140"></a>00140 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_EXPORTED_FUNC</span>
+<a name="l00141"></a>00141 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_EXPORTED_METHOD</span>
+<a name="l00142"></a>00142 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00143"></a>00143 <span class="preprocessor"></span>
+<a name="l00144"></a>00144 <span class="preprocessor">#include &lt;cstddef&gt;</span>      <span class="comment">/* Need size_t and ptrdiff_t */</span>
+<a name="l00145"></a>00145 
+<a name="l00146"></a>00146 <span class="preprocessor">#if _MSC_VER</span>
+<a name="l00147"></a>00147 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_tbb_windef_H</span>
+<a name="l00148"></a>00148 <span class="preprocessor"></span><span class="preprocessor">#include "_tbb_windef.h"</span>
+<a name="l00149"></a>00149 <span class="preprocessor">#undef __TBB_tbb_windef_H</span>
+<a name="l00150"></a>00150 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00151"></a>00151 <span class="preprocessor"></span>
+<a name="l00152"></a>00152 <span class="preprocessor">#include "tbb_config.h"</span>
+<a name="l00153"></a>00153 
+<a name="l00155"></a>00155 <span class="keyword">namespace </span>tbb {
+<a name="l00156"></a>00156 
+<a name="l00157"></a>00157 <span class="keyword">using</span> std::size_t; <span class="keyword">using</span> std::ptrdiff_t;
+<a name="l00158"></a>00158 
+<a name="l00160"></a><a class="code" href="a00272.html#ed375248ff6019a70ca0f9da528e5d0b">00160</a>     <span class="keyword">typedef</span> void(*<a class="code" href="a00272.html#ed375248ff6019a70ca0f9da528e5d0b">assertion_handler_type</a>)( <span class="keyword">const</span> <span class="keywordtype">char</span>* filename, <span class="keywordtype">int</span> line, <span class="keyword">const</span> <span class="keywordtype">char</span>* expression, <span class="keyword">const</span> <span class="keywordtype">char</span> * comment );
+<a name="l00161"></a>00161 
+<a name="l00162"></a>00162 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00163"></a>00163 <span class="preprocessor"></span>
+<a name="l00165"></a>00165 
+<a name="l00168"></a>00168 <span class="preprocessor">#define __TBB_ASSERT(predicate,message) ((predicate)?((void)0):tbb::assertion_failure(__FILE__,__LINE__,#predicate,message))</span>
+<a name="l00169"></a>00169 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_ASSERT_EX __TBB_ASSERT</span>
+<a name="l00170"></a>00170 <span class="preprocessor"></span>
+<a name="l00172"></a>00172     <a class="code" href="a00272.html#ed375248ff6019a70ca0f9da528e5d0b">assertion_handler_type</a> __TBB_EXPORTED_FUNC <a class="code" href="a00272.html#823fa1c15dd829d1d9167157450ddcd9">set_assertion_handler</a>( <a class="code" href="a00272.html#ed375248ff6019a70ca0f9da528e5d0b">assertion_handler_type</a> new_handler );
+<a name="l00173"></a>00173 
+<a name="l00175"></a>00175 
+<a name="l00178"></a>00178     <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC <a class="code" href="a00272.html#3d1252787be39b4aef311f1cadaff9e8">assertion_failure</a>( <span class="keyword">const</span> <span class="keywordtype">char</span>* filename, <span class="keywordtype">int</span> line, <span class="keyword">const</span> <span class="keywordtype">char</span>* expression, <span class="keyword">const</span> <span class="keywordtype">char</span>* comment );
+<a name="l00179"></a>00179 
+<a name="l00180"></a>00180 <span class="preprocessor">#else</span>
+<a name="l00181"></a>00181 <span class="preprocessor"></span>
+<a name="l00183"></a>00183 <span class="preprocessor">#define __TBB_ASSERT(predicate,comment) ((void)0)</span>
+<a name="l00185"></a>00185 <span class="preprocessor">#define __TBB_ASSERT_EX(predicate,comment) ((void)(1 &amp;&amp; (predicate)))</span>
+<a name="l00186"></a>00186 <span class="preprocessor"></span>
+<a name="l00187"></a>00187 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00188"></a>00188 
+<a name="l00190"></a>00190 
+<a name="l00194"></a>00194 <span class="keyword">extern</span> <span class="stringliteral">"C"</span> <span class="keywordtype">int</span> __TBB_EXPORTED_FUNC <a class="code" href="a00272.html#a6858b22e90041c9c4669674ff39b056">TBB_runtime_interface_version</a>();
+<a name="l00195"></a>00195 
+<a name="l00197"></a>00197 
+<a name="l00201"></a><a class="code" href="a00203.html">00201</a> <span class="keyword">class </span><a class="code" href="a00203.html">split</a> {
+<a name="l00202"></a>00202 };
+<a name="l00203"></a>00203 
+<a name="l00208"></a>00208 <span class="keyword">namespace </span>internal {
+<a name="l00209"></a>00209 
+<a name="l00211"></a>00211 
+<a name="l00214"></a>00214 <span class="keyword">const</span> size_t NFS_MaxLineSize = 128;
+<a name="l00215"></a>00215 
+<a name="l00216"></a>00216 <span class="keyword">template</span>&lt;<span class="keyword">class</span> T, <span class="keywordtype">int</span> S&gt;
+<a name="l00217"></a>00217 <span class="keyword">struct </span>padded_base : T {
+<a name="l00218"></a>00218     <span class="keywordtype">char</span> pad[NFS_MaxLineSize - <span class="keyword">sizeof</span>(T) % NFS_MaxLineSize];
+<a name="l00219"></a>00219 };
+<a name="l00220"></a>00220 <span class="keyword">template</span>&lt;<span class="keyword">class</span> T&gt; <span class="keyword">struct </span>padded_base&lt;T, 0&gt; : T {};
+<a name="l00221"></a>00221 
+<a name="l00223"></a>00223 <span class="keyword">template</span>&lt;<span class="keyword">class</span> T&gt;
+<a name="l00224"></a>00224 <span class="keyword">struct </span>padded : padded_base&lt;T, sizeof(T)&gt; {};
+<a name="l00225"></a>00225 
+<a name="l00227"></a>00227 
+<a name="l00229"></a>00229 <span class="preprocessor">#define __TBB_offsetof(class_name, member_name) \</span>
+<a name="l00230"></a>00230 <span class="preprocessor">    ((ptrdiff_t)&amp;(reinterpret_cast&lt;class_name*&gt;(0x1000)-&gt;member_name) - 0x1000)</span>
+<a name="l00231"></a>00231 <span class="preprocessor"></span>
+<a name="l00233"></a>00233 <span class="preprocessor">#define __TBB_get_object_ref(class_name, member_name, member_addr) \</span>
+<a name="l00234"></a>00234 <span class="preprocessor">    (*reinterpret_cast&lt;class_name*&gt;((char*)member_addr - __TBB_offsetof(class_name, member_name)))</span>
+<a name="l00235"></a>00235 <span class="preprocessor"></span>
+<a name="l00237"></a>00237 <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC handle_perror( <span class="keywordtype">int</span> error_code, <span class="keyword">const</span> <span class="keywordtype">char</span>* aux_info );
+<a name="l00238"></a>00238 
+<a name="l00239"></a>00239 <span class="preprocessor">#if TBB_USE_EXCEPTIONS</span>
+<a name="l00240"></a>00240 <span class="preprocessor"></span><span class="preprocessor">    #define __TBB_TRY try</span>
+<a name="l00241"></a>00241 <span class="preprocessor"></span><span class="preprocessor">    #define __TBB_CATCH(e) catch(e)</span>
+<a name="l00242"></a>00242 <span class="preprocessor"></span><span class="preprocessor">    #define __TBB_THROW(e) throw e</span>
+<a name="l00243"></a>00243 <span class="preprocessor"></span><span class="preprocessor">    #define __TBB_RETHROW() throw</span>
+<a name="l00244"></a>00244 <span class="preprocessor"></span><span class="preprocessor">#else </span><span class="comment">/* !TBB_USE_EXCEPTIONS */</span>
+<a name="l00245"></a>00245     <span class="keyword">inline</span> <span class="keywordtype">bool</span> __TBB_false() { <span class="keywordflow">return</span> <span class="keyword">false</span>; }
+<a name="l00246"></a>00246 <span class="preprocessor">    #define __TBB_TRY</span>
+<a name="l00247"></a>00247 <span class="preprocessor"></span><span class="preprocessor">    #define __TBB_CATCH(e) if ( tbb::internal::__TBB_false() )</span>
+<a name="l00248"></a>00248 <span class="preprocessor"></span><span class="preprocessor">    #define __TBB_THROW(e) ((void)0)</span>
+<a name="l00249"></a>00249 <span class="preprocessor"></span><span class="preprocessor">    #define __TBB_RETHROW() ((void)0)</span>
+<a name="l00250"></a>00250 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* !TBB_USE_EXCEPTIONS */</span>
+<a name="l00251"></a>00251 
+<a name="l00253"></a>00253 <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC runtime_warning( <span class="keyword">const</span> <span class="keywordtype">char</span>* format, ... );
+<a name="l00254"></a>00254 
+<a name="l00255"></a>00255 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00256"></a>00256 <span class="preprocessor"></span><span class="keyword">static</span> <span class="keywordtype">void</span>* <span class="keyword">const</span> poisoned_ptr = reinterpret_cast&lt;void*&gt;(-1);
+<a name="l00257"></a>00257 
+<a name="l00259"></a>00259 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00260"></a>00260 <span class="keyword">inline</span> <span class="keywordtype">void</span> poison_pointer( T*&amp; p ) { p = reinterpret_cast&lt;T*&gt;(poisoned_ptr); }
+<a name="l00261"></a>00261 
+<a name="l00263"></a>00263 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00264"></a>00264 <span class="keyword">inline</span> <span class="keywordtype">bool</span> is_poisoned( T* p ) { <span class="keywordflow">return</span> p == reinterpret_cast&lt;T*&gt;(poisoned_ptr); }
+<a name="l00265"></a>00265 <span class="preprocessor">#else</span>
+<a name="l00266"></a>00266 <span class="preprocessor"></span><span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00267"></a>00267 <span class="keyword">inline</span> <span class="keywordtype">void</span> poison_pointer( T* ) {<span class="comment">/*do nothing*/</span>}
+<a name="l00268"></a>00268 <span class="preprocessor">#endif </span><span class="comment">/* !TBB_USE_ASSERT */</span>
+<a name="l00269"></a>00269 
+<a name="l00271"></a>00271 
+<a name="l00273"></a>00273 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T, <span class="keyword">typename</span> U&gt; 
+<a name="l00274"></a>00274 <span class="keyword">inline</span> T punned_cast( U* ptr ) {
+<a name="l00275"></a>00275     uintptr_t x = reinterpret_cast&lt;uintptr_t&gt;(ptr);
+<a name="l00276"></a>00276     <span class="keywordflow">return</span> reinterpret_cast&lt;T&gt;(x);
+<a name="l00277"></a>00277 }
+<a name="l00278"></a>00278 
+<a name="l00280"></a>00280 <span class="keyword">class </span>no_assign {
+<a name="l00281"></a>00281     <span class="comment">// Deny assignment</span>
+<a name="l00282"></a>00282     <span class="keywordtype">void</span> operator=( <span class="keyword">const</span> no_assign&amp; );
+<a name="l00283"></a>00283 <span class="keyword">public</span>:
+<a name="l00284"></a>00284 <span class="preprocessor">#if __GNUC__</span>
+<a name="l00286"></a>00286 <span class="preprocessor">    no_assign() {}</span>
+<a name="l00287"></a>00287 <span class="preprocessor"></span><span class="preprocessor">#endif </span><span class="comment">/* __GNUC__ */</span>
+<a name="l00288"></a>00288 };
+<a name="l00289"></a>00289 
+<a name="l00291"></a>00291 <span class="keyword">class </span>no_copy: no_assign {
+<a name="l00293"></a>00293     no_copy( <span class="keyword">const</span> no_copy&amp; );
+<a name="l00294"></a>00294 <span class="keyword">public</span>:
+<a name="l00296"></a>00296     no_copy() {}
+<a name="l00297"></a>00297 };
+<a name="l00298"></a>00298 
+<a name="l00300"></a>00300 <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt;
+<a name="l00301"></a>00301 <span class="keyword">struct </span>allocator_type {
+<a name="l00302"></a>00302     <span class="keyword">typedef</span> T value_type;
+<a name="l00303"></a>00303 };
+<a name="l00304"></a>00304 
+<a name="l00305"></a>00305 <span class="preprocessor">#if _MSC_VER</span>
+<a name="l00307"></a>00307 <span class="preprocessor">template&lt;typename T&gt;</span>
+<a name="l00308"></a>00308 <span class="preprocessor"></span><span class="keyword">struct </span>allocator_type&lt;const T&gt; {
+<a name="l00309"></a>00309     <span class="keyword">typedef</span> T value_type;
+<a name="l00310"></a>00310 };
+<a name="l00311"></a>00311 <span class="preprocessor">#endif</span>
+<a name="l00312"></a>00312 <span class="preprocessor"></span>
+<a name="l00313"></a>00313 <span class="comment">// Struct to be used as a version tag for inline functions.</span>
+<a name="l00316"></a>00316 <span class="comment"></span><span class="keyword">struct </span>version_tag_v3 {};
+<a name="l00317"></a>00317 
+<a name="l00318"></a>00318 <span class="keyword">typedef</span> version_tag_v3 version_tag;
+<a name="l00319"></a>00319 
+<a name="l00320"></a>00320 } <span class="comment">// internal</span>
+<a name="l00322"></a>00322 <span class="comment"></span>
+<a name="l00323"></a>00323 } <span class="comment">// tbb</span>
+<a name="l00324"></a>00324 
+<a name="l00325"></a>00325 <span class="preprocessor">#endif </span><span class="comment">/* RC_INVOKED */</span>
+<a name="l00326"></a>00326 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_tbb_stddef_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
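
The padded_base/padded templates in this tbb_stddef.h listing pad a type out to a multiple of NFS_MaxLineSize (128 bytes) so that adjacent objects land on separate cache lines and avoid false sharing. The standalone sketch below reproduces that idiom; as a small adaptation rather than a verbatim copy, it uses sizeof(T) % NFS_MaxLineSize as the selector so the zero-padding specialization is reachable, and the counter type is just an example payload.

// Standalone sketch (not TBB code): the cache-line padding idiom behind
// tbb::internal::padded_base / padded.
#include <cstddef>
#include <iostream>

const std::size_t NFS_MaxLineSize = 128;   // same value as in tbb_stddef.h

template<class T, int S>        // S = sizeof(T) % NFS_MaxLineSize
struct padded_base : T {
    char pad[NFS_MaxLineSize - sizeof(T) % NFS_MaxLineSize];   // round size up to 128
};
template<class T> struct padded_base<T, 0> : T {};             // already a multiple: no pad

template<class T>
struct padded : padded_base<T, sizeof(T) % NFS_MaxLineSize> {};

struct counter { long value; };   // example payload

int main() {
    std::cout << "sizeof(counter)         = " << sizeof(counter) << '\n';
    std::cout << "sizeof(padded<counter>) = " << sizeof(padded<counter>) << '\n';   // 128
    return 0;
}
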
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00478.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00478.html
new file mode 100644 (file)
index 0000000..fe48670
--- /dev/null
@@ -0,0 +1,297 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbb_thread.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbb_thread.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_tbb_thread_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_tbb_thread_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00025"></a>00025 <span class="preprocessor"></span><span class="preprocessor">#include "machine/windows_api.h"</span>
+<a name="l00026"></a>00026 <span class="preprocessor">#define __TBB_NATIVE_THREAD_ROUTINE unsigned WINAPI</span>
+<a name="l00027"></a>00027 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_NATIVE_THREAD_ROUTINE_PTR(r) unsigned (WINAPI* r)( void* )</span>
+<a name="l00028"></a>00028 <span class="preprocessor"></span><span class="preprocessor">#else</span>
+<a name="l00029"></a>00029 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_NATIVE_THREAD_ROUTINE void*</span>
+<a name="l00030"></a>00030 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_NATIVE_THREAD_ROUTINE_PTR(r) void* (*r)( void* )</span>
+<a name="l00031"></a>00031 <span class="preprocessor"></span><span class="preprocessor">#include &lt;pthread.h&gt;</span>
+<a name="l00032"></a>00032 <span class="preprocessor">#endif // _WIN32||_WIN64</span>
+<a name="l00033"></a>00033 <span class="preprocessor"></span>
+<a name="l00034"></a>00034 <span class="preprocessor">#include "tbb_stddef.h"</span>
+<a name="l00035"></a>00035 <span class="preprocessor">#include "tick_count.h"</span>
+<a name="l00036"></a>00036 
+<a name="l00037"></a>00037 <span class="preprocessor">#if !TBB_USE_EXCEPTIONS &amp;&amp; _MSC_VER</span>
+<a name="l00038"></a>00038 <span class="preprocessor"></span>    <span class="comment">// Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers</span>
+<a name="l00039"></a>00039 <span class="preprocessor">    #pragma warning (push)</span>
+<a name="l00040"></a>00040 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (disable: 4530)</span>
+<a name="l00041"></a>00041 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00042"></a>00042 <span class="preprocessor"></span>
+<a name="l00043"></a>00043 <span class="preprocessor">#include &lt;iosfwd&gt;</span>
+<a name="l00044"></a>00044 
+<a name="l00045"></a>00045 <span class="preprocessor">#if !TBB_USE_EXCEPTIONS &amp;&amp; _MSC_VER</span>
+<a name="l00046"></a>00046 <span class="preprocessor"></span><span class="preprocessor">    #pragma warning (pop)</span>
+<a name="l00047"></a>00047 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00048"></a>00048 <span class="preprocessor"></span>
+<a name="l00049"></a>00049 <span class="keyword">namespace </span>tbb {
+<a name="l00050"></a>00050 
+<a name="l00052"></a>00052 <span class="keyword">namespace </span>internal {
+<a name="l00053"></a>00053     
+<a name="l00054"></a>00054     <span class="keyword">class </span>tbb_thread_v3;
+<a name="l00055"></a>00055 
+<a name="l00056"></a>00056 } <span class="comment">// namespace internal</span>
+<a name="l00057"></a>00057 
+<a name="l00058"></a>00058 <span class="keywordtype">void</span> swap( internal::tbb_thread_v3&amp; t1, internal::tbb_thread_v3&amp; t2 ); 
+<a name="l00059"></a>00059 
+<a name="l00060"></a>00060 <span class="keyword">namespace </span>internal {
+<a name="l00061"></a>00061 
+<a name="l00063"></a>00063     <span class="keywordtype">void</span>* __TBB_EXPORTED_FUNC allocate_closure_v3( size_t size );
+<a name="l00065"></a>00065     <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC free_closure_v3( <span class="keywordtype">void</span>* );
+<a name="l00066"></a>00066    
+<a name="l00067"></a>00067     <span class="keyword">struct </span>thread_closure_base {
+<a name="l00068"></a>00068         <span class="keywordtype">void</span>* operator new( size_t size ) {<span class="keywordflow">return</span> allocate_closure_v3(size);}
+<a name="l00069"></a>00069         <span class="keywordtype">void</span> operator delete( <span class="keywordtype">void</span>* ptr ) {free_closure_v3(ptr);}
+<a name="l00070"></a>00070     };
+<a name="l00071"></a>00071 
+<a name="l00072"></a>00072     <span class="keyword">template</span>&lt;<span class="keyword">class</span> F&gt; <span class="keyword">struct </span>thread_closure_0: thread_closure_base {
+<a name="l00073"></a>00073         F function;
+<a name="l00074"></a>00074 
+<a name="l00075"></a>00075         <span class="keyword">static</span> __TBB_NATIVE_THREAD_ROUTINE start_routine( <span class="keywordtype">void</span>* c ) {
+<a name="l00076"></a>00076             thread_closure_0 *<span class="keyword">self</span> = static_cast&lt;thread_closure_0*&gt;(c);
+<a name="l00077"></a>00077             <span class="keyword">self</span>-&gt;function();
+<a name="l00078"></a>00078             <span class="keyword">delete</span> <span class="keyword">self</span>;
+<a name="l00079"></a>00079             <span class="keywordflow">return</span> 0;
+<a name="l00080"></a>00080         }
+<a name="l00081"></a>00081         thread_closure_0( <span class="keyword">const</span> F&amp; f ) : function(f) {}
+<a name="l00082"></a>00082     };
+<a name="l00084"></a>00084     <span class="keyword">template</span>&lt;<span class="keyword">class</span> F, <span class="keyword">class</span> X&gt; <span class="keyword">struct </span>thread_closure_1: thread_closure_base {
+<a name="l00085"></a>00085         F function;
+<a name="l00086"></a>00086         X arg1;
+<a name="l00088"></a>00088         <span class="keyword">static</span> __TBB_NATIVE_THREAD_ROUTINE start_routine( <span class="keywordtype">void</span>* c ) {
+<a name="l00089"></a>00089             thread_closure_1 *<span class="keyword">self</span> = static_cast&lt;thread_closure_1*&gt;(c);
+<a name="l00090"></a>00090             <span class="keyword">self</span>-&gt;function(self-&gt;arg1);
+<a name="l00091"></a>00091             <span class="keyword">delete</span> <span class="keyword">self</span>;
+<a name="l00092"></a>00092             <span class="keywordflow">return</span> 0;
+<a name="l00093"></a>00093         }
+<a name="l00094"></a>00094         thread_closure_1( <span class="keyword">const</span> F&amp; f, <span class="keyword">const</span> X&amp; x ) : function(f), arg1(x) {}
+<a name="l00095"></a>00095     };
+<a name="l00096"></a>00096     <span class="keyword">template</span>&lt;<span class="keyword">class</span> F, <span class="keyword">class</span> X, <span class="keyword">class</span> Y&gt; <span class="keyword">struct </span>thread_closure_2: thread_closure_base {
+<a name="l00097"></a>00097         F function;
+<a name="l00098"></a>00098         X arg1;
+<a name="l00099"></a>00099         Y arg2;
+<a name="l00101"></a>00101         <span class="keyword">static</span> __TBB_NATIVE_THREAD_ROUTINE start_routine( <span class="keywordtype">void</span>* c ) {
+<a name="l00102"></a>00102             thread_closure_2 *<span class="keyword">self</span> = static_cast&lt;thread_closure_2*&gt;(c);
+<a name="l00103"></a>00103             <span class="keyword">self</span>-&gt;function(self-&gt;arg1, self-&gt;arg2);
+<a name="l00104"></a>00104             <span class="keyword">delete</span> <span class="keyword">self</span>;
+<a name="l00105"></a>00105             <span class="keywordflow">return</span> 0;
+<a name="l00106"></a>00106         }
+<a name="l00107"></a>00107         thread_closure_2( <span class="keyword">const</span> F&amp; f, <span class="keyword">const</span> X&amp; x, <span class="keyword">const</span> Y&amp; y ) : function(f), arg1(x), arg2(y) {}
+<a name="l00108"></a>00108     };
+<a name="l00109"></a>00109 
+<a name="l00111"></a>00111     <span class="keyword">class </span>tbb_thread_v3 {
+<a name="l00112"></a>00112         tbb_thread_v3(<span class="keyword">const</span> tbb_thread_v3&amp;); <span class="comment">// = delete;   // Deny access</span>
+<a name="l00113"></a>00113     <span class="keyword">public</span>:
+<a name="l00114"></a>00114 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00115"></a>00115 <span class="preprocessor"></span>        <span class="keyword">typedef</span> HANDLE native_handle_type; 
+<a name="l00116"></a>00116 <span class="preprocessor">#else</span>
+<a name="l00117"></a>00117 <span class="preprocessor"></span>        <span class="keyword">typedef</span> pthread_t native_handle_type; 
+<a name="l00118"></a>00118 <span class="preprocessor">#endif // _WIN32||_WIN64</span>
+<a name="l00119"></a>00119 <span class="preprocessor"></span>
+<a name="l00120"></a>00120         <span class="keyword">class </span>id;
+<a name="l00122"></a>00122         tbb_thread_v3() : my_handle(0)
+<a name="l00123"></a>00123 #if _WIN32||_WIN64
+<a name="l00124"></a>00124             , my_thread_id(0)
+<a name="l00125"></a>00125 #endif <span class="comment">// _WIN32||_WIN64</span>
+<a name="l00126"></a>00126         {}
+<a name="l00127"></a>00127         
+<a name="l00129"></a>00129         <span class="keyword">template</span> &lt;<span class="keyword">class</span> F&gt; <span class="keyword">explicit</span> tbb_thread_v3(F f) {
+<a name="l00130"></a>00130             <span class="keyword">typedef</span> internal::thread_closure_0&lt;F&gt; closure_type;
+<a name="l00131"></a>00131             internal_start(closure_type::start_routine, <span class="keyword">new</span> closure_type(f));
+<a name="l00132"></a>00132         }
+<a name="l00134"></a>00134         <span class="keyword">template</span> &lt;<span class="keyword">class</span> F, <span class="keyword">class</span> X&gt; tbb_thread_v3(F f, X x) {
+<a name="l00135"></a>00135             <span class="keyword">typedef</span> internal::thread_closure_1&lt;F,X&gt; closure_type;
+<a name="l00136"></a>00136             internal_start(closure_type::start_routine, <span class="keyword">new</span> closure_type(f,x));
+<a name="l00137"></a>00137         }
+<a name="l00139"></a>00139         <span class="keyword">template</span> &lt;<span class="keyword">class</span> F, <span class="keyword">class</span> X, <span class="keyword">class</span> Y&gt; tbb_thread_v3(F f, X x, Y y) {
+<a name="l00140"></a>00140             <span class="keyword">typedef</span> internal::thread_closure_2&lt;F,X,Y&gt; closure_type;
+<a name="l00141"></a>00141             internal_start(closure_type::start_routine, <span class="keyword">new</span> closure_type(f,x,y));
+<a name="l00142"></a>00142         }
+<a name="l00143"></a>00143 
+<a name="l00144"></a>00144         tbb_thread_v3&amp; operator=(tbb_thread_v3&amp; x) {
+<a name="l00145"></a>00145             <span class="keywordflow">if</span> (joinable()) detach();
+<a name="l00146"></a>00146             my_handle = x.my_handle;
+<a name="l00147"></a>00147             x.my_handle = 0;
+<a name="l00148"></a>00148 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00149"></a>00149 <span class="preprocessor"></span>            my_thread_id = x.my_thread_id;
+<a name="l00150"></a>00150             x.my_thread_id = 0;
+<a name="l00151"></a>00151 <span class="preprocessor">#endif // _WIN32||_WIN64</span>
+<a name="l00152"></a>00152 <span class="preprocessor"></span>            <span class="keywordflow">return</span> *<span class="keyword">this</span>;
+<a name="l00153"></a>00153         }
+<a name="l00154"></a>00154         <span class="keywordtype">void</span> swap( tbb_thread_v3&amp; t ) {tbb::swap( *<span class="keyword">this</span>, t );}
+<a name="l00155"></a>00155         <span class="keywordtype">bool</span> joinable()<span class="keyword"> const </span>{<span class="keywordflow">return</span> my_handle!=0; }
+<a name="l00157"></a>00157         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD join();
+<a name="l00159"></a>00159         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD detach();
+<a name="l00160"></a>00160         ~tbb_thread_v3() {<span class="keywordflow">if</span>( joinable() ) detach();}
+<a name="l00161"></a>00161         <span class="keyword">inline</span> <span class="keywordtype">id</span> get_id() <span class="keyword">const</span>;
+<a name="l00162"></a>00162         native_handle_type native_handle() { <span class="keywordflow">return</span> my_handle; }
+<a name="l00163"></a>00163     
+<a name="l00165"></a>00165         <span class="keyword">static</span> <span class="keywordtype">unsigned</span> __TBB_EXPORTED_FUNC hardware_concurrency();
+<a name="l00166"></a>00166     <span class="keyword">private</span>:
+<a name="l00167"></a>00167         native_handle_type my_handle; 
+<a name="l00168"></a>00168 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00169"></a>00169 <span class="preprocessor"></span>        DWORD my_thread_id;
+<a name="l00170"></a>00170 <span class="preprocessor">#endif // _WIN32||_WIN64</span>
+<a name="l00171"></a>00171 <span class="preprocessor"></span>
+<a name="l00173"></a>00173         <span class="keywordtype">void</span> __TBB_EXPORTED_METHOD internal_start( __TBB_NATIVE_THREAD_ROUTINE_PTR(start_routine), 
+<a name="l00174"></a>00174                              <span class="keywordtype">void</span>* closure );
+<a name="l00175"></a>00175         <span class="keyword">friend</span> <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC move_v3( tbb_thread_v3&amp; t1, tbb_thread_v3&amp; t2 );
+<a name="l00176"></a>00176         <span class="keyword">friend</span> <span class="keywordtype">void</span> tbb::swap( tbb_thread_v3&amp; t1, tbb_thread_v3&amp; t2 ); 
+<a name="l00177"></a>00177     };
+<a name="l00178"></a>00178         
+<a name="l00179"></a>00179     <span class="keyword">class </span>tbb_thread_v3::id { 
+<a name="l00180"></a>00180 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00181"></a>00181 <span class="preprocessor"></span>        DWORD my_id;
+<a name="l00182"></a>00182         id( DWORD id_ ) : my_id(id_) {}
+<a name="l00183"></a>00183 <span class="preprocessor">#else</span>
+<a name="l00184"></a>00184 <span class="preprocessor"></span>        pthread_t my_id;
+<a name="l00185"></a>00185         id( pthread_t id_ ) : my_id(id_) {}
+<a name="l00186"></a>00186 <span class="preprocessor">#endif // _WIN32||_WIN64</span>
+<a name="l00187"></a>00187 <span class="preprocessor"></span>        <span class="keyword">friend</span> <span class="keyword">class </span>tbb_thread_v3;
+<a name="l00188"></a>00188     <span class="keyword">public</span>:
+<a name="l00189"></a>00189         id() : my_id(0) {}
+<a name="l00190"></a>00190 
+<a name="l00191"></a>00191         <span class="keyword">friend</span> <span class="keywordtype">bool</span> operator==( tbb_thread_v3::id x, tbb_thread_v3::id y );
+<a name="l00192"></a>00192         <span class="keyword">friend</span> <span class="keywordtype">bool</span> operator!=( tbb_thread_v3::id x, tbb_thread_v3::id y );
+<a name="l00193"></a>00193         <span class="keyword">friend</span> <span class="keywordtype">bool</span> operator&lt;( tbb_thread_v3::id x, tbb_thread_v3::id y );
+<a name="l00194"></a>00194         <span class="keyword">friend</span> <span class="keywordtype">bool</span> operator&lt;=( tbb_thread_v3::id x, tbb_thread_v3::id y );
+<a name="l00195"></a>00195         <span class="keyword">friend</span> <span class="keywordtype">bool</span> operator&gt;( tbb_thread_v3::id x, tbb_thread_v3::id y );
+<a name="l00196"></a>00196         <span class="keyword">friend</span> <span class="keywordtype">bool</span> operator&gt;=( tbb_thread_v3::id x, tbb_thread_v3::id y );
+<a name="l00197"></a>00197         
+<a name="l00198"></a>00198         <span class="keyword">template</span>&lt;<span class="keyword">class</span> <span class="keywordtype">char</span>T, <span class="keyword">class</span> traits&gt;
+<a name="l00199"></a>00199         <span class="keyword">friend</span> std::basic_ostream&lt;charT, traits&gt;&amp;
+<a name="l00200"></a>00200         operator&lt;&lt; (std::basic_ostream&lt;charT, traits&gt; &amp;out, 
+<a name="l00201"></a>00201                     tbb_thread_v3::id id)
+<a name="l00202"></a>00202         {
+<a name="l00203"></a>00203             out &lt;&lt; <span class="keywordtype">id</span>.my_id;
+<a name="l00204"></a>00204             <span class="keywordflow">return</span> out;
+<a name="l00205"></a>00205         }
+<a name="l00206"></a>00206         <span class="keyword">friend</span> tbb_thread_v3::id __TBB_EXPORTED_FUNC thread_get_id_v3();
+<a name="l00207"></a>00207     }; <span class="comment">// tbb_thread_v3::id</span>
+<a name="l00208"></a>00208 
+<a name="l00209"></a>00209     tbb_thread_v3::id tbb_thread_v3::get_id()<span class="keyword"> const </span>{
+<a name="l00210"></a>00210 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00211"></a>00211 <span class="preprocessor"></span>        <span class="keywordflow">return</span> id(my_thread_id);
+<a name="l00212"></a>00212 <span class="preprocessor">#else</span>
+<a name="l00213"></a>00213 <span class="preprocessor"></span>        <span class="keywordflow">return</span> id(my_handle);
+<a name="l00214"></a>00214 <span class="preprocessor">#endif // _WIN32||_WIN64</span>
+<a name="l00215"></a>00215 <span class="preprocessor"></span>    }
+<a name="l00216"></a>00216     <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC move_v3( tbb_thread_v3&amp; t1, tbb_thread_v3&amp; t2 );
+<a name="l00217"></a>00217     tbb_thread_v3::id __TBB_EXPORTED_FUNC thread_get_id_v3();
+<a name="l00218"></a>00218     <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC thread_yield_v3();
+<a name="l00219"></a>00219     <span class="keywordtype">void</span> __TBB_EXPORTED_FUNC thread_sleep_v3(<span class="keyword">const</span> tick_count::interval_t &amp;i);
+<a name="l00220"></a>00220 
+<a name="l00221"></a>00221     <span class="keyword">inline</span> <span class="keywordtype">bool</span> operator==(tbb_thread_v3::id x, tbb_thread_v3::id y)
+<a name="l00222"></a>00222     {
+<a name="l00223"></a>00223         <span class="keywordflow">return</span> x.my_id == y.my_id;
+<a name="l00224"></a>00224     }
+<a name="l00225"></a>00225     <span class="keyword">inline</span> <span class="keywordtype">bool</span> operator!=(tbb_thread_v3::id x, tbb_thread_v3::id y)
+<a name="l00226"></a>00226     {
+<a name="l00227"></a>00227         <span class="keywordflow">return</span> x.my_id != y.my_id;
+<a name="l00228"></a>00228     }
+<a name="l00229"></a>00229     <span class="keyword">inline</span> <span class="keywordtype">bool</span> operator&lt;(tbb_thread_v3::id x, tbb_thread_v3::id y)
+<a name="l00230"></a>00230     {
+<a name="l00231"></a>00231         <span class="keywordflow">return</span> x.my_id &lt; y.my_id;
+<a name="l00232"></a>00232     }
+<a name="l00233"></a>00233     <span class="keyword">inline</span> <span class="keywordtype">bool</span> operator&lt;=(tbb_thread_v3::id x, tbb_thread_v3::id y)
+<a name="l00234"></a>00234     {
+<a name="l00235"></a>00235         <span class="keywordflow">return</span> x.my_id &lt;= y.my_id;
+<a name="l00236"></a>00236     }
+<a name="l00237"></a>00237     <span class="keyword">inline</span> <span class="keywordtype">bool</span> operator&gt;(tbb_thread_v3::id x, tbb_thread_v3::id y)
+<a name="l00238"></a>00238     {
+<a name="l00239"></a>00239         <span class="keywordflow">return</span> x.my_id &gt; y.my_id;
+<a name="l00240"></a>00240     }
+<a name="l00241"></a>00241     <span class="keyword">inline</span> <span class="keywordtype">bool</span> operator&gt;=(tbb_thread_v3::id x, tbb_thread_v3::id y)
+<a name="l00242"></a>00242     {
+<a name="l00243"></a>00243         <span class="keywordflow">return</span> x.my_id &gt;= y.my_id;
+<a name="l00244"></a>00244     }
+<a name="l00245"></a>00245 
+<a name="l00246"></a>00246 } <span class="comment">// namespace internal;</span>
+<a name="l00247"></a>00247 
+<a name="l00249"></a>00249 <span class="keyword">typedef</span> internal::tbb_thread_v3 tbb_thread;
+<a name="l00250"></a>00250 
+<a name="l00251"></a>00251 <span class="keyword">using</span> internal::operator==;
+<a name="l00252"></a>00252 <span class="keyword">using</span> internal::operator!=;
+<a name="l00253"></a>00253 <span class="keyword">using</span> internal::operator&lt;;
+<a name="l00254"></a>00254 <span class="keyword">using</span> internal::operator&gt;;
+<a name="l00255"></a>00255 <span class="keyword">using</span> internal::operator&lt;=;
+<a name="l00256"></a>00256 <span class="keyword">using</span> internal::operator&gt;=;
+<a name="l00257"></a>00257 
+<a name="l00258"></a>00258 <span class="keyword">inline</span> <span class="keywordtype">void</span> move( tbb_thread&amp; t1, tbb_thread&amp; t2 ) {
+<a name="l00259"></a>00259     internal::move_v3(t1, t2);
+<a name="l00260"></a>00260 }
+<a name="l00261"></a>00261 
+<a name="l00262"></a>00262 <span class="keyword">inline</span> <span class="keywordtype">void</span> swap( internal::tbb_thread_v3&amp; t1, internal::tbb_thread_v3&amp; t2 ) {
+<a name="l00263"></a>00263     tbb::tbb_thread::native_handle_type h = t1.my_handle;
+<a name="l00264"></a>00264     t1.my_handle = t2.my_handle;
+<a name="l00265"></a>00265     t2.my_handle = h;
+<a name="l00266"></a>00266 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00267"></a>00267 <span class="preprocessor"></span>    DWORD i = t1.my_thread_id;
+<a name="l00268"></a>00268     t1.my_thread_id = t2.my_thread_id;
+<a name="l00269"></a>00269     t2.my_thread_id = i;
+<a name="l00270"></a>00270 <span class="preprocessor">#endif </span><span class="comment">/* _WIN32||_WIN64 */</span>
+<a name="l00271"></a>00271 }
+<a name="l00272"></a>00272 
+<a name="l00273"></a>00273 <span class="keyword">namespace </span>this_tbb_thread {
+<a name="l00274"></a>00274     <span class="keyword">inline</span> tbb_thread::id get_id() { <span class="keywordflow">return</span> internal::thread_get_id_v3(); }
+<a name="l00276"></a>00276     <span class="keyword">inline</span> <span class="keywordtype">void</span> yield() { internal::thread_yield_v3(); }
+<a name="l00278"></a>00278     <span class="keyword">inline</span> <span class="keywordtype">void</span> sleep(<span class="keyword">const</span> tick_count::interval_t &amp;i) { 
+<a name="l00279"></a>00279         internal::thread_sleep_v3(i);  
+<a name="l00280"></a>00280     }
+<a name="l00281"></a>00281 }  <span class="comment">// namespace this_tbb_thread</span>
+<a name="l00282"></a>00282 
+<a name="l00283"></a>00283 } <span class="comment">// namespace tbb</span>
+<a name="l00284"></a>00284 
+<a name="l00285"></a>00285 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_tbb_thread_H */</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
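tbb_thread.h wraps the native thread handle (HANDLE on Windows, pthread_t elsewhere) in tbb_thread_v3, exported to users as tbb::tbb_thread; the thread_closure_0/1/2 templates package a callable plus up to two arguments so they can travel through the single void* expected by the native start routine, and the this_tbb_thread namespace provides get_id(), yield() and sleep(). A minimal usage sketch, assuming the TBB headers are on the include path and the program links against the TBB runtime library:

#include <iostream>
#include "tbb/tbb_thread.h"

// Callable executed on the new thread; tbb_thread accepts callables with
// zero, one, or two arguments (mirroring thread_closure_0/1/2 above).
void greet( const char* name ) {
    std::cout << "hello from " << name << std::endl;
}

int main() {
    tbb::tbb_thread t( greet, "worker" );   // the thread starts running immediately
    tbb::this_tbb_thread::yield();          // give the worker a chance to run
    t.join();                               // wait for it; the destructor would only detach()
    std::cout << tbb::tbb_thread::hardware_concurrency()
              << " hardware threads reported" << std::endl;
    return 0;
}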
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00486.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00486.html
new file mode 100644 (file)
index 0000000..4fc22f9
--- /dev/null
@@ -0,0 +1,96 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tbbmalloc_proxy.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tbbmalloc_proxy.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="comment">/*</span>
+<a name="l00022"></a>00022 <span class="comment">Replacing the standard memory allocation routines in Microsoft* C/C++ RTL </span>
+<a name="l00023"></a>00023 <span class="comment">(malloc/free, global new/delete, etc.) with the TBB memory allocator. </span>
+<a name="l00024"></a>00024 <span class="comment"></span>
+<a name="l00025"></a>00025 <span class="comment">Include the following header to a source of any binary which is loaded during </span>
+<a name="l00026"></a>00026 <span class="comment">application startup</span>
+<a name="l00027"></a>00027 <span class="comment"></span>
+<a name="l00028"></a>00028 <span class="comment">#include "tbb/tbbmalloc_proxy.h"</span>
+<a name="l00029"></a>00029 <span class="comment"></span>
+<a name="l00030"></a>00030 <span class="comment">or add following parameters to the linker options for the binary which is </span>
+<a name="l00031"></a>00031 <span class="comment">loaded during application startup. It can be either exe-file or dll.</span>
+<a name="l00032"></a>00032 <span class="comment"></span>
+<a name="l00033"></a>00033 <span class="comment">For win32</span>
+<a name="l00034"></a>00034 <span class="comment">tbbmalloc_proxy.lib /INCLUDE:"___TBB_malloc_proxy"</span>
+<a name="l00035"></a>00035 <span class="comment">win64</span>
+<a name="l00036"></a>00036 <span class="comment">tbbmalloc_proxy.lib /INCLUDE:"__TBB_malloc_proxy"</span>
+<a name="l00037"></a>00037 <span class="comment">*/</span>
+<a name="l00038"></a>00038 
+<a name="l00039"></a>00039 <span class="preprocessor">#ifndef __TBB_tbbmalloc_proxy_H</span>
+<a name="l00040"></a>00040 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_tbbmalloc_proxy_H</span>
+<a name="l00041"></a>00041 <span class="preprocessor"></span>
+<a name="l00042"></a>00042 <span class="preprocessor">#if _MSC_VER</span>
+<a name="l00043"></a>00043 <span class="preprocessor"></span>
+<a name="l00044"></a>00044 <span class="preprocessor">#ifdef _DEBUG</span>
+<a name="l00045"></a>00045 <span class="preprocessor"></span><span class="preprocessor">    #pragma comment(lib, "tbbmalloc_proxy_debug.lib")</span>
+<a name="l00046"></a>00046 <span class="preprocessor"></span><span class="preprocessor">#else</span>
+<a name="l00047"></a>00047 <span class="preprocessor"></span><span class="preprocessor">    #pragma comment(lib, "tbbmalloc_proxy.lib")</span>
+<a name="l00048"></a>00048 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00049"></a>00049 <span class="preprocessor"></span>
+<a name="l00050"></a>00050 <span class="preprocessor">#if defined(_WIN64)</span>
+<a name="l00051"></a>00051 <span class="preprocessor"></span><span class="preprocessor">    #pragma comment(linker, "/include:__TBB_malloc_proxy")</span>
+<a name="l00052"></a>00052 <span class="preprocessor"></span><span class="preprocessor">#else</span>
+<a name="l00053"></a>00053 <span class="preprocessor"></span><span class="preprocessor">    #pragma comment(linker, "/include:___TBB_malloc_proxy")</span>
+<a name="l00054"></a>00054 <span class="preprocessor"></span><span class="preprocessor">#endif</span>
+<a name="l00055"></a>00055 <span class="preprocessor"></span>
+<a name="l00056"></a>00056 <span class="preprocessor">#else</span>
+<a name="l00057"></a>00057 <span class="preprocessor"></span><span class="comment">/* Primarily to support MinGW */</span>
+<a name="l00058"></a>00058 
+<a name="l00059"></a>00059 <span class="keyword">extern</span> <span class="stringliteral">"C"</span> <span class="keywordtype">void</span> __TBB_malloc_proxy();
+<a name="l00060"></a>00060 <span class="keyword">struct </span>__TBB_malloc_proxy_caller {
+<a name="l00061"></a>00061     __TBB_malloc_proxy_caller() { __TBB_malloc_proxy(); }
+<a name="l00062"></a>00062 } <span class="keyword">volatile</span> __TBB_malloc_proxy_helper_object;
+<a name="l00063"></a>00063 
+<a name="l00064"></a>00064 <span class="preprocessor">#endif // _MSC_VER</span>
+<a name="l00065"></a>00065 <span class="preprocessor"></span>
+<a name="l00066"></a>00066 <span class="preprocessor">#endif //__TBB_tbbmalloc_proxy_H</span>
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
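As the comment block in tbbmalloc_proxy.h explains, including this one header in any binary that is loaded at application startup reroutes the standard allocation routines (malloc/free, global new/delete, and so on) to the TBB memory allocator: under MSVC the #pragma comment directives link tbbmalloc_proxy(.lib) and force the proxy symbol into the image, while the non-MSVC branch plants a volatile helper object whose constructor calls __TBB_malloc_proxy(). A minimal sketch of opting in from application code, assuming the proxy library can be found at link and load time:

// main.cpp -- the include below is the only source change required.
#include "tbb/tbbmalloc_proxy.h"

#include <vector>

int main() {
    // Both the container's allocations and the raw new/delete pair now go
    // through the TBB allocator instead of the default CRT/libc heap.
    std::vector<int> v( 1000, 42 );
    int* p = new int[256];
    delete [] p;
    return 0;
}

Alternatively, as the header's own comment notes, the same effect can be obtained purely at link time by passing tbbmalloc_proxy.lib /INCLUDE:"___TBB_malloc_proxy" (win32) or tbbmalloc_proxy.lib /INCLUDE:"__TBB_malloc_proxy" (win64) to the linker.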
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00488.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/a00488.html
new file mode 100644 (file)
index 0000000..5fcd526
--- /dev/null
@@ -0,0 +1,164 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>tick_count.h Source File</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>tick_count.h</h1><div class="fragment"><pre class="fragment"><a name="l00001"></a>00001 <span class="comment">/*</span>
+<a name="l00002"></a>00002 <span class="comment">    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.</span>
+<a name="l00003"></a>00003 <span class="comment"></span>
+<a name="l00004"></a>00004 <span class="comment">    The source code contained or described herein and all documents related</span>
+<a name="l00005"></a>00005 <span class="comment">    to the source code ("Material") are owned by Intel Corporation or its</span>
+<a name="l00006"></a>00006 <span class="comment">    suppliers or licensors.  Title to the Material remains with Intel</span>
+<a name="l00007"></a>00007 <span class="comment">    Corporation or its suppliers and licensors.  The Material is protected</span>
+<a name="l00008"></a>00008 <span class="comment">    by worldwide copyright laws and treaty provisions.  No part of the</span>
+<a name="l00009"></a>00009 <span class="comment">    Material may be used, copied, reproduced, modified, published, uploaded,</span>
+<a name="l00010"></a>00010 <span class="comment">    posted, transmitted, distributed, or disclosed in any way without</span>
+<a name="l00011"></a>00011 <span class="comment">    Intel's prior express written permission.</span>
+<a name="l00012"></a>00012 <span class="comment"></span>
+<a name="l00013"></a>00013 <span class="comment">    No license under any patent, copyright, trade secret or other</span>
+<a name="l00014"></a>00014 <span class="comment">    intellectual property right is granted to or conferred upon you by</span>
+<a name="l00015"></a>00015 <span class="comment">    disclosure or delivery of the Materials, either expressly, by</span>
+<a name="l00016"></a>00016 <span class="comment">    implication, inducement, estoppel or otherwise.  Any license under such</span>
+<a name="l00017"></a>00017 <span class="comment">    intellectual property rights must be express and approved by Intel in</span>
+<a name="l00018"></a>00018 <span class="comment">    writing.</span>
+<a name="l00019"></a>00019 <span class="comment">*/</span>
+<a name="l00020"></a>00020 
+<a name="l00021"></a>00021 <span class="preprocessor">#ifndef __TBB_tick_count_H</span>
+<a name="l00022"></a>00022 <span class="preprocessor"></span><span class="preprocessor">#define __TBB_tick_count_H</span>
+<a name="l00023"></a>00023 <span class="preprocessor"></span>
+<a name="l00024"></a>00024 <span class="preprocessor">#include "tbb_stddef.h"</span>
+<a name="l00025"></a>00025 
+<a name="l00026"></a>00026 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00027"></a>00027 <span class="preprocessor"></span><span class="preprocessor">#include "machine/windows_api.h"</span>
+<a name="l00028"></a>00028 <span class="preprocessor">#elif __linux__</span>
+<a name="l00029"></a>00029 <span class="preprocessor"></span><span class="preprocessor">#include &lt;ctime&gt;</span>
+<a name="l00030"></a>00030 <span class="preprocessor">#else </span><span class="comment">/* generic Unix */</span>
+<a name="l00031"></a>00031 <span class="preprocessor">#include &lt;sys/time.h&gt;</span>
+<a name="l00032"></a>00032 <span class="preprocessor">#endif </span><span class="comment">/* (choice of OS) */</span>
+<a name="l00033"></a>00033 
+<a name="l00034"></a>00034 <span class="keyword">namespace </span>tbb {
+<a name="l00035"></a>00035 
+<a name="l00037"></a>00037 
+<a name="l00038"></a><a class="code" href="a00215.html">00038</a> <span class="keyword">class </span><a class="code" href="a00215.html">tick_count</a> {
+<a name="l00039"></a>00039 <span class="keyword">public</span>:
+<a name="l00041"></a><a class="code" href="a00216.html">00041</a>     <span class="keyword">class </span><a class="code" href="a00216.html">interval_t</a> {
+<a name="l00042"></a>00042         <span class="keywordtype">long</span> <span class="keywordtype">long</span> value;
+<a name="l00043"></a>00043         <span class="keyword">explicit</span> <a class="code" href="a00216.html#75a9a0949f8a8a84d6758835f1b48dad">interval_t</a>( <span class="keywordtype">long</span> <span class="keywordtype">long</span> value_ ) : value(value_) {}
+<a name="l00044"></a>00044     <span class="keyword">public</span>:
+<a name="l00046"></a><a class="code" href="a00216.html#75a9a0949f8a8a84d6758835f1b48dad">00046</a>         <a class="code" href="a00216.html#75a9a0949f8a8a84d6758835f1b48dad">interval_t</a>() : value(0) {};
+<a name="l00047"></a>00047 
+<a name="l00049"></a>00049         <span class="keyword">explicit</span> <a class="code" href="a00216.html#75a9a0949f8a8a84d6758835f1b48dad">interval_t</a>( <span class="keywordtype">double</span> sec );
+<a name="l00050"></a>00050 
+<a name="l00052"></a>00052         <span class="keywordtype">double</span> <a class="code" href="a00216.html#d5d8429c0bc59cf6131b2abc7929fa59">seconds</a>() <span class="keyword">const</span>;
+<a name="l00053"></a>00053 
+<a name="l00054"></a>00054         <span class="keyword">friend</span> <span class="keyword">class </span><a class="code" href="a00215.html">tbb::tick_count</a>;
+<a name="l00055"></a>00055 
+<a name="l00057"></a>00057         <span class="keyword">friend</span> <a class="code" href="a00216.html">interval_t</a> <a class="code" href="a00216.html#09dde78a4100800c11bb883d6204b586">operator-</a>( <span class="keyword">const</span> <a class="code" href="a00215.html">tick_count</a>&amp; t1, <span class="keyword">const</span> <a class="code" href="a00215.html">tick_count</a>&amp; t0 );
+<a name="l00058"></a>00058 
+<a name="l00060"></a><a class="code" href="a00216.html#5871ead1ca230efbe52a5008470e6428">00060</a>         <span class="keyword">friend</span> <a class="code" href="a00216.html">interval_t</a> <a class="code" href="a00216.html#5871ead1ca230efbe52a5008470e6428">operator+</a>( <span class="keyword">const</span> <a class="code" href="a00216.html">interval_t</a>&amp; i, <span class="keyword">const</span> <a class="code" href="a00216.html">interval_t</a>&amp; j ) {
+<a name="l00061"></a>00061             <span class="keywordflow">return</span> <a class="code" href="a00216.html#75a9a0949f8a8a84d6758835f1b48dad">interval_t</a>(i.<a class="code" href="a00216.html#beea71acda120af3e3e29f10e8cb5c10">value</a>+j.<a class="code" href="a00216.html#beea71acda120af3e3e29f10e8cb5c10">value</a>);
+<a name="l00062"></a>00062         }
+<a name="l00063"></a>00063 
+<a name="l00065"></a><a class="code" href="a00216.html#fa509691e1d689830931e36edd274f76">00065</a>         <span class="keyword">friend</span> <a class="code" href="a00216.html">interval_t</a> <a class="code" href="a00216.html#09dde78a4100800c11bb883d6204b586">operator-</a>( <span class="keyword">const</span> <a class="code" href="a00216.html">interval_t</a>&amp; i, <span class="keyword">const</span> <a class="code" href="a00216.html">interval_t</a>&amp; j ) {
+<a name="l00066"></a>00066             <span class="keywordflow">return</span> <a class="code" href="a00216.html#75a9a0949f8a8a84d6758835f1b48dad">interval_t</a>(i.<a class="code" href="a00216.html#beea71acda120af3e3e29f10e8cb5c10">value</a>-j.<a class="code" href="a00216.html#beea71acda120af3e3e29f10e8cb5c10">value</a>);
+<a name="l00067"></a>00067         }
+<a name="l00068"></a>00068 
+<a name="l00070"></a><a class="code" href="a00216.html#cd9814947902e26463a69a111530f81b">00070</a>         <a class="code" href="a00216.html">interval_t</a>&amp; <a class="code" href="a00216.html#cd9814947902e26463a69a111530f81b">operator+=</a>( <span class="keyword">const</span> <a class="code" href="a00216.html">interval_t</a>&amp; i ) {value += i.<a class="code" href="a00216.html#beea71acda120af3e3e29f10e8cb5c10">value</a>; <span class="keywordflow">return</span> *<span class="keyword">this</span>;}
+<a name="l00071"></a>00071 
+<a name="l00073"></a><a class="code" href="a00216.html#35ff7eaf7c2031b4a991402ac9ecb940">00073</a>         <a class="code" href="a00216.html">interval_t</a>&amp; <a class="code" href="a00216.html#35ff7eaf7c2031b4a991402ac9ecb940">operator-=</a>( <span class="keyword">const</span> <a class="code" href="a00216.html">interval_t</a>&amp; i ) {value -= i.<a class="code" href="a00216.html#beea71acda120af3e3e29f10e8cb5c10">value</a>; <span class="keywordflow">return</span> *<span class="keyword">this</span>;}
+<a name="l00074"></a>00074     };
+<a name="l00075"></a>00075     
+<a name="l00077"></a><a class="code" href="a00215.html#34593326ae4191e02a13c7cbdab9de4c">00077</a>     <a class="code" href="a00215.html#34593326ae4191e02a13c7cbdab9de4c">tick_count</a>() : my_count(0) {};
+<a name="l00078"></a>00078 
+<a name="l00080"></a>00080     <span class="keyword">static</span> <a class="code" href="a00215.html">tick_count</a> <a class="code" href="a00215.html#fb7f78ca61cf28398645ace66e284473">now</a>();
+<a name="l00081"></a>00081     
+<a name="l00083"></a>00083     <span class="keyword">friend</span> interval_t <a class="code" href="a00215.html#09dde78a4100800c11bb883d6204b586">operator-</a>( <span class="keyword">const</span> <a class="code" href="a00215.html">tick_count</a>&amp; t1, <span class="keyword">const</span> <a class="code" href="a00215.html">tick_count</a>&amp; t0 );
+<a name="l00084"></a>00084 
+<a name="l00085"></a>00085 <span class="keyword">private</span>:
+<a name="l00086"></a>00086     <span class="keywordtype">long</span> <span class="keywordtype">long</span> my_count;
+<a name="l00087"></a>00087 };
+<a name="l00088"></a>00088 
+<a name="l00089"></a><a class="code" href="a00215.html#fb7f78ca61cf28398645ace66e284473">00089</a> <span class="keyword">inline</span> <a class="code" href="a00215.html">tick_count</a> <a class="code" href="a00215.html#fb7f78ca61cf28398645ace66e284473">tick_count::now</a>() {
+<a name="l00090"></a>00090     <a class="code" href="a00215.html">tick_count</a> result;
+<a name="l00091"></a>00091 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00092"></a>00092 <span class="preprocessor"></span>    LARGE_INTEGER qpcnt;
+<a name="l00093"></a>00093     QueryPerformanceCounter(&amp;qpcnt);
+<a name="l00094"></a>00094     result.<a class="code" href="a00215.html#1ba05ad46dc08bc1bf34bd6cc94e36ec">my_count</a> = qpcnt.QuadPart;
+<a name="l00095"></a>00095 <span class="preprocessor">#elif __linux__</span>
+<a name="l00096"></a>00096 <span class="preprocessor"></span>    <span class="keyword">struct </span>timespec ts;
+<a name="l00097"></a>00097 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00098"></a>00098 <span class="preprocessor"></span>    <span class="keywordtype">int</span> status = 
+<a name="l00099"></a>00099 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00100"></a>00100         clock_gettime( CLOCK_REALTIME, &amp;ts );
+<a name="l00101"></a>00101     __TBB_ASSERT( status==0, <span class="stringliteral">"CLOCK_REALTIME not supported"</span> );
+<a name="l00102"></a>00102     result.<a class="code" href="a00215.html#1ba05ad46dc08bc1bf34bd6cc94e36ec">my_count</a> = static_cast&lt;long long&gt;(1000000000UL)*static_cast&lt;long long&gt;(ts.tv_sec) + static_cast&lt;long long&gt;(ts.tv_nsec);
+<a name="l00103"></a>00103 <span class="preprocessor">#else </span><span class="comment">/* generic Unix */</span>
+<a name="l00104"></a>00104     <span class="keyword">struct </span>timeval tv;
+<a name="l00105"></a>00105 <span class="preprocessor">#if TBB_USE_ASSERT</span>
+<a name="l00106"></a>00106 <span class="preprocessor"></span>    <span class="keywordtype">int</span> status = 
+<a name="l00107"></a>00107 <span class="preprocessor">#endif </span><span class="comment">/* TBB_USE_ASSERT */</span>
+<a name="l00108"></a>00108         gettimeofday(&amp;tv, NULL);
+<a name="l00109"></a>00109     __TBB_ASSERT( status==0, <span class="stringliteral">"gettimeofday failed"</span> );
+<a name="l00110"></a>00110     result.<a class="code" href="a00215.html#1ba05ad46dc08bc1bf34bd6cc94e36ec">my_count</a> = static_cast&lt;long long&gt;(1000000)*static_cast&lt;long long&gt;(tv.tv_sec) + static_cast&lt;long long&gt;(tv.tv_usec);
+<a name="l00111"></a>00111 <span class="preprocessor">#endif </span><span class="comment">/*(choice of OS) */</span>
+<a name="l00112"></a>00112     <span class="keywordflow">return</span> result;
+<a name="l00113"></a>00113 }
+<a name="l00114"></a>00114 
+<a name="l00115"></a><a class="code" href="a00216.html#1a21a428e00cced2e6a49e0f5f2258bf">00115</a> <span class="keyword">inline</span> <a class="code" href="a00216.html#75a9a0949f8a8a84d6758835f1b48dad">tick_count::interval_t::interval_t</a>( <span class="keywordtype">double</span> sec )
+<a name="l00116"></a>00116 {
+<a name="l00117"></a>00117 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00118"></a>00118 <span class="preprocessor"></span>    LARGE_INTEGER qpfreq;
+<a name="l00119"></a>00119     QueryPerformanceFrequency(&amp;qpfreq);
+<a name="l00120"></a>00120     value = static_cast&lt;long long&gt;(sec*qpfreq.QuadPart);
+<a name="l00121"></a>00121 <span class="preprocessor">#elif __linux__</span>
+<a name="l00122"></a>00122 <span class="preprocessor"></span>    value = static_cast&lt;long long&gt;(sec*1E9);
+<a name="l00123"></a>00123 <span class="preprocessor">#else </span><span class="comment">/* generic Unix */</span>
+<a name="l00124"></a>00124     value = static_cast&lt;long long&gt;(sec*1E6);
+<a name="l00125"></a>00125 <span class="preprocessor">#endif </span><span class="comment">/* (choice of OS) */</span>
+<a name="l00126"></a>00126 }
+<a name="l00127"></a>00127 
+<a name="l00128"></a><a class="code" href="a00215.html#09dde78a4100800c11bb883d6204b586">00128</a> <span class="keyword">inline</span> <a class="code" href="a00216.html">tick_count::interval_t</a> <a class="code" href="a00215.html#09dde78a4100800c11bb883d6204b586">operator-</a>( <span class="keyword">const</span> <a class="code" href="a00215.html">tick_count</a>&amp; t1, <span class="keyword">const</span> <a class="code" href="a00215.html">tick_count</a>&amp; t0 ) {
+<a name="l00129"></a>00129     <span class="keywordflow">return</span> <a class="code" href="a00216.html">tick_count::interval_t</a>( t1.<a class="code" href="a00215.html#1ba05ad46dc08bc1bf34bd6cc94e36ec">my_count</a>-t0.<a class="code" href="a00215.html#1ba05ad46dc08bc1bf34bd6cc94e36ec">my_count</a> );
+<a name="l00130"></a>00130 }
+<a name="l00131"></a>00131 
+<a name="l00132"></a><a class="code" href="a00216.html#d5d8429c0bc59cf6131b2abc7929fa59">00132</a> <span class="keyword">inline</span> <span class="keywordtype">double</span> <a class="code" href="a00216.html#d5d8429c0bc59cf6131b2abc7929fa59">tick_count::interval_t::seconds</a>()<span class="keyword"> const </span>{
+<a name="l00133"></a>00133 <span class="preprocessor">#if _WIN32||_WIN64</span>
+<a name="l00134"></a>00134 <span class="preprocessor"></span>    LARGE_INTEGER qpfreq;
+<a name="l00135"></a>00135     QueryPerformanceFrequency(&amp;qpfreq);
+<a name="l00136"></a>00136     <span class="keywordflow">return</span> value/(double)qpfreq.QuadPart;
+<a name="l00137"></a>00137 #elif __linux__
+<a name="l00138"></a>00138     <span class="keywordflow">return</span> value*1E-9;
+<a name="l00139"></a>00139 #<span class="keywordflow">else</span> <span class="comment">/* generic Unix */</span>
+<a name="l00140"></a>00140     <span class="keywordflow">return</span> value*1E-6;
+<a name="l00141"></a>00141 #endif <span class="comment">/* (choice of OS) */</span>
+<a name="l00142"></a>00142 }
+<a name="l00143"></a>00143 
+<a name="l00144"></a>00144 } <span class="comment">// namespace tbb</span>
+<a name="l00145"></a>00145 
+<a name="l00146"></a>00146 <span class="preprocessor">#endif </span><span class="comment">/* __TBB_tick_count_H */</span>
+<a name="l00147"></a>00147 
+</pre></div><hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
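tick_count reads the platform's high-resolution wall clock (QueryPerformanceCounter on Windows, clock_gettime(CLOCK_REALTIME) on Linux, gettimeofday on other Unix systems) and stores the raw reading in a long long; subtracting two tick_count values yields an interval_t, whose seconds() method converts the stored ticks to seconds using the platform's tick resolution. A short timing sketch, assuming the usual TBB include path:

#include <cstdio>
#include "tbb/tick_count.h"

int main() {
    tbb::tick_count t0 = tbb::tick_count::now();

    // Some busy work so there is something to measure.
    volatile double sink = 0.0;
    for ( int i = 0; i < 10000000; ++i )
        sink += i * 1e-7;

    tbb::tick_count t1 = tbb::tick_count::now();
    tbb::tick_count::interval_t elapsed = t1 - t0;
    std::printf( "elapsed: %g s\n", elapsed.seconds() );
    return 0;
}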
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/annotated.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/annotated.html
new file mode 100644 (file)
index 0000000..6902abd
--- /dev/null
@@ -0,0 +1,108 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Class List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li id="current"><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>Class List</h1>Here are the classes, structs, unions and interfaces with brief descriptions:<table>
+  <tr><td class="indexkey"><a class="el" href="a00145.html">tbb::affinity_partitioner</a></td><td class="indexvalue">An affinity partitioner </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00146.html">tbb::aligned_space&lt; T, N &gt;</a></td><td class="indexvalue">Block of space aligned sufficiently to construct an array T with N elements </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00147.html">tbb::atomic&lt; T &gt;</a></td><td class="indexvalue">Primary template for atomic </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00148.html">tbb::atomic&lt; void * &gt;</a></td><td class="indexvalue">Specialization for <a class="el" href="a00148.html">atomic&lt;void*&gt;</a>, for sake of not allowing arithmetic or operator-&gt; </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00149.html">tbb::internal::atomic_backoff</a></td><td class="indexvalue">Class that implements exponential backoff </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00150.html">tbb::auto_partitioner</a></td><td class="indexvalue">An auto partitioner </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00151.html">tbb::bad_last_alloc</a></td><td class="indexvalue">Exception for concurrent containers </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00152.html">tbb::blocked_range&lt; Value &gt;</a></td><td class="indexvalue">A range over which to iterate </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00153.html">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a></td><td class="indexvalue">A 2-dimensional range that models the Range concept </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00154.html">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a></td><td class="indexvalue">A 3-dimensional range that models the Range concept </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a></td><td class="indexvalue">Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00156.html">tbb::cache_aligned_allocator&lt; void &gt;</a></td><td class="indexvalue">Analogous to std::allocator&lt;void&gt;, as defined in ISO C++ Standard, Section 20.4.1 </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00157.html">tbb::captured_exception</a></td><td class="indexvalue">This class is used by TBB to propagate information about unhandled exceptions into the root thread </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00158.html">tbb::combinable&lt; T &gt;</a></td><td class="indexvalue">Thread-local storage with optional reduction </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></td><td class="indexvalue">A high-performance thread-safe blocking concurrent bounded queue </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></td><td class="indexvalue">Unordered map from Key to T </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00161.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::accessor</a></td><td class="indexvalue">Allows write access to elements and combines data access, locking, and garbage collection </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00162.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::bucket_accessor</a></td><td class="indexvalue">Bucket accessor is to find, rehash, acquire a lock, and access a bucket </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00163.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a></td><td class="indexvalue">Combines data access, locking, and garbage collection </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a></td><td class="indexvalue">A high-performance thread-safe non-blocking concurrent queue </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00165.html">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a></td><td class="indexvalue">A high-performance thread-safe blocking concurrent bounded queue </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a></td><td class="indexvalue">Concurrent vector container </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00167.html">tbb::empty_task</a></td><td class="indexvalue">Task that does nothing. Useful for synchronization </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a></td><td class="indexvalue">The <a class="el" href="a00168.html">enumerable_thread_specific</a> container </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00169.html">tbb::filter</a></td><td class="indexvalue">A stage in a pipeline </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00170.html">tbb::interface5::filter_t&lt; T, U &gt;</a></td><td class="indexvalue">Class representing a chain of type-safe pipeline filters </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00171.html">tbb::final_scan_tag</a></td><td class="indexvalue">Used to indicate that the final scan is being performed </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00172.html">tbb::interface5::flow_control</a></td><td class="indexvalue">Input_filter control to signal end-of-input for parallel_pipeline </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00173.html">tbb::improper_lock</a></td><td class="indexvalue">Exception for PPL locks </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00174.html">tbb::invalid_multiple_scheduling</a></td><td class="indexvalue">Exception for repeated scheduling of the same task_handle </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00175.html">tbb::missing_wait</a></td><td class="indexvalue">Exception for missing wait on structured_task_group </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00176.html">tbb::movable_exception&lt; ExceptionData &gt;</a></td><td class="indexvalue">Template that can be used to implement exception that transfers arbitrary ExceptionData to the root thread </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00177.html">tbb::mutex</a></td><td class="indexvalue">Wrapper around the platform's native reader-writer lock </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00178.html">tbb::mutex::scoped_lock</a></td><td class="indexvalue">The scoped locking pattern </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00179.html">tbb::null_mutex</a></td><td class="indexvalue">A mutex which does nothing </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00180.html">tbb::null_mutex::scoped_lock</a></td><td class="indexvalue">Represents acquisition of a mutex </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00181.html">tbb::null_rw_mutex</a></td><td class="indexvalue">A rw mutex which does nothing </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00182.html">tbb::null_rw_mutex::scoped_lock</a></td><td class="indexvalue">Represents acquisition of a mutex </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00183.html">tbb::parallel_do_feeder&lt; Item &gt;</a></td><td class="indexvalue">Class the user supplied algorithm body uses to add new tasks </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00184.html">tbb::parallel_while&lt; Body &gt;</a></td><td class="indexvalue">Parallel iteration over a stream, with optional addition of more work </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00185.html">tbb::pipeline</a></td><td class="indexvalue">A processing pipeline that applies filters to items </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00186.html">tbb::pre_scan_tag</a></td><td class="indexvalue">Used to indicate that the initial scan is being performed </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00187.html">tbb::queuing_mutex</a></td><td class="indexvalue">Queuing lock with local-only spinning </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00188.html">tbb::queuing_mutex::scoped_lock</a></td><td class="indexvalue">The scoped locking pattern </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00189.html">tbb::queuing_rw_mutex</a></td><td class="indexvalue">Reader-writer lock with local-only spinning </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00190.html">tbb::queuing_rw_mutex::scoped_lock</a></td><td class="indexvalue">The scoped locking pattern </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a></td><td class="indexvalue">Writer-preference reader-writer lock with local-only spinning on readers </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00192.html">tbb::interface5::reader_writer_lock::scoped_lock</a></td><td class="indexvalue">The scoped lock pattern for write locks </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00193.html">tbb::interface5::reader_writer_lock::scoped_lock_read</a></td><td class="indexvalue">The scoped lock pattern for read locks </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00194.html">tbb::recursive_mutex</a></td><td class="indexvalue">Mutex that allows recursive mutex acquisition </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00195.html">tbb::recursive_mutex::scoped_lock</a></td><td class="indexvalue">The scoped locking pattern </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a></td><td class="indexvalue">Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00197.html">tbb::scalable_allocator&lt; void &gt;</a></td><td class="indexvalue">Analogous to std::allocator&lt;void&gt;, as defined in ISO C++ Standard, Section 20.4.1 </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00198.html">tbb::simple_partitioner</a></td><td class="indexvalue">A simple partitioner </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00199.html">tbb::spin_mutex</a></td><td class="indexvalue">A lock that occupies a single byte </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00200.html">tbb::spin_mutex::scoped_lock</a></td><td class="indexvalue">Represents acquisition of a mutex </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00201.html">tbb::spin_rw_mutex_v3</a></td><td class="indexvalue">Fast, unfair, spinning reader-writer lock with backoff and writer-preference </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00202.html">tbb::spin_rw_mutex_v3::scoped_lock</a></td><td class="indexvalue">The scoped locking pattern </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00203.html">tbb::split</a></td><td class="indexvalue">Dummy type that distinguishes splitting constructor from copy constructor </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00204.html">tbb::task</a></td><td class="indexvalue">Base class for user-defined tasks </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00205.html">tbb::interface5::internal::task_base</a></td><td class="indexvalue">Base class for methods that became static in TBB 3.0 </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00206.html">tbb::task_group_context</a></td><td class="indexvalue">Used to form groups of tasks </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00207.html">tbb::task_list</a></td><td class="indexvalue">A list of children </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00208.html">tbb::task_scheduler_init</a></td><td class="indexvalue">Class representing reference to tbb scheduler </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a></td><td class="indexvalue">Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00210.html">tbb::tbb_allocator&lt; void &gt;</a></td><td class="indexvalue">Analogous to std::allocator&lt;void&gt;, as defined in ISO C++ Standard, Section 20.4.1 </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00211.html">tbb::tbb_exception</a></td><td class="indexvalue">Interface to be implemented by all exceptions TBB recognizes and propagates across the threads </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00212.html">tbb::internal::tbb_exception_ptr</a></td><td class="indexvalue">Exception container that preserves the exact copy of the original exception </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00213.html">tbb::tbb_hash_compare&lt; Key &gt;</a></td><td class="indexvalue">Hash_compare that is default argument for concurrent_hash_map </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00214.html">tbb::thread_bound_filter</a></td><td class="indexvalue">A stage in a pipeline served by a user thread </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00215.html">tbb::tick_count</a></td><td class="indexvalue">Absolute timestamp </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00216.html">tbb::tick_count::interval_t</a></td><td class="indexvalue">Relative time interval </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00217.html">tbb::internal::work_around_alignment_bug&lt; Size, T &gt;</a></td><td class="indexvalue">Work around for bug in GNU 3.2 and MSVC compilers </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a></td><td class="indexvalue">Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00219.html">tbb::zero_allocator&lt; void, Allocator &gt;</a></td><td class="indexvalue">Analogous to std::allocator&lt;void&gt;, as defined in ISO C++ Standard, Section 20.4.1 </td></tr>
+</table>
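A minimal usage sketch tying together a few of the classes indexed above: blocked_range models the Range concept, task_scheduler_init represents a reference to the scheduler, and parallel_for (from parallel_for.h, listed in the File List further down) splits the range across worker threads. ScaleBody and the vector it scales are hypothetical user code, not part of the library.

    #include <cstddef>
    #include <vector>
    #include "tbb/blocked_range.h"
    #include "tbb/parallel_for.h"
    #include "tbb/task_scheduler_init.h"

    // Hypothetical body functor: scales every element of a vector in place.
    // It models the parallel_for body concept (see the concepts page below).
    struct ScaleBody {
        std::vector<double>* data;
        double factor;
        void operator()( const tbb::blocked_range<std::size_t>& r ) const {
            for( std::size_t i = r.begin(); i != r.end(); ++i )
                (*data)[i] *= factor;
        }
    };

    int main() {
        tbb::task_scheduler_init init;   // reference to the TBB scheduler
        std::vector<double> v( 1000, 1.0 );
        ScaleBody body = { &v, 2.0 };
        // blocked_range models the Range concept; parallel_for splits it across worker threads.
        tbb::parallel_for( tbb::blocked_range<std::size_t>( 0, v.size() ), body );
        return 0;
    }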
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/concepts.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/concepts.html
new file mode 100644 (file)
index 0000000..cfb3171
--- /dev/null
@@ -0,0 +1,31 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>TBB concepts</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="index.html">Main Page</a></div>
+<h1><a class="anchor" name="concepts">TBB concepts</a></h1>A concept is a set of requirements to a type, which are necessary and sufficient for the type to model a particular behavior or a set of behaviors. Some concepts are specific to a particular algorithm (e.g. algorithm body), while other ones are common to several algorithms (e.g. range concept).<p>
+All TBB algorithms make use of classes that implement these concepts. Implementation classes are supplied by the user as template type arguments and/or as objects passed as function call arguments. The library provides predefined implementations of some concepts (e.g. several kinds of <a class="el" href="range_req.html">ranges</a>), while others must always be implemented by the user.<p>
+TBB defines a minimal set of requirements that each concept must satisfy. The following concepts are hyperlinked to their requirements specifications:<ul>
+<li><a class="el" href="range_req.html">Requirements on range concept</a></li><li><a class="el" href="parallel_do_body_req.html">Requirements on parallel_do body</a></li><li><a class="el" href="parallel_for_body_req.html">Requirements on parallel_for body</a></li><li><a class="el" href="parallel_reduce_body_req.html">Requirements on parallel_reduce body</a></li><li><a class="el" href="parallel_scan_body_req.html">Requirements on parallel_scan body</a></li><li><a class="el" href="parallel_sort_iter_req.html">Requirements on iterators for parallel_sort</a> </li></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/deprecated.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/deprecated.html
new file mode 100644 (file)
index 0000000..5398b36
--- /dev/null
@@ -0,0 +1,29 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Deprecated List</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<h1><a class="anchor" name="deprecated">Deprecated List</a></h1><a class="anchor" name="_deprecated000001"></a> <dl>
+<dt>Member <a class="el" href="a00165.html#48da3536245318af6cb5fd58bac78039">tbb::deprecated::concurrent_queue::pop_if_present</a> (T &amp;destination) </dt>
+<dd>Use try_pop() instead </dd>
+</dl>
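A minimal sketch of the suggested replacement, using try_pop() on tbb::concurrent_queue; the drain() helper and the int element type are hypothetical.

    #include "tbb/concurrent_queue.h"

    // Drains a queue with the non-blocking try_pop() that replaces the
    // deprecated pop_if_present(); both return false when the queue is empty.
    void drain( tbb::concurrent_queue<int>& q ) {
        int item;
        while( q.try_pop( item ) ) {
            // ... process item ...
        }
    }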
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/doxygen.css b/deal.II/contrib/tbb/tbb30_104oss/doc/html/doxygen.css
new file mode 100644 (file)
index 0000000..5d58369
--- /dev/null
@@ -0,0 +1,358 @@
+BODY,H1,H2,H3,H4,H5,H6,P,CENTER,TD,TH,UL,DL,DIV {
+       font-family: Geneva, Arial, Helvetica, sans-serif;
+}
+BODY,TD {
+       font-size: 90%;
+}
+H1 {
+       text-align: center;
+       font-size: 160%;
+}
+H2 {
+       font-size: 120%;
+}
+H3 {
+       font-size: 100%;
+}
+CAPTION { font-weight: bold }
+DIV.qindex {
+       width: 100%;
+       background-color: #e8eef2;
+       border: 1px solid #84b0c7;
+       text-align: center;
+       margin: 2px;
+       padding: 2px;
+       line-height: 140%;
+}
+DIV.nav {
+       width: 100%;
+       background-color: #e8eef2;
+       border: 1px solid #84b0c7;
+       text-align: center;
+       margin: 2px;
+       padding: 2px;
+       line-height: 140%;
+}
+DIV.navtab {
+       background-color: #e8eef2;
+       border: 1px solid #84b0c7;
+       text-align: center;
+       margin: 2px;
+       margin-right: 15px;
+       padding: 2px;
+}
+TD.navtab {
+       font-size: 70%;
+}
+A.qindex {
+       text-decoration: none;
+       font-weight: bold;
+       color: #1A419D;
+}
+A.qindex:visited {
+       text-decoration: none;
+       font-weight: bold;
+       color: #1A419D
+}
+A.qindex:hover {
+       text-decoration: none;
+       background-color: #ddddff;
+}
+A.qindexHL {
+       text-decoration: none;
+       font-weight: bold;
+       background-color: #6666cc;
+       color: #ffffff;
+       border: 1px double #9295C2;
+}
+A.qindexHL:hover {
+       text-decoration: none;
+       background-color: #6666cc;
+       color: #ffffff;
+}
+A.qindexHL:visited { text-decoration: none; background-color: #6666cc; color: #ffffff }
+A.el { text-decoration: none; font-weight: bold }
+A.elRef { font-weight: bold }
+A.code:link { text-decoration: none; font-weight: normal; color: #0000FF}
+A.code:visited { text-decoration: none; font-weight: normal; color: #0000FF}
+A.codeRef:link { font-weight: normal; color: #0000FF}
+A.codeRef:visited { font-weight: normal; color: #0000FF}
+A:hover { text-decoration: none; background-color: #f2f2ff }
+DL.el { margin-left: -1cm }
+.fragment {
+       font-family: monospace, fixed;
+       font-size: 95%;
+}
+PRE.fragment {
+       border: 1px solid #CCCCCC;
+       background-color: #f5f5f5;
+       margin-top: 4px;
+       margin-bottom: 4px;
+       margin-left: 2px;
+       margin-right: 8px;
+       padding-left: 6px;
+       padding-right: 6px;
+       padding-top: 4px;
+       padding-bottom: 4px;
+}
+DIV.ah { background-color: black; font-weight: bold; color: #ffffff; margin-bottom: 3px; margin-top: 3px }
+
+DIV.groupHeader {
+       margin-left: 16px;
+       margin-top: 12px;
+       margin-bottom: 6px;
+       font-weight: bold;
+}
+DIV.groupText { margin-left: 16px; font-style: italic; font-size: 90% }
+BODY {
+       background: white;
+       color: black;
+       margin-right: 20px;
+       margin-left: 20px;
+}
+TD.indexkey {
+       background-color: #e8eef2;
+       font-weight: bold;
+       padding-right  : 10px;
+       padding-top    : 2px;
+       padding-left   : 10px;
+       padding-bottom : 2px;
+       margin-left    : 0px;
+       margin-right   : 0px;
+       margin-top     : 2px;
+       margin-bottom  : 2px;
+       border: 1px solid #CCCCCC;
+}
+TD.indexvalue {
+       background-color: #e8eef2;
+       font-style: italic;
+       padding-right  : 10px;
+       padding-top    : 2px;
+       padding-left   : 10px;
+       padding-bottom : 2px;
+       margin-left    : 0px;
+       margin-right   : 0px;
+       margin-top     : 2px;
+       margin-bottom  : 2px;
+       border: 1px solid #CCCCCC;
+}
+TR.memlist {
+   background-color: #f0f0f0; 
+}
+P.formulaDsp { text-align: center; }
+IMG.formulaDsp { }
+IMG.formulaInl { vertical-align: middle; }
+SPAN.keyword       { color: #008000 }
+SPAN.keywordtype   { color: #604020 }
+SPAN.keywordflow   { color: #e08000 }
+SPAN.comment       { color: #800000 }
+SPAN.preprocessor  { color: #806020 }
+SPAN.stringliteral { color: #002080 }
+SPAN.charliteral   { color: #008080 }
+.mdescLeft {
+       padding: 0px 8px 4px 8px;
+       font-size: 80%;
+       font-style: italic;
+       background-color: #FAFAFA;
+       border-top: 1px none #E0E0E0;
+       border-right: 1px none #E0E0E0;
+       border-bottom: 1px none #E0E0E0;
+       border-left: 1px none #E0E0E0;
+       margin: 0px;
+}
+.mdescRight {
+       padding: 0px 8px 4px 8px;
+       font-size: 80%;
+       font-style: italic;
+       background-color: #FAFAFA;
+       border-top: 1px none #E0E0E0;
+       border-right: 1px none #E0E0E0;
+       border-bottom: 1px none #E0E0E0;
+       border-left: 1px none #E0E0E0;
+       margin: 0px;
+}
+.memItemLeft {
+       padding: 1px 0px 0px 8px;
+       margin: 4px;
+       border-top-width: 1px;
+       border-right-width: 1px;
+       border-bottom-width: 1px;
+       border-left-width: 1px;
+       border-top-color: #E0E0E0;
+       border-right-color: #E0E0E0;
+       border-bottom-color: #E0E0E0;
+       border-left-color: #E0E0E0;
+       border-top-style: solid;
+       border-right-style: none;
+       border-bottom-style: none;
+       border-left-style: none;
+       background-color: #FAFAFA;
+       font-size: 80%;
+}
+.memItemRight {
+       padding: 1px 8px 0px 8px;
+       margin: 4px;
+       border-top-width: 1px;
+       border-right-width: 1px;
+       border-bottom-width: 1px;
+       border-left-width: 1px;
+       border-top-color: #E0E0E0;
+       border-right-color: #E0E0E0;
+       border-bottom-color: #E0E0E0;
+       border-left-color: #E0E0E0;
+       border-top-style: solid;
+       border-right-style: none;
+       border-bottom-style: none;
+       border-left-style: none;
+       background-color: #FAFAFA;
+       font-size: 80%;
+}
+.memTemplItemLeft {
+       padding: 1px 0px 0px 8px;
+       margin: 4px;
+       border-top-width: 1px;
+       border-right-width: 1px;
+       border-bottom-width: 1px;
+       border-left-width: 1px;
+       border-top-color: #E0E0E0;
+       border-right-color: #E0E0E0;
+       border-bottom-color: #E0E0E0;
+       border-left-color: #E0E0E0;
+       border-top-style: none;
+       border-right-style: none;
+       border-bottom-style: none;
+       border-left-style: none;
+       background-color: #FAFAFA;
+       font-size: 80%;
+}
+.memTemplItemRight {
+       padding: 1px 8px 0px 8px;
+       margin: 4px;
+       border-top-width: 1px;
+       border-right-width: 1px;
+       border-bottom-width: 1px;
+       border-left-width: 1px;
+       border-top-color: #E0E0E0;
+       border-right-color: #E0E0E0;
+       border-bottom-color: #E0E0E0;
+       border-left-color: #E0E0E0;
+       border-top-style: none;
+       border-right-style: none;
+       border-bottom-style: none;
+       border-left-style: none;
+       background-color: #FAFAFA;
+       font-size: 80%;
+}
+.memTemplParams {
+       padding: 1px 0px 0px 8px;
+       margin: 4px;
+       border-top-width: 1px;
+       border-right-width: 1px;
+       border-bottom-width: 1px;
+       border-left-width: 1px;
+       border-top-color: #E0E0E0;
+       border-right-color: #E0E0E0;
+       border-bottom-color: #E0E0E0;
+       border-left-color: #E0E0E0;
+       border-top-style: solid;
+       border-right-style: none;
+       border-bottom-style: none;
+       border-left-style: none;
+       color: #606060;
+       background-color: #FAFAFA;
+       font-size: 80%;
+}
+.search     { color: #003399;
+              font-weight: bold;
+}
+FORM.search {
+              margin-bottom: 0px;
+              margin-top: 0px;
+}
+INPUT.search { font-size: 75%;
+               color: #000080;
+               font-weight: normal;
+               background-color: #e8eef2;
+}
+TD.tiny      { font-size: 75%;
+}
+a {
+       color: #1A41A8;
+}
+a:visited {
+       color: #2A3798;
+}
+.dirtab { padding: 4px;
+          border-collapse: collapse;
+          border: 1px solid #84b0c7;
+}
+TH.dirtab { background: #e8eef2;
+            font-weight: bold;
+}
+HR { height: 1px;
+     border: none;
+     border-top: 1px solid black;
+}
+
+/* Style for detailed member documentation */
+.memtemplate {
+  font-size: 80%;
+  color: #606060;
+  font-weight: normal;
+} 
+.memnav { 
+  background-color: #e8eef2;
+  border: 1px solid #84b0c7;
+  text-align: center;
+  margin: 2px;
+  margin-right: 15px;
+  padding: 2px;
+}
+.memitem {
+  padding: 4px;
+  background-color: #eef3f5;
+  border-width: 1px;
+  border-style: solid;
+  border-color: #dedeee;
+  -moz-border-radius: 8px 8px 8px 8px;
+}
+.memname {
+  white-space: nowrap;
+  font-weight: bold;
+}
+.memdoc{
+  padding-left: 10px;
+}
+.memproto {
+  background-color: #d5e1e8;
+  width: 100%;
+  border-width: 1px;
+  border-style: solid;
+  border-color: #84b0c7;
+  font-weight: bold;
+  -moz-border-radius: 8px 8px 8px 8px;
+}
+.paramkey {
+  text-align: right;
+}
+.paramtype {
+  white-space: nowrap;
+}
+.paramname {
+  color: #602020;
+  font-style: italic;
+}
+/* End Styling for detailed member documentation */
+
+/* for the tree view */
+.ftvtree {
+       font-family: sans-serif;
+       margin:0.5em;
+}
+.directory { font-size: 9pt; font-weight: bold; }
+.directory h3 { margin: 0px; margin-top: 1em; font-size: 11pt; }
+.directory > h3 { margin-top: 0; }
+.directory p { margin: 0px; white-space: nowrap; }
+.directory div { display: none; margin: 0px; }
+.directory img { vertical-align: -30%; }
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/doxygen.png b/deal.II/contrib/tbb/tbb30_104oss/doc/html/doxygen.png
new file mode 100644 (file)
index 0000000..f0a274b
Binary files /dev/null and b/deal.II/contrib/tbb/tbb30_104oss/doc/html/doxygen.png differ
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/files.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/files.html
new file mode 100644 (file)
index 0000000..6dbb83a
--- /dev/null
@@ -0,0 +1,83 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>File Index</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li id="current"><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>File List</h1>Here is a list of all documented files with brief descriptions:<table>
+  <tr><td class="indexkey"><b>_concurrent_queue_internal.h</b> <a href="a00286.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>_concurrent_unordered_internal.h</b> <a href="a00304.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>_tbb_windef.h</b> <a href="a00314.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>aligned_space.h</b> <a href="a00316.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>atomic.h</b> <a href="a00317.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>blocked_range.h</b> <a href="a00325.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>blocked_range2d.h</b> <a href="a00326.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>blocked_range3d.h</b> <a href="a00327.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>cache_aligned_allocator.h</b> <a href="a00328.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>combinable.h</b> <a href="a00330.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>concurrent_hash_map.h</b> <a href="a00331.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>concurrent_queue.h</b> <a href="a00341.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>concurrent_unordered_map.h</b> <a href="a00342.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>concurrent_vector.h</b> <a href="a00347.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>critical_section.h</b> <a href="a00356.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>enumerable_thread_specific.h</b> <a href="a00359.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>mutex.h</b> <a href="a00372.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>null_mutex.h</b> <a href="a00374.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>null_rw_mutex.h</b> <a href="a00375.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>parallel_do.h</b> <a href="a00376.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>parallel_for.h</b> <a href="a00385.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>parallel_for_each.h</b> <a href="a00389.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>parallel_invoke.h</b> <a href="a00391.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>parallel_reduce.h</b> <a href="a00397.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>parallel_scan.h</b> <a href="a00401.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>parallel_sort.h</b> <a href="a00406.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>parallel_while.h</b> <a href="a00410.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>partitioner.h</b> <a href="a00414.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>pipeline.h</b> <a href="a00419.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>queuing_mutex.h</b> <a href="a00431.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>queuing_rw_mutex.h</b> <a href="a00432.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>reader_writer_lock.h</b> <a href="a00433.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>recursive_mutex.h</b> <a href="a00434.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00253.html">scalable_allocator.h</a> <a href="a00435.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>spin_mutex.h</b> <a href="a00436.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>spin_rw_mutex.h</b> <a href="a00437.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>task.h</b> <a href="a00438.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>task_group.h</b> <a href="a00447.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>task_scheduler_init.h</b> <a href="a00454.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>task_scheduler_observer.h</b> <a href="a00455.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>tbb.h</b> <a href="a00457.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>tbb_allocator.h</b> <a href="a00458.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>tbb_config.h</b> <a href="a00462.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>tbb_config_lrb.h</b> <a href="a00463.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>tbb_exception.h</b> <a href="a00464.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>tbb_machine.h</b> <a href="a00465.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>tbb_profiling.h</b> <a href="a00470.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>tbb_stddef.h</b> <a href="a00471.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>tbb_thread.h</b> <a href="a00478.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>tbbmalloc_proxy.h</b> <a href="a00486.html">[code]</a></td><td class="indexvalue"></td></tr>
+  <tr><td class="indexkey"><b>tick_count.h</b> <a href="a00488.html">[code]</a></td><td class="indexvalue"></td></tr>
+</table>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/functions.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/functions.html
new file mode 100644 (file)
index 0000000..6fce59c
--- /dev/null
@@ -0,0 +1,309 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Class Members</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li id="current"><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li id="current"><a href="functions.html"><span>All</span></a></li>
+    <li><a href="functions_func.html"><span>Functions</span></a></li>
+    <li><a href="functions_vars.html"><span>Variables</span></a></li>
+    <li><a href="functions_type.html"><span>Typedefs</span></a></li>
+    <li><a href="functions_enum.html"><span>Enumerations</span></a></li>
+    <li><a href="functions_eval.html"><span>Enumerator</span></a></li>
+    <li><a href="functions_rela.html"><span>Related&nbsp;Functions</span></a></li>
+  </ul>
+</div>
+<div class="tabs">
+  <ul>
+    <li><a href="#index_a"><span>a</span></a></li>
+    <li><a href="#index_b"><span>b</span></a></li>
+    <li><a href="#index_c"><span>c</span></a></li>
+    <li><a href="#index_d"><span>d</span></a></li>
+    <li><a href="#index_e"><span>e</span></a></li>
+    <li><a href="#index_f"><span>f</span></a></li>
+    <li><a href="#index_g"><span>g</span></a></li>
+    <li><a href="#index_i"><span>i</span></a></li>
+    <li><a href="#index_l"><span>l</span></a></li>
+    <li><a href="#index_m"><span>m</span></a></li>
+    <li><a href="#index_n"><span>n</span></a></li>
+    <li><a href="#index_o"><span>o</span></a></li>
+    <li><a href="#index_p"><span>p</span></a></li>
+    <li><a href="#index_q"><span>q</span></a></li>
+    <li><a href="#index_r"><span>r</span></a></li>
+    <li><a href="#index_s"><span>s</span></a></li>
+    <li><a href="#index_t"><span>t</span></a></li>
+    <li><a href="#index_u"><span>u</span></a></li>
+    <li><a href="#index_v"><span>v</span></a></li>
+    <li><a href="#index_w"><span>w</span></a></li>
+    <li><a href="#index_~"><span>~</span></a></li>
+  </ul>
+</div>
+
+<p>
+Here is a list of all documented class members with links to the class documentation for each member:
+<p>
+<h3><a class="anchor" name="index_a">- a -</a></h3><ul>
+<li>acquire()
+: <a class="el" href="a00202.html#b0b646ec5be02a127d159bbb7ca65353">tbb::spin_rw_mutex_v3::scoped_lock</a>, <a class="el" href="a00200.html#3ee3c338732b1f64b0b32a757807a30d">tbb::spin_mutex::scoped_lock</a>, <a class="el" href="a00195.html#7fb04da37cccf8c99b1f9102d9074f9a">tbb::recursive_mutex::scoped_lock</a>, <a class="el" href="a00190.html#a8dd5ab8686e76de21587544dbb681e0">tbb::queuing_rw_mutex::scoped_lock</a>, <a class="el" href="a00188.html#533e4fc8355ee321206a0609c42d909d">tbb::queuing_mutex::scoped_lock</a>, <a class="el" href="a00178.html#862e022841cdc522e4296a5533b22efd">tbb::mutex::scoped_lock</a>, <a class="el" href="a00162.html#26b4fe0ca87a7ad4852cb787db880119">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::bucket_accessor</a><li>add()
+: <a class="el" href="a00184.html#e131c560057a58229992b61eb8dba4c6">tbb::parallel_while&lt; Body &gt;</a>, <a class="el" href="a00183.html#40baaf0f6856f4491dd0adf896c93516">tbb::parallel_do_feeder&lt; Item &gt;</a><li>add_filter()
+: <a class="el" href="a00185.html#38fb5c9c8395dd6f89a4ae2011a83e0d">tbb::pipeline</a><li>affinity()
+: <a class="el" href="a00204.html#3a920a56b0bcf2801518fb45b2c9d2be">tbb::task</a><li>affinity_id
+: <a class="el" href="a00204.html#d61bb32389d3857bf7511d69beaafb76">tbb::task</a><li>allocate()
+: <a class="el" href="a00212.html#c35e5db8e9cdff5d1387db5b0bad2e4a">tbb::internal::tbb_exception_ptr</a>, <a class="el" href="a00209.html#f6cb487b1bdce0b581f265a77dca6d53">tbb::tbb_allocator&lt; T &gt;</a>, <a class="el" href="a00196.html#726b1586d05d44665a36e1c7b2699bfd">tbb::scalable_allocator&lt; T &gt;</a>, <a class="el" href="a00155.html#4cdeea67af6c1fcd8f1d5e9c4cab61e8">tbb::cache_aligned_allocator&lt; T &gt;</a><li>allocate_child()
+: <a class="el" href="a00204.html#1ff794f7053cd9148d5f280fbf07377f">tbb::task</a><li>allocate_continuation()
+: <a class="el" href="a00204.html#1434c79a5138993269d034008bff7329">tbb::task</a><li>allocate_root()
+: <a class="el" href="a00204.html#8ccc518caf31075a3e073996d2d240a4">tbb::task</a><li>allocated
+: <a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293ebe94d3348dd038e41107819f00c1884c">tbb::task</a><li>allocator_type
+: <a class="el" href="a00209.html#78701e7454ef8e1a25b5acd364367080">tbb::tbb_allocator&lt; T &gt;</a>, <a class="el" href="a00168.html#3c03eb40955b933b01987222722ac4bd">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>, <a class="el" href="a00159.html#2e2726fccf6d975dc1071608cc0bbf90">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#5a3956341728eaa558d8827063718cac">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a><li>assign()
+: <a class="el" href="a00166.html#93a06b3112cb804f42f40efb5e7387b4">tbb::concurrent_vector&lt; T, A &gt;</a><li>at()
+: <a class="el" href="a00166.html#23e14a38af748edff96a7adc3a0f1c58">tbb::concurrent_vector&lt; T, A &gt;</a><li>automatic
+: <a class="el" href="a00208.html#8f5988e2b0fbb2d533fcbb7f2583743f">tbb::task_scheduler_init</a></ul>
+<h3><a class="anchor" name="index_b">- b -</a></h3><ul>
+<li>back()
+: <a class="el" href="a00166.html#bd518e204107d07fd08d0ec5bdfd383d">tbb::concurrent_vector&lt; T, A &gt;</a><li>begin()
+: <a class="el" href="a00168.html#84afb3906a39e399cde1c950d6351300">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>, <a class="el" href="a00166.html#78a06182276ff758788d4c0623ae0d71">tbb::concurrent_vector&lt; T, A &gt;</a>, <a class="el" href="a00152.html#18d2258400756ac1446dac7676b18df3">tbb::blocked_range&lt; Value &gt;</a>, <a class="el" href="a00146.html#0d702fc6b9e9d061ace3501b3c861cdf">tbb::aligned_space&lt; T, N &gt;</a><li>blocked_range()
+: <a class="el" href="a00152.html#4c0efd2be3f96a0ab3ba5085e8b3fcc7">tbb::blocked_range&lt; Value &gt;</a><li>bucket_count()
+: <a class="el" href="a00160.html#414d15033d36c63aa3a40666dc4d6f5e">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></ul>
+<h3><a class="anchor" name="index_c">- c -</a></h3><ul>
+<li>cancel_group_execution()
+: <a class="el" href="a00204.html#0f3fb4aac549ab642022450a4bd13326">tbb::task</a>, <a class="el" href="a00206.html#8bcdfdf4e6bfb76125b6de15c00b571d">tbb::task_group_context</a><li>capacity()
+: <a class="el" href="a00166.html#3ed6b9ae7217af5103d974045b6f5cd5">tbb::concurrent_vector&lt; T, A &gt;</a>, <a class="el" href="a00159.html#b2888b3e4e837d7e03f2c731963a402b">tbb::concurrent_bounded_queue&lt; T, A &gt;</a><li>cbegin()
+: <a class="el" href="a00166.html#f88fcf1c920693c39bd9709db33c199f">tbb::concurrent_vector&lt; T, A &gt;</a><li>cend()
+: <a class="el" href="a00166.html#0c15a5d0f1cf75d687dabba07da1d46b">tbb::concurrent_vector&lt; T, A &gt;</a><li>clear()
+: <a class="el" href="a00207.html#fce446ee13e025969945328f3ff59b95">tbb::task_list</a>, <a class="el" href="a00185.html#2c84aef5b834b555ee220b176e25931e">tbb::pipeline</a>, <a class="el" href="a00168.html#a8764176d4b6014c5d65f1051851abc8">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>, <a class="el" href="a00166.html#26f937a359a66b6aae904c3cd9a3c444">tbb::concurrent_vector&lt; T, A &gt;</a>, <a class="el" href="a00159.html#90b31e2954c6e4596c7900435a5f4bc1">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#c32e8e84c0524155133b4aae32d2a827">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a>, <a class="el" href="a00160.html#a9f89be8fe28835749529d91081a2511">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>cols()
+: <a class="el" href="a00154.html#3336ba9480fd6c43e158f9beb024c050">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a>, <a class="el" href="a00153.html#392a46759af2c884957115771affa7f4">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a><li>compact()
+: <a class="el" href="a00166.html#1693d1da41b1a8235871be9c6633be35">tbb::concurrent_vector&lt; T, A &gt;</a><li>concurrent_bounded_queue()
+: <a class="el" href="a00159.html#a5e04dcd7db9fd9b583b4e7df832246a">tbb::concurrent_bounded_queue&lt; T, A &gt;</a><li>concurrent_hash_map()
+: <a class="el" href="a00160.html#3bfe75fcb350ce39cf610c164f233edc">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>concurrent_queue()
+: <a class="el" href="a00165.html#9102b897776bd2d9e908e6604ff16b5f">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#8a6b98ea11a867db8ac868f0113ca429">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a><li>concurrent_vector()
+: <a class="el" href="a00166.html#4450de83c5862ea4bcd9443fd7e67419">tbb::concurrent_vector&lt; T, A &gt;</a><li>const_accessor()
+: <a class="el" href="a00163.html#a9ead65cca68d4c49c7ef64d7899a4c8">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a><li>const_iterator
+: <a class="el" href="a00152.html#1a8d05842c2b3dfc177bc4d347e4cef7">tbb::blocked_range&lt; Value &gt;</a><li>const_reference
+: <a class="el" href="a00159.html#796713d0b9ba93a4721cbe13e4474068">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#4d48e7ff93f81636bca2c74f7da34750">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a><li>construct()
+: <a class="el" href="a00209.html#ab228ab9e324ed041c2226e1d717df5f">tbb::tbb_allocator&lt; T &gt;</a>, <a class="el" href="a00155.html#958ee8745c86c275bfc9533af565e017">tbb::cache_aligned_allocator&lt; T &gt;</a><li>context()
+: <a class="el" href="a00204.html#d8c36a93f3972590fbb65ff1cef3173b">tbb::task</a><li>count()
+: <a class="el" href="a00160.html#74f5ef06a06c5e619f156a1c76c04969">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>crbegin()
+: <a class="el" href="a00166.html#db78a1d28c9c966050e8a2926d834a33">tbb::concurrent_vector&lt; T, A &gt;</a><li>crend()
+: <a class="el" href="a00166.html#fff9cece89438587997ebedf93c5e962">tbb::concurrent_vector&lt; T, A &gt;</a></ul>
+<h3><a class="anchor" name="index_d">- d -</a></h3><ul>
+<li>deallocate()
+: <a class="el" href="a00209.html#fdd011fdf2f9ad07006dc7c0a7ec1da2">tbb::tbb_allocator&lt; T &gt;</a>, <a class="el" href="a00196.html#f806a238c18cbcfb531e1e0a0d2ec59d">tbb::scalable_allocator&lt; T &gt;</a>, <a class="el" href="a00155.html#3d4eadf188f7d85d3805ae534e0b8e1c">tbb::cache_aligned_allocator&lt; T &gt;</a><li>decrement_ref_count()
+: <a class="el" href="a00204.html#ef4680f5c148020c5e7e43ddef44cd5d">tbb::task</a><li>default_num_threads()
+: <a class="el" href="a00208.html#ba00714c33a41a3c2216f48613971cab">tbb::task_scheduler_init</a><li>deferred
+: <a class="el" href="a00208.html#e6c860f1e559026ff3ef4599c0d6c514">tbb::task_scheduler_init</a><li>destroy()
+: <a class="el" href="a00212.html#921875bbacd2c8a5f324c7da7a415262">tbb::internal::tbb_exception_ptr</a>, <a class="el" href="a00176.html#7a46873119d9f85a7b0009c13e41a258">tbb::movable_exception&lt; ExceptionData &gt;</a>, <a class="el" href="a00157.html#93d875d3555502ff6f18513525de204c">tbb::captured_exception</a>, <a class="el" href="a00211.html#66c94938eca8bf88b76f3eccaaf215d8">tbb::tbb_exception</a>, <a class="el" href="a00209.html#ef133522bf55f05a605bee0763208281">tbb::tbb_allocator&lt; T &gt;</a>, <a class="el" href="a00204.html#dfaacf92685e5f86393bf657b2853bf8">tbb::task</a>, <a class="el" href="a00155.html#cd298895a4f1654b5149ec84b591ecb5">tbb::cache_aligned_allocator&lt; T &gt;</a><li>difference_type
+: <a class="el" href="a00159.html#4b45c91297e69515d83d5eef85ae1f49">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#068576d16c7e4e05d52f9db7a45b5b65">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a><li>downgrade_to_reader()
+: <a class="el" href="a00202.html#c2c2c38a08cb9080e87099fac3e5bc94">tbb::spin_rw_mutex_v3::scoped_lock</a>, <a class="el" href="a00190.html#0d2f93edf7b15ec4bcee138823220c52">tbb::queuing_rw_mutex::scoped_lock</a></ul>
+<h3><a class="anchor" name="index_e">- e -</a></h3><ul>
+<li>empty()
+: <a class="el" href="a00207.html#f3ac31e092814b90929f81bb30441959">tbb::task_list</a>, <a class="el" href="a00168.html#72595886d0ac8fd0543f90038570510d">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>, <a class="el" href="a00166.html#c6426cb93cf20d3af40f3c90f1f0481a">tbb::concurrent_vector&lt; T, A &gt;</a>, <a class="el" href="a00159.html#f64924f2ee9225c368a270fc3c394db9">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#f3f6fce0cfa2d581d6f3b47e0613ad64">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a>, <a class="el" href="a00160.html#6cab7d029a3e73a653ef0faeac4d1586">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>, <a class="el" href="a00163.html#5cce3104cb0a52e08d2131370871c614">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a>, <a class="el" href="a00154.html#356860e1c977d91711e8216bd55c0b25">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a>, <a class="el" href="a00153.html#d144cb2d88cef553420311aca8667a44">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a>, <a class="el" href="a00152.html#8f4f02f530eb3f2e7ea26e06f76aef9d">tbb::blocked_range&lt; Value &gt;</a><li>end()
+: <a class="el" href="a00168.html#cb448bb4977ce366ceb7344085cc7050">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>, <a class="el" href="a00166.html#1e6aa764ce5a1cbd24526f68bc0a2f6b">tbb::concurrent_vector&lt; T, A &gt;</a>, <a class="el" href="a00152.html#8b929d93ddc13f148b11bceef3a3bdf8">tbb::blocked_range&lt; Value &gt;</a>, <a class="el" href="a00146.html#024be075c23c0394c9a2518d993bcd9e">tbb::aligned_space&lt; T, N &gt;</a><li>enqueue()
+: <a class="el" href="a00204.html#8365d063c0cc9d7bd616bca47256b93c">tbb::task</a><li>enumerable_thread_specific()
+: <a class="el" href="a00168.html#7bce6829981c9efe3f59cae2355e383e">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a><li>erase()
+: <a class="el" href="a00160.html#0f500842d0cf791f8fa61662edb1b311">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>exclude()
+: <a class="el" href="a00160.html#faad2108bd2be75e52293486af59f11e">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>execute()
+: <a class="el" href="a00204.html#22c298cd40937a431a06777423f002f6">tbb::task</a><li>executing
+: <a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293ead0fe2302ccc360923f738c2ed7ec1b9">tbb::task</a></ul>
+<h3><a class="anchor" name="index_f">- f -</a></h3><ul>
+<li>finalize()
+: <a class="el" href="a00169.html#56275eb889c77c4807967133e21401bd">tbb::filter</a><li>find()
+: <a class="el" href="a00160.html#bce7bdf46435115a95cca2aa73c5da83">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>freed
+: <a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293ecc67ca92bd6f1ce9738a1e9e7206b735">tbb::task</a><li>front()
+: <a class="el" href="a00166.html#502615a858eb9fa0390ee59169065e90">tbb::concurrent_vector&lt; T, A &gt;</a></ul>
+<h3><a class="anchor" name="index_g">- g -</a></h3><ul>
+<li>get_allocator()
+: <a class="el" href="a00166.html#2fdba8e90de6a4d2300222236d46758e">tbb::concurrent_vector&lt; T, A &gt;</a>, <a class="el" href="a00159.html#415eb87e53b1c6a266de06ecbc490d16">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#f034f70caef445fe8abc9113ec926a8d">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a>, <a class="el" href="a00160.html#199208eed6f09e200cda364f906be0fe">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>grainsize()
+: <a class="el" href="a00152.html#fcd2e5b8b6c11fd3f20fc0aa9f11bbc2">tbb::blocked_range&lt; Value &gt;</a><li>grow_by()
+: <a class="el" href="a00166.html#473a59a4c9308b93411b898b3110d26c">tbb::concurrent_vector&lt; T, A &gt;</a><li>grow_to_at_least()
+: <a class="el" href="a00166.html#a7e3b67c8ccab16d0aecc80899ae799d">tbb::concurrent_vector&lt; T, A &gt;</a></ul>
+<h3><a class="anchor" name="index_i">- i -</a></h3><ul>
+<li>increment_ref_count()
+: <a class="el" href="a00204.html#f5fb43c7ad0de5a4b95703cebc39e345">tbb::task</a><li>init()
+: <a class="el" href="a00206.html#49a55352084fd44b8863d182e839e6dc">tbb::task_group_context</a><li>initialize()
+: <a class="el" href="a00208.html#d5ed214a8bb53b0466ed91ff4734b9a3">tbb::task_scheduler_init</a><li>insert()
+: <a class="el" href="a00160.html#cfe172677e5987004ef4a03e22fa338a">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>internal_copy()
+: <a class="el" href="a00160.html#3c27779fe66b79505390d084310d997e">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>internal_equal_range()
+: <a class="el" href="a00160.html#976c57edfb7f22b9f91a2e11f141eb4a">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>internal_fast_find()
+: <a class="el" href="a00160.html#2f76ed101a0ccc8875b846c2f747897e">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>interval_t()
+: <a class="el" href="a00216.html#1a21a428e00cced2e6a49e0f5f2258bf">tbb::tick_count::interval_t</a><li>is_active()
+: <a class="el" href="a00208.html#12752282977029f23416642bc03e8b74">tbb::task_scheduler_init</a><li>is_bound()
+: <a class="el" href="a00169.html#15c29cae5d237e6d63dbfe5c94af89d5">tbb::filter</a><li>is_cancelled()
+: <a class="el" href="a00204.html#025f18118c057c4c8db87ff2ce8df975">tbb::task</a><li>is_divisible()
+: <a class="el" href="a00154.html#39d69191721c488e737ae5d9c5336b9c">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a>, <a class="el" href="a00153.html#ad36a9b38e4fef26d376f99552ce2d92">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a>, <a class="el" href="a00152.html#41a58b703d574b6e1ca155df3576f578">tbb::blocked_range&lt; Value &gt;</a><li>is_group_execution_cancelled()
+: <a class="el" href="a00206.html#4db72f16210b0a991b2c134d6763a4cc">tbb::task_group_context</a><li>is_ordered()
+: <a class="el" href="a00169.html#cd53206c4795ef2df5df26b795caf692">tbb::filter</a><li>is_owned_by_current_thread()
+: <a class="el" href="a00204.html#c26718b3b247cd13deb1a741902e7105">tbb::task</a><li>is_serial()
+: <a class="el" href="a00169.html#fcfec27656a69ff2072802ac001e936f">tbb::filter</a><li>is_stolen_task()
+: <a class="el" href="a00204.html#f9169402702f56bf519448aaf34450aa">tbb::task</a><li>is_writer()
+: <a class="el" href="a00162.html#fc194e3a186dc935a5fb513cc9f8e898">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::bucket_accessor</a></ul>
+<h3><a class="anchor" name="index_l">- l -</a></h3><ul>
+<li>local()
+: <a class="el" href="a00168.html#7dc79058d2832f7447de8e691c3455ea">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a><li>lock()
+: <a class="el" href="a00201.html#4007d6e1523dbc3c2bb7f889ab789a8a">tbb::spin_rw_mutex_v3</a>, <a class="el" href="a00199.html#4f748989e19b6045e3a2d2ee73626a28">tbb::spin_mutex</a>, <a class="el" href="a00194.html#4c342c69d47f4bb0b393535dee4015d6">tbb::recursive_mutex</a>, <a class="el" href="a00191.html#2653d1a2d560059a51219a8ceab3ade9">tbb::interface5::reader_writer_lock</a>, <a class="el" href="a00177.html#4470e61c24c129a0299ca6c17240adbb">tbb::mutex</a><li>lock_read()
+: <a class="el" href="a00201.html#13f799708ac4ca437a16be202e263e18">tbb::spin_rw_mutex_v3</a>, <a class="el" href="a00191.html#d9d16a24d9f6c3dada73c6b9ff214f5b">tbb::interface5::reader_writer_lock</a><li>lookup()
+: <a class="el" href="a00160.html#1f22480a290ddc6c145888d8f985531a">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></ul>
+<h3><a class="anchor" name="index_m">- m -</a></h3><ul>
+<li>make_filter
+: <a class="el" href="a00170.html#85c2892eff1fddcd06e28911e75838bd">tbb::interface5::filter_t&lt; T, U &gt;</a><li>malloc_type
+: <a class="el" href="a00209.html#09a7f81fb2c3055aaecf058b11538544">tbb::tbb_allocator&lt; T &gt;</a><li>max_size()
+: <a class="el" href="a00209.html#f059ca2c96243024f0d562ee3a87a3a5">tbb::tbb_allocator&lt; T &gt;</a>, <a class="el" href="a00196.html#880e766f1d913988c21973dbdd874fd5">tbb::scalable_allocator&lt; T &gt;</a>, <a class="el" href="a00166.html#2c248a017f0576df3e7cd99627836fd6">tbb::concurrent_vector&lt; T, A &gt;</a>, <a class="el" href="a00160.html#1e45d3cbd1e2ae06f365f1b48e0df0b5">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>, <a class="el" href="a00155.html#fb23b687b4c0429dab4c7f8017266cf0">tbb::cache_aligned_allocator&lt; T &gt;</a><li>move()
+: <a class="el" href="a00176.html#1aea0ad179d6f0481fe7f3495f66adf9">tbb::movable_exception&lt; ExceptionData &gt;</a>, <a class="el" href="a00157.html#837a50b8f6a800bda225c39d1699643f">tbb::captured_exception</a>, <a class="el" href="a00211.html#3e3482bf264d4ca4dde046cd9c02c766">tbb::tbb_exception</a><li>mutex()
+: <a class="el" href="a00177.html#05313cb77d4f85213103d4dab74ed454">tbb::mutex</a><li>my_exception_data
+: <a class="el" href="a00176.html#a8c0ae2089ae784b28907cf748b89416">tbb::movable_exception&lt; ExceptionData &gt;</a></ul>
+<h3><a class="anchor" name="index_n">- n -</a></h3><ul>
+<li>name()
+: <a class="el" href="a00176.html#bc5f5c4739b17ac5211ac58226c2f5a5">tbb::movable_exception&lt; ExceptionData &gt;</a>, <a class="el" href="a00157.html#5af82fd677449c5ca727fa1d7e16f9f5">tbb::captured_exception</a>, <a class="el" href="a00211.html#d00f6497e552fee978a02bfcbebf46e2">tbb::tbb_exception</a><li>native_handle_type
+: <a class="el" href="a00194.html#889fa8cc32dd707eef7c0f52dda09c0d">tbb::recursive_mutex</a>, <a class="el" href="a00177.html#9f1ec84d5815263ceae853f06ddb4cac">tbb::mutex</a><li>note_affinity()
+: <a class="el" href="a00204.html#713c338c8eeaebdc5a6b10a69c039b06">tbb::task</a><li>now()
+: <a class="el" href="a00215.html#fb7f78ca61cf28398645ace66e284473">tbb::tick_count</a></ul>
+<h3><a class="anchor" name="index_o">- o -</a></h3><ul>
+<li>operator *()
+: <a class="el" href="a00161.html#e8938f0cd1211e88a1d73527ed3636c4">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::accessor</a>, <a class="el" href="a00163.html#30f31106840700a4c3664b9cb1c31ca7">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a><li>operator delete()
+: <a class="el" href="a00211.html#3f2da7f3d8a6e4c1df522af1213afb5a">tbb::tbb_exception</a><li>operator()()
+: <a class="el" href="a00169.html#fa1b3dc1f4f47563ccab7f4d92f5b543">tbb::filter</a>, <a class="el" href="a00162.html#57c6110bd20e95c06de5a199de988941">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::bucket_accessor</a><li>operator+
+: <a class="el" href="a00216.html#5871ead1ca230efbe52a5008470e6428">tbb::tick_count::interval_t</a><li>operator+=()
+: <a class="el" href="a00216.html#cd9814947902e26463a69a111530f81b">tbb::tick_count::interval_t</a><li>operator-
+: <a class="el" href="a00215.html#09dde78a4100800c11bb883d6204b586">tbb::tick_count</a>, <a class="el" href="a00216.html#fa509691e1d689830931e36edd274f76">tbb::tick_count::interval_t</a><li>operator-=()
+: <a class="el" href="a00216.html#35ff7eaf7c2031b4a991402ac9ecb940">tbb::tick_count::interval_t</a><li>operator-&gt;()
+: <a class="el" href="a00161.html#fcebc32c020202cc37e60eadef157569">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::accessor</a>, <a class="el" href="a00163.html#3d03a48ecb8cd9549bd8be64b09c9b0d">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a><li>operator=()
+: <a class="el" href="a00166.html#19f4ab88a01b0fd056af3bba463e7bd6">tbb::concurrent_vector&lt; T, A &gt;</a>, <a class="el" href="a00160.html#2c0c42a2e1b5282b6739157df9ce2304">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>operator[]()
+: <a class="el" href="a00166.html#c6fade5c732cc95274d1d8277ea619d1">tbb::concurrent_vector&lt; T, A &gt;</a></ul>
+<h3><a class="anchor" name="index_p">- p -</a></h3><ul>
+<li>page_range_type
+: <a class="el" href="a00154.html#b8ebf17a552ba47825e9b3887855b719">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a><li>pages()
+: <a class="el" href="a00154.html#cf971430aa12361d3ed245344b7c6764">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a><li>parallel_while()
+: <a class="el" href="a00184.html#36e26ba3880c7bcf804a97ba0cbe133f">tbb::parallel_while&lt; Body &gt;</a><li>parent()
+: <a class="el" href="a00204.html#314e98ee4347ccec83efcb9ee22e8596">tbb::task</a><li>pause()
+: <a class="el" href="a00149.html#a174ea93e3bd3d5cce82389c2f28d037">tbb::internal::atomic_backoff</a><li>pipeline()
+: <a class="el" href="a00185.html#596dc3beba27099c4c8581cb419e1a59">tbb::pipeline</a><li>pop()
+: <a class="el" href="a00159.html#41f4c6bd7a82ab070e840bbf81b0b123">tbb::concurrent_bounded_queue&lt; T, A &gt;</a><li>pop_front()
+: <a class="el" href="a00207.html#5fe85df5ed524418389d34051750347d">tbb::task_list</a><li>pop_if_present()
+: <a class="el" href="a00165.html#48da3536245318af6cb5fd58bac78039">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a><li>process_item()
+: <a class="el" href="a00214.html#5e726bdc7fbd924c0b07bd558b1d4d5d">tbb::thread_bound_filter</a><li>push()
+: <a class="el" href="a00159.html#ceb08c743b11ba88c878e73fff8af20b">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#73c47563ffcc4c2f6452f25a04ebe2e2">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a><li>push_back()
+: <a class="el" href="a00207.html#4cd34756bc4763dafb8c84838a0124ff">tbb::task_list</a>, <a class="el" href="a00166.html#e94e038f915c0268fdf2d3d7f87d81b8">tbb::concurrent_vector&lt; T, A &gt;</a><li>push_if_not_full()
+: <a class="el" href="a00165.html#7c45561bafe71107d09b2bc1b8f4e681">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a></ul>
+<h3><a class="anchor" name="index_q">- q -</a></h3><ul>
+<li>queuing_mutex()
+: <a class="el" href="a00187.html#b389ad9c4db7293e4bdb5b8cda69ec04">tbb::queuing_mutex</a><li>queuing_rw_mutex()
+: <a class="el" href="a00189.html#85c90877c3447690ac4e2ac4ff8dea5e">tbb::queuing_rw_mutex</a></ul>
+<h3><a class="anchor" name="index_r">- r -</a></h3><ul>
+<li>range()
+: <a class="el" href="a00168.html#3b068000cf4dbf9b40f8bb7e3fc53e0b">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>, <a class="el" href="a00166.html#3d09ccfb581b879ae64203741035e193">tbb::concurrent_vector&lt; T, A &gt;</a><li>rbegin()
+: <a class="el" href="a00166.html#9f9c103e18d5f212703805354074ad44">tbb::concurrent_vector&lt; T, A &gt;</a><li>reader_writer_lock()
+: <a class="el" href="a00191.html#c1431c4293e777efd9aab9a95c2a46e1">tbb::interface5::reader_writer_lock</a><li>ready
+: <a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e0841dcf1c2a96dee9aa7b69f636cb81a">tbb::task</a><li>recursive_mutex()
+: <a class="el" href="a00194.html#d2fceb7f95c24a8cd1457d4527e4b8c6">tbb::recursive_mutex</a><li>recycle
+: <a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e58debec6ab130290640d0cc2eedba35d">tbb::task</a><li>recycle_as_child_of()
+: <a class="el" href="a00204.html#db399855177438bbc9cc61d508dae8d2">tbb::task</a><li>recycle_as_continuation()
+: <a class="el" href="a00204.html#a67a79e18f62b43a623a00cfbd76db4c">tbb::task</a><li>recycle_as_safe_continuation()
+: <a class="el" href="a00204.html#3b290d14109704e2b69dc1ac980a7a76">tbb::task</a><li>recycle_to_reexecute()
+: <a class="el" href="a00204.html#4f1be9bbcdb487830dbe298b68d85144">tbb::task</a><li>reexecute
+: <a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e3bf499aa6e6487cd1ace883a63100513">tbb::task</a><li>ref_count()
+: <a class="el" href="a00204.html#ad774f55eaec008ae02b236423209ced">tbb::task</a><li>reference
+: <a class="el" href="a00159.html#dcd44ca6a88c0dc7a847a47a10811f0c">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#a8d725c50a9834bb7af5b67c0aff92b8">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a><li>register_pending_exception()
+: <a class="el" href="a00206.html#d97c8a03615594b71b4ef06ff75cf561">tbb::task_group_context</a><li>rehash()
+: <a class="el" href="a00160.html#13f3f2e8de7564be03882c31559493c9">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>release()
+: <a class="el" href="a00202.html#61b14d00a78185c9b2d206ebfc379124">tbb::spin_rw_mutex_v3::scoped_lock</a>, <a class="el" href="a00200.html#eeb615e68e963e6bf8d9c11402d0ce8e">tbb::spin_mutex::scoped_lock</a>, <a class="el" href="a00195.html#ac480ea0e9d5ea0345a67d57008b6263">tbb::recursive_mutex::scoped_lock</a>, <a class="el" href="a00190.html#67ae221109ddc69510ab593874e435d4">tbb::queuing_rw_mutex::scoped_lock</a>, <a class="el" href="a00188.html#3bf2b8c87ff22115be9b2eac179f2d30">tbb::queuing_mutex::scoped_lock</a>, <a class="el" href="a00178.html#0d51d18cd99df3b2e93bf07378d0992c">tbb::mutex::scoped_lock</a>, <a class="el" href="a00163.html#d5ce4f88d8870290238a8ad621e6f270">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a><li>rend()
+: <a class="el" href="a00166.html#d438b9b32ea3a8ffb703015b6dce055b">tbb::concurrent_vector&lt; T, A &gt;</a><li>reserve()
+: <a class="el" href="a00166.html#5a0ce05026994b010018f72cfdeb72c1">tbb::concurrent_vector&lt; T, A &gt;</a><li>reset()
+: <a class="el" href="a00206.html#6d30d16bf1cd22f86c6afaf29c2b430c">tbb::task_group_context</a><li>resize()
+: <a class="el" href="a00166.html#98ce6b2c6d2622f0c030b46dfac3880c">tbb::concurrent_vector&lt; T, A &gt;</a><li>row_range_type
+: <a class="el" href="a00153.html#a807a22fe658ec38b8edfd69521d0383">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a><li>rows()
+: <a class="el" href="a00154.html#1584623e59ff32a8aa82006827508be4">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a>, <a class="el" href="a00153.html#f496e7348a82652fba581203477cc07c">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a><li>run()
+: <a class="el" href="a00185.html#93d7fec8cd607b803dd2d79fb46bd260">tbb::pipeline</a>, <a class="el" href="a00184.html#b32a0a6e5e09ebb7fad3e6652c19afe5">tbb::parallel_while&lt; Body &gt;</a></ul>
+<h3><a class="anchor" name="index_s">- s -</a></h3><ul>
+<li>scoped_lock()
+: <a class="el" href="a00202.html#42a92d4f8fdde425b111cfa8a9228071">tbb::spin_rw_mutex_v3::scoped_lock</a>, <a class="el" href="a00200.html#5ce6807050a9e8f87bcb4a65dccb12ef">tbb::spin_mutex::scoped_lock</a>, <a class="el" href="a00195.html#dec17713c4c1321ac8fec66816d0c602">tbb::recursive_mutex::scoped_lock</a>, <a class="el" href="a00192.html#cf19f20e082887c1bb0ba6b0911c3583">tbb::interface5::reader_writer_lock::scoped_lock</a>, <a class="el" href="a00190.html#fbb8798792d3aebb136c46fc63d2529e">tbb::queuing_rw_mutex::scoped_lock</a>, <a class="el" href="a00188.html#9b51ef972f5618ac17caadb58841ab6d">tbb::queuing_mutex::scoped_lock</a>, <a class="el" href="a00178.html#605a6b9af0f8cdabdf81825e0de99600">tbb::mutex::scoped_lock</a><li>scoped_lock_read()
+: <a class="el" href="a00193.html#87ab0dc8f7216e6ba0f7acd6aec33064">tbb::interface5::reader_writer_lock::scoped_lock_read</a><li>seconds()
+: <a class="el" href="a00216.html#d5d8429c0bc59cf6131b2abc7929fa59">tbb::tick_count::interval_t</a><li>self()
+: <a class="el" href="a00204.html#bd43e8d6249738efafd12d6a4c72c5e3">tbb::task</a><li>set_affinity()
+: <a class="el" href="a00204.html#dca19d7a45487a7d67a0db517e2b57c9">tbb::task</a><li>set_capacity()
+: <a class="el" href="a00159.html#f3c6c934f85fd02aedbc83a16943193b">tbb::concurrent_bounded_queue&lt; T, A &gt;</a><li>set_ref_count()
+: <a class="el" href="a00204.html#06a4206a57e8e12a439b14d6d41cfd92">tbb::task</a><li>set_state()
+: <a class="el" href="a00177.html#795649a185b0d6af6dc81c5f378616dd">tbb::mutex</a><li>shrink_to_fit()
+: <a class="el" href="a00166.html#03c6f4cf66532bf4cc907ee738a9a186">tbb::concurrent_vector&lt; T, A &gt;</a><li>size()
+: <a class="el" href="a00168.html#33fd6593da1ed14340f10f67d5a69130">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>, <a class="el" href="a00166.html#715fe313c4a9c22731cc404dd80c9ec9">tbb::concurrent_vector&lt; T, A &gt;</a>, <a class="el" href="a00159.html#7dc14d1a579a4cccda9f857585e1768d">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00160.html#17fd8c5fe8c6a86075f34aa4e8412ba3">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>, <a class="el" href="a00152.html#9eaa0b6beff1420f688570bbf6b8c462">tbb::blocked_range&lt; Value &gt;</a><li>size_type
+: <a class="el" href="a00159.html#a80e4c11dbb324e4b92a24a77bbcde68">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#8fc30e93f8342a1960357f71e4fe8a2b">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a>, <a class="el" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">tbb::blocked_range&lt; Value &gt;</a><li>spawn_and_wait_for_all()
+: <a class="el" href="a00204.html#894ab68378e502776d8220eea7ce9fa1">tbb::task</a><li>spawn_root_and_wait()
+: <a class="el" href="a00204.html#c33c7edbaec67aa8a56f48986a9dc69f">tbb::task</a><li>spin_mutex()
+: <a class="el" href="a00199.html#3d8fb44644fd8d41ada1fbeba7409be3">tbb::spin_mutex</a><li>spin_rw_mutex_v3()
+: <a class="el" href="a00201.html#61332b2756de89f3f5f69310cbb6e70c">tbb::spin_rw_mutex_v3</a><li>state()
+: <a class="el" href="a00204.html#0af7b2d7e6e8b4333b2accfce3dfb374">tbb::task</a><li>state_type
+: <a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e">tbb::task</a><li>status_t
+: <a class="el" href="a00191.html#6f921f0d7c1812ceb5674418c8b6ccaf">tbb::interface5::reader_writer_lock</a><li>swap()
+: <a class="el" href="a00166.html#96c9c4bd968ed3edb8dd276854d2dae0">tbb::concurrent_vector&lt; T, A &gt;</a>, <a class="el" href="a00160.html#eddb0d2efe0b4f25a85c059e1c3dac15">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></ul>
+<h3><a class="anchor" name="index_t">- t -</a></h3><ul>
+<li>task()
+: <a class="el" href="a00204.html#2bce8ec6e44706e70128f5cf91b76e67">tbb::task</a><li>task_group_context()
+: <a class="el" href="a00206.html#19fee08fb8ac98adccfe69c1aa63c491">tbb::task_group_context</a><li>task_list()
+: <a class="el" href="a00207.html#416341c2047eaef50417b41eaf7e9de6">tbb::task_list</a><li>task_scheduler_init()
+: <a class="el" href="a00208.html#421600bf9bf9338bcf937063f2ff0e90">tbb::task_scheduler_init</a><li>terminate()
+: <a class="el" href="a00208.html#f73257e04cb7fb9bd5be2b635d9016f1">tbb::task_scheduler_init</a><li>throw_self()
+: <a class="el" href="a00212.html#292832fd5c523e3d8081a22247840a1d">tbb::internal::tbb_exception_ptr</a>, <a class="el" href="a00176.html#17cffba35811c92b7e65d63506b69602">tbb::movable_exception&lt; ExceptionData &gt;</a>, <a class="el" href="a00157.html#2dd1be66ab32fa27e0ddef5707fa67ef">tbb::captured_exception</a>, <a class="el" href="a00211.html#8588e07fa49692f4d734e4f2e4f048f4">tbb::tbb_exception</a><li>tick_count()
+: <a class="el" href="a00215.html#34593326ae4191e02a13c7cbdab9de4c">tbb::tick_count</a><li>try_acquire()
+: <a class="el" href="a00202.html#9879626968d9b9a04cd2ec0fb2e84ae1">tbb::spin_rw_mutex_v3::scoped_lock</a>, <a class="el" href="a00200.html#9297ec188534b45dc0ca48f2f39a0501">tbb::spin_mutex::scoped_lock</a>, <a class="el" href="a00195.html#36bfc3e93e3ef6340abef4901444d340">tbb::recursive_mutex::scoped_lock</a>, <a class="el" href="a00190.html#2e4ff6c9ec2fee6682f95290d1f42baa">tbb::queuing_rw_mutex::scoped_lock</a>, <a class="el" href="a00188.html#e5a014fb817599386a87170cf2cf51a9">tbb::queuing_mutex::scoped_lock</a>, <a class="el" href="a00178.html#591e0c49b82bcedffcbe0923f1b915ec">tbb::mutex::scoped_lock</a><li>try_lock()
+: <a class="el" href="a00201.html#088bb256be794cc47d3b83791632fdfc">tbb::spin_rw_mutex_v3</a>, <a class="el" href="a00199.html#8f9a58fb56a2b4c5efe1a7f7c1ae2074">tbb::spin_mutex</a>, <a class="el" href="a00194.html#86e719b0afee25704af11ab97694d240">tbb::recursive_mutex</a>, <a class="el" href="a00191.html#721eb173e154ab38292273e9266a9b07">tbb::interface5::reader_writer_lock</a>, <a class="el" href="a00177.html#4331652c79dea1c1131bd59ab161b234">tbb::mutex</a><li>try_lock_read()
+: <a class="el" href="a00201.html#b8667415869013f840d976aa406d385a">tbb::spin_rw_mutex_v3</a>, <a class="el" href="a00191.html#595fb23952e3b89426b1f7938dea9b11">tbb::interface5::reader_writer_lock</a><li>try_pop()
+: <a class="el" href="a00159.html#0ca487019bbb00a196442aff78a1e4f7">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#ae31ca0db34ef96ef1e74aa0d28c95f8">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a><li>try_process_item()
+: <a class="el" href="a00214.html#c4f90f2c771bce748beb9be734fa286c">tbb::thread_bound_filter</a><li>try_push()
+: <a class="el" href="a00159.html#2bd6232531279fb3ccbd296bea23066b">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></ul>
+<h3><a class="anchor" name="index_u">- u -</a></h3><ul>
+<li>unlock()
+: <a class="el" href="a00201.html#f9f52ead2098eb5fb12da59d5ae53b55">tbb::spin_rw_mutex_v3</a>, <a class="el" href="a00199.html#0e843ee6265f57f27d228ba91e7308ef">tbb::spin_mutex</a>, <a class="el" href="a00194.html#f0a96e26b7f074588dc31e32524856ae">tbb::recursive_mutex</a>, <a class="el" href="a00191.html#5113b32689305599b2c36b5831547704">tbb::interface5::reader_writer_lock</a>, <a class="el" href="a00177.html#5fc9ef443ae75d966695546be399cc6b">tbb::mutex</a><li>unsafe_size()
+: <a class="el" href="a00164.html#eaa35a5274606779802e9a669a706260">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a><li>upgrade_to_writer()
+: <a class="el" href="a00202.html#3f0b1e3f2efab63336400348bd070226">tbb::spin_rw_mutex_v3::scoped_lock</a>, <a class="el" href="a00190.html#11ba1da4a722c9e6f73339a52c487e82">tbb::queuing_rw_mutex::scoped_lock</a></ul>
+<h3><a class="anchor" name="index_v">- v -</a></h3><ul>
+<li>value_type
+: <a class="el" href="a00184.html#fa297e53d3af2a101e712bc200233e9c">tbb::parallel_while&lt; Body &gt;</a>, <a class="el" href="a00159.html#98245517a931e5893f6601e66c51fc75">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#682c3978d5cb0620000994f11c44a476">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a>, <a class="el" href="a00161.html#49eec74f272bab187d176c0d9d16a7fe">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::accessor</a>, <a class="el" href="a00163.html#48647ca0d79c1233b997f5768403c926">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a></ul>
+<h3><a class="anchor" name="index_w">- w -</a></h3><ul>
+<li>wait_for_all()
+: <a class="el" href="a00204.html#53d2615ad9c38859b4c8080936600283">tbb::task</a><li>what()
+: <a class="el" href="a00176.html#b33a89bccf0c63106f1270c7bfaaf54f">tbb::movable_exception&lt; ExceptionData &gt;</a>, <a class="el" href="a00157.html#6b5988ef74a1fe2a58998d110b3633e0">tbb::captured_exception</a>, <a class="el" href="a00211.html#e8157689ecb66bc6c72d3618bf3cc371">tbb::tbb_exception</a></ul>
+<h3><a class="anchor" name="index_~">- ~ -</a></h3><ul>
+<li>~combinable()
+: <a class="el" href="a00158.html#2c87e79ae98588a5780f708773388843">tbb::combinable&lt; T &gt;</a><li>~concurrent_bounded_queue()
+: <a class="el" href="a00159.html#acaf5b510dc0dfc7780b8c956cf773cf">tbb::concurrent_bounded_queue&lt; T, A &gt;</a><li>~concurrent_hash_map()
+: <a class="el" href="a00160.html#a1ac58997d8fbf242b266e3691573481">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>~concurrent_queue()
+: <a class="el" href="a00164.html#830b33753d6b149c366344e29b2edd8c">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a><li>~concurrent_vector()
+: <a class="el" href="a00166.html#da2444b28bb840d38f60d0030333a5fc">tbb::concurrent_vector&lt; T, A &gt;</a><li>~const_accessor()
+: <a class="el" href="a00163.html#752b0c1ec74b94786403a75e42917d01">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a><li>~enumerable_thread_specific()
+: <a class="el" href="a00168.html#5a7907d9e3e5b18e7a7b55211ef3213f">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a><li>~filter()
+: <a class="el" href="a00169.html#66d159f362293e3964ba3da8bc1d2604">tbb::filter</a><li>~parallel_while()
+: <a class="el" href="a00184.html#6fcfc973cc56b79c6d0fbb8a31be7e84">tbb::parallel_while&lt; Body &gt;</a><li>~pipeline()
+: <a class="el" href="a00185.html#49513c6c24f9d5bbbb27edca5efe01c9">tbb::pipeline</a><li>~queuing_rw_mutex()
+: <a class="el" href="a00189.html#1ba73e3d95cfdf8323880bc623af9099">tbb::queuing_rw_mutex</a><li>~reader_writer_lock()
+: <a class="el" href="a00191.html#5135f64f7b7339017f33d956445edbee">tbb::interface5::reader_writer_lock</a><li>~scoped_lock()
+: <a class="el" href="a00202.html#d7eaaa3f2e2c5dc11e7005811b1bdd04">tbb::spin_rw_mutex_v3::scoped_lock</a>, <a class="el" href="a00200.html#ac6fa425d1f06c56d8b70abc51aac844">tbb::spin_mutex::scoped_lock</a>, <a class="el" href="a00195.html#c1197ffb8f3cd9d4fed71d7e06265b7c">tbb::recursive_mutex::scoped_lock</a>, <a class="el" href="a00192.html#70246e0260493625ff956fa5926fc71f">tbb::interface5::reader_writer_lock::scoped_lock</a>, <a class="el" href="a00190.html#32c7d67a660d23ebbaab1a1d2826d31a">tbb::queuing_rw_mutex::scoped_lock</a>, <a class="el" href="a00188.html#ac2c576a93570957d694192a5f491443">tbb::queuing_mutex::scoped_lock</a>, <a class="el" href="a00178.html#0ebbbecaf4311e9df7362cb76ceaa368">tbb::mutex::scoped_lock</a><li>~scoped_lock_read()
+: <a class="el" href="a00193.html#bd21c5f3d555d64d1de8658e15bf4966">tbb::interface5::reader_writer_lock::scoped_lock_read</a><li>~spin_rw_mutex_v3()
+: <a class="el" href="a00201.html#9a815fb2759e55072ed413f1b6970cf3">tbb::spin_rw_mutex_v3</a><li>~task()
+: <a class="el" href="a00204.html#98245ee0473f84cb19dbbf8c81134908">tbb::task</a><li>~task_list()
+: <a class="el" href="a00207.html#6d438f1499a02db1e59c24ab6043e5ba">tbb::task_list</a><li>~task_scheduler_init()
+: <a class="el" href="a00208.html#4da6c86292d80c703a66c1f6f5299488">tbb::task_scheduler_init</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
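
[Editorial illustration, not part of the generated documentation or the diff above.] The member index in this file lists the accessor-based interface of tbb::interface4::concurrent_hash_map (insert(), find(), erase(), accessor, const_accessor). A minimal C++ sketch of how those members are typically combined is shown below; the WordTable typedef, the std::string/int key and value types, and the word-counting logic are assumptions made only for this example.

    #include <string>
    #include "tbb/concurrent_hash_map.h"

    // Hypothetical word-count table; the Key/T choice is an assumption for this sketch.
    typedef tbb::concurrent_hash_map<std::string, int> WordTable;

    void count_word(WordTable &table, const std::string &word) {
        WordTable::accessor a;            // grants exclusive (write) access to one element
        table.insert(a, word);            // inserts a default-constructed count if the key is new
        a->second += 1;                   // the element stays locked while 'a' is in scope
    }

    int read_count(const WordTable &table, const std::string &word) {
        WordTable::const_accessor a;      // grants shared (read) access
        return table.find(a, word) ? a->second : 0;
    }

The accessor objects act as smart locks on individual elements, which is why the index above documents release() on const_accessor rather than a map-wide lock.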
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/functions_enum.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/functions_enum.html
new file mode 100644 (file)
index 0000000..06e512d
--- /dev/null
@@ -0,0 +1,49 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Class Members - Enumerations</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li id="current"><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="functions.html"><span>All</span></a></li>
+    <li><a href="functions_func.html"><span>Functions</span></a></li>
+    <li><a href="functions_vars.html"><span>Variables</span></a></li>
+    <li><a href="functions_type.html"><span>Typedefs</span></a></li>
+    <li id="current"><a href="functions_enum.html"><span>Enumerations</span></a></li>
+    <li><a href="functions_eval.html"><span>Enumerator</span></a></li>
+    <li><a href="functions_rela.html"><span>Related&nbsp;Functions</span></a></li>
+  </ul>
+</div>
+&nbsp;
+<p>
+<ul>
+<li>malloc_type
+: <a class="el" href="a00209.html#09a7f81fb2c3055aaecf058b11538544">tbb::tbb_allocator&lt; T &gt;</a><li>state_type
+: <a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e">tbb::task</a><li>status_t
+: <a class="el" href="a00191.html#6f921f0d7c1812ceb5674418c8b6ccaf">tbb::interface5::reader_writer_lock</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
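
[Editorial illustration, not part of the generated documentation or the diff above.] Among the members indexed above are the reader-writer locking operations lock_read(), try_lock_read() and upgrade_to_writer() of tbb::spin_rw_mutex_v3 (exposed as tbb::spin_rw_mutex) and tbb::interface5::reader_writer_lock. A hedged sketch of the usual read-then-upgrade pattern follows; the std::map cache, the compute() placeholder and the lookup policy are assumptions made only for this example.

    #include <map>
    #include "tbb/spin_rw_mutex.h"

    // Hypothetical shared cache guarded by a reader-writer spin lock.
    typedef std::map<int, double> Cache;

    static double compute(int key) { return key * 0.5; }   // stand-in for real work

    double lookup_or_insert(Cache &cache, tbb::spin_rw_mutex &lock, int key) {
        tbb::spin_rw_mutex::scoped_lock guard(lock, /*write=*/false);   // start as a reader
        Cache::const_iterator it = cache.find(key);
        if (it != cache.end())
            return it->second;
        // upgrade_to_writer() returns false if the lock had to be released temporarily,
        // in which case another thread may have inserted the key, so look again.
        if (!guard.upgrade_to_writer()) {
            it = cache.find(key);
            if (it != cache.end())
                return it->second;
        }
        double value = compute(key);
        cache[key] = value;
        return value;
    }

The scoped_lock releases the mutex in its destructor, matching the release()/~scoped_lock() entries indexed in these pages.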
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/functions_eval.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/functions_eval.html
new file mode 100644 (file)
index 0000000..a9b878c
--- /dev/null
@@ -0,0 +1,52 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Class Members - Enumerator</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li id="current"><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="functions.html"><span>All</span></a></li>
+    <li><a href="functions_func.html"><span>Functions</span></a></li>
+    <li><a href="functions_vars.html"><span>Variables</span></a></li>
+    <li><a href="functions_type.html"><span>Typedefs</span></a></li>
+    <li><a href="functions_enum.html"><span>Enumerations</span></a></li>
+    <li id="current"><a href="functions_eval.html"><span>Enumerator</span></a></li>
+    <li><a href="functions_rela.html"><span>Related&nbsp;Functions</span></a></li>
+  </ul>
+</div>
+&nbsp;
+<p>
+<ul>
+<li>allocated
+: <a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293ebe94d3348dd038e41107819f00c1884c">tbb::task</a><li>executing
+: <a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293ead0fe2302ccc360923f738c2ed7ec1b9">tbb::task</a><li>freed
+: <a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293ecc67ca92bd6f1ce9738a1e9e7206b735">tbb::task</a><li>ready
+: <a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e0841dcf1c2a96dee9aa7b69f636cb81a">tbb::task</a><li>recycle
+: <a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e58debec6ab130290640d0cc2eedba35d">tbb::task</a><li>reexecute
+: <a class="el" href="a00204.html#4a3c415562d17905390ea5b49d12293e3bf499aa6e6487cd1ace883a63100513">tbb::task</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
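
[Editorial illustration, not part of the generated documentation or the diff above.] The enumerators listed in this file (allocated, executing, freed, ready, recycle, reexecute) are the values of tbb::task::state_type returned by task::state(). A minimal sketch of a root task observing the 'executing' state is shown below; the HelloTask class name and its trivial body are assumptions made only for this example, and the explicit task_scheduler_init is optional in TBB 2.2 and later.

    #include <cassert>
    #include "tbb/task.h"
    #include "tbb/task_scheduler_init.h"

    // Hypothetical leaf task used only to illustrate task::state_type.
    class HelloTask : public tbb::task {
    public:
        tbb::task* execute() {
            // Inside execute() the scheduler reports the 'executing' state.
            assert(state() == tbb::task::executing);
            return NULL;                  // no continuation task to bypass to
        }
    };

    void run_hello() {
        tbb::task_scheduler_init init;    // shown because task_scheduler_init() is indexed above
        HelloTask &t = *new(tbb::task::allocate_root()) HelloTask;
        tbb::task::spawn_root_and_wait(t);
    }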
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/functions_func.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/functions_func.html
new file mode 100644 (file)
index 0000000..eda6ce2
--- /dev/null
@@ -0,0 +1,281 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Class Members - Functions</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li id="current"><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="functions.html"><span>All</span></a></li>
+    <li id="current"><a href="functions_func.html"><span>Functions</span></a></li>
+    <li><a href="functions_vars.html"><span>Variables</span></a></li>
+    <li><a href="functions_type.html"><span>Typedefs</span></a></li>
+    <li><a href="functions_enum.html"><span>Enumerations</span></a></li>
+    <li><a href="functions_eval.html"><span>Enumerator</span></a></li>
+    <li><a href="functions_rela.html"><span>Related&nbsp;Functions</span></a></li>
+  </ul>
+</div>
+<div class="tabs">
+  <ul>
+    <li><a href="#index_a"><span>a</span></a></li>
+    <li><a href="#index_b"><span>b</span></a></li>
+    <li><a href="#index_c"><span>c</span></a></li>
+    <li><a href="#index_d"><span>d</span></a></li>
+    <li><a href="#index_e"><span>e</span></a></li>
+    <li><a href="#index_f"><span>f</span></a></li>
+    <li><a href="#index_g"><span>g</span></a></li>
+    <li><a href="#index_i"><span>i</span></a></li>
+    <li><a href="#index_l"><span>l</span></a></li>
+    <li><a href="#index_m"><span>m</span></a></li>
+    <li><a href="#index_n"><span>n</span></a></li>
+    <li><a href="#index_o"><span>o</span></a></li>
+    <li><a href="#index_p"><span>p</span></a></li>
+    <li><a href="#index_q"><span>q</span></a></li>
+    <li><a href="#index_r"><span>r</span></a></li>
+    <li><a href="#index_s"><span>s</span></a></li>
+    <li><a href="#index_t"><span>t</span></a></li>
+    <li><a href="#index_u"><span>u</span></a></li>
+    <li><a href="#index_w"><span>w</span></a></li>
+    <li><a href="#index_~"><span>~</span></a></li>
+  </ul>
+</div>
+
+<p>
+&nbsp;
+<p>
+<h3><a class="anchor" name="index_a">- a -</a></h3><ul>
+<li>acquire()
+: <a class="el" href="a00202.html#b0b646ec5be02a127d159bbb7ca65353">tbb::spin_rw_mutex_v3::scoped_lock</a>, <a class="el" href="a00200.html#3ee3c338732b1f64b0b32a757807a30d">tbb::spin_mutex::scoped_lock</a>, <a class="el" href="a00195.html#7fb04da37cccf8c99b1f9102d9074f9a">tbb::recursive_mutex::scoped_lock</a>, <a class="el" href="a00190.html#a8dd5ab8686e76de21587544dbb681e0">tbb::queuing_rw_mutex::scoped_lock</a>, <a class="el" href="a00188.html#533e4fc8355ee321206a0609c42d909d">tbb::queuing_mutex::scoped_lock</a>, <a class="el" href="a00178.html#862e022841cdc522e4296a5533b22efd">tbb::mutex::scoped_lock</a>, <a class="el" href="a00162.html#26b4fe0ca87a7ad4852cb787db880119">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::bucket_accessor</a><li>add()
+: <a class="el" href="a00184.html#e131c560057a58229992b61eb8dba4c6">tbb::parallel_while&lt; Body &gt;</a>, <a class="el" href="a00183.html#40baaf0f6856f4491dd0adf896c93516">tbb::parallel_do_feeder&lt; Item &gt;</a><li>add_filter()
+: <a class="el" href="a00185.html#38fb5c9c8395dd6f89a4ae2011a83e0d">tbb::pipeline</a><li>affinity()
+: <a class="el" href="a00204.html#3a920a56b0bcf2801518fb45b2c9d2be">tbb::task</a><li>allocate()
+: <a class="el" href="a00212.html#c35e5db8e9cdff5d1387db5b0bad2e4a">tbb::internal::tbb_exception_ptr</a>, <a class="el" href="a00209.html#f6cb487b1bdce0b581f265a77dca6d53">tbb::tbb_allocator&lt; T &gt;</a>, <a class="el" href="a00196.html#726b1586d05d44665a36e1c7b2699bfd">tbb::scalable_allocator&lt; T &gt;</a>, <a class="el" href="a00155.html#4cdeea67af6c1fcd8f1d5e9c4cab61e8">tbb::cache_aligned_allocator&lt; T &gt;</a><li>allocate_child()
+: <a class="el" href="a00204.html#1ff794f7053cd9148d5f280fbf07377f">tbb::task</a><li>allocate_continuation()
+: <a class="el" href="a00204.html#1434c79a5138993269d034008bff7329">tbb::task</a><li>allocate_root()
+: <a class="el" href="a00204.html#8ccc518caf31075a3e073996d2d240a4">tbb::task</a><li>allocator_type()
+: <a class="el" href="a00209.html#78701e7454ef8e1a25b5acd364367080">tbb::tbb_allocator&lt; T &gt;</a>, <a class="el" href="a00168.html#3c03eb40955b933b01987222722ac4bd">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>, <a class="el" href="a00159.html#2e2726fccf6d975dc1071608cc0bbf90">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#5a3956341728eaa558d8827063718cac">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a><li>assign()
+: <a class="el" href="a00166.html#93a06b3112cb804f42f40efb5e7387b4">tbb::concurrent_vector&lt; T, A &gt;</a><li>at()
+: <a class="el" href="a00166.html#23e14a38af748edff96a7adc3a0f1c58">tbb::concurrent_vector&lt; T, A &gt;</a></ul>
+<h3><a class="anchor" name="index_b">- b -</a></h3><ul>
+<li>back()
+: <a class="el" href="a00166.html#bd518e204107d07fd08d0ec5bdfd383d">tbb::concurrent_vector&lt; T, A &gt;</a><li>begin()
+: <a class="el" href="a00168.html#84afb3906a39e399cde1c950d6351300">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>, <a class="el" href="a00166.html#78a06182276ff758788d4c0623ae0d71">tbb::concurrent_vector&lt; T, A &gt;</a>, <a class="el" href="a00152.html#18d2258400756ac1446dac7676b18df3">tbb::blocked_range&lt; Value &gt;</a>, <a class="el" href="a00146.html#0d702fc6b9e9d061ace3501b3c861cdf">tbb::aligned_space&lt; T, N &gt;</a><li>blocked_range()
+: <a class="el" href="a00152.html#4c0efd2be3f96a0ab3ba5085e8b3fcc7">tbb::blocked_range&lt; Value &gt;</a><li>bucket_count()
+: <a class="el" href="a00160.html#414d15033d36c63aa3a40666dc4d6f5e">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></ul>
+<h3><a class="anchor" name="index_c">- c -</a></h3><ul>
+<li>cancel_group_execution()
+: <a class="el" href="a00204.html#0f3fb4aac549ab642022450a4bd13326">tbb::task</a>, <a class="el" href="a00206.html#8bcdfdf4e6bfb76125b6de15c00b571d">tbb::task_group_context</a><li>capacity()
+: <a class="el" href="a00166.html#3ed6b9ae7217af5103d974045b6f5cd5">tbb::concurrent_vector&lt; T, A &gt;</a>, <a class="el" href="a00159.html#b2888b3e4e837d7e03f2c731963a402b">tbb::concurrent_bounded_queue&lt; T, A &gt;</a><li>cbegin()
+: <a class="el" href="a00166.html#f88fcf1c920693c39bd9709db33c199f">tbb::concurrent_vector&lt; T, A &gt;</a><li>cend()
+: <a class="el" href="a00166.html#0c15a5d0f1cf75d687dabba07da1d46b">tbb::concurrent_vector&lt; T, A &gt;</a><li>clear()
+: <a class="el" href="a00207.html#fce446ee13e025969945328f3ff59b95">tbb::task_list</a>, <a class="el" href="a00185.html#2c84aef5b834b555ee220b176e25931e">tbb::pipeline</a>, <a class="el" href="a00168.html#a8764176d4b6014c5d65f1051851abc8">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>, <a class="el" href="a00166.html#26f937a359a66b6aae904c3cd9a3c444">tbb::concurrent_vector&lt; T, A &gt;</a>, <a class="el" href="a00159.html#90b31e2954c6e4596c7900435a5f4bc1">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#c32e8e84c0524155133b4aae32d2a827">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a>, <a class="el" href="a00160.html#a9f89be8fe28835749529d91081a2511">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>cols()
+: <a class="el" href="a00154.html#3336ba9480fd6c43e158f9beb024c050">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a>, <a class="el" href="a00153.html#392a46759af2c884957115771affa7f4">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a><li>compact()
+: <a class="el" href="a00166.html#1693d1da41b1a8235871be9c6633be35">tbb::concurrent_vector&lt; T, A &gt;</a><li>concurrent_bounded_queue()
+: <a class="el" href="a00159.html#a5e04dcd7db9fd9b583b4e7df832246a">tbb::concurrent_bounded_queue&lt; T, A &gt;</a><li>concurrent_hash_map()
+: <a class="el" href="a00160.html#3bfe75fcb350ce39cf610c164f233edc">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>concurrent_queue()
+: <a class="el" href="a00165.html#9102b897776bd2d9e908e6604ff16b5f">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#8a6b98ea11a867db8ac868f0113ca429">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a><li>concurrent_vector()
+: <a class="el" href="a00166.html#4450de83c5862ea4bcd9443fd7e67419">tbb::concurrent_vector&lt; T, A &gt;</a><li>const_accessor()
+: <a class="el" href="a00163.html#a9ead65cca68d4c49c7ef64d7899a4c8">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a><li>construct()
+: <a class="el" href="a00209.html#ab228ab9e324ed041c2226e1d717df5f">tbb::tbb_allocator&lt; T &gt;</a>, <a class="el" href="a00155.html#958ee8745c86c275bfc9533af565e017">tbb::cache_aligned_allocator&lt; T &gt;</a><li>context()
+: <a class="el" href="a00204.html#d8c36a93f3972590fbb65ff1cef3173b">tbb::task</a><li>count()
+: <a class="el" href="a00160.html#74f5ef06a06c5e619f156a1c76c04969">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>crbegin()
+: <a class="el" href="a00166.html#db78a1d28c9c966050e8a2926d834a33">tbb::concurrent_vector&lt; T, A &gt;</a><li>crend()
+: <a class="el" href="a00166.html#fff9cece89438587997ebedf93c5e962">tbb::concurrent_vector&lt; T, A &gt;</a></ul>
+<h3><a class="anchor" name="index_d">- d -</a></h3><ul>
+<li>deallocate()
+: <a class="el" href="a00209.html#fdd011fdf2f9ad07006dc7c0a7ec1da2">tbb::tbb_allocator&lt; T &gt;</a>, <a class="el" href="a00196.html#f806a238c18cbcfb531e1e0a0d2ec59d">tbb::scalable_allocator&lt; T &gt;</a>, <a class="el" href="a00155.html#3d4eadf188f7d85d3805ae534e0b8e1c">tbb::cache_aligned_allocator&lt; T &gt;</a><li>decrement_ref_count()
+: <a class="el" href="a00204.html#ef4680f5c148020c5e7e43ddef44cd5d">tbb::task</a><li>default_num_threads()
+: <a class="el" href="a00208.html#ba00714c33a41a3c2216f48613971cab">tbb::task_scheduler_init</a><li>destroy()
+: <a class="el" href="a00212.html#921875bbacd2c8a5f324c7da7a415262">tbb::internal::tbb_exception_ptr</a>, <a class="el" href="a00176.html#7a46873119d9f85a7b0009c13e41a258">tbb::movable_exception&lt; ExceptionData &gt;</a>, <a class="el" href="a00157.html#93d875d3555502ff6f18513525de204c">tbb::captured_exception</a>, <a class="el" href="a00211.html#66c94938eca8bf88b76f3eccaaf215d8">tbb::tbb_exception</a>, <a class="el" href="a00209.html#ef133522bf55f05a605bee0763208281">tbb::tbb_allocator&lt; T &gt;</a>, <a class="el" href="a00204.html#dfaacf92685e5f86393bf657b2853bf8">tbb::task</a>, <a class="el" href="a00155.html#cd298895a4f1654b5149ec84b591ecb5">tbb::cache_aligned_allocator&lt; T &gt;</a><li>downgrade_to_reader()
+: <a class="el" href="a00202.html#c2c2c38a08cb9080e87099fac3e5bc94">tbb::spin_rw_mutex_v3::scoped_lock</a>, <a class="el" href="a00190.html#0d2f93edf7b15ec4bcee138823220c52">tbb::queuing_rw_mutex::scoped_lock</a></ul>
+<h3><a class="anchor" name="index_e">- e -</a></h3><ul>
+<li>empty()
+: <a class="el" href="a00207.html#f3ac31e092814b90929f81bb30441959">tbb::task_list</a>, <a class="el" href="a00168.html#72595886d0ac8fd0543f90038570510d">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>, <a class="el" href="a00166.html#c6426cb93cf20d3af40f3c90f1f0481a">tbb::concurrent_vector&lt; T, A &gt;</a>, <a class="el" href="a00159.html#f64924f2ee9225c368a270fc3c394db9">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#f3f6fce0cfa2d581d6f3b47e0613ad64">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a>, <a class="el" href="a00160.html#6cab7d029a3e73a653ef0faeac4d1586">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>, <a class="el" href="a00163.html#5cce3104cb0a52e08d2131370871c614">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a>, <a class="el" href="a00154.html#356860e1c977d91711e8216bd55c0b25">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a>, <a class="el" href="a00153.html#d144cb2d88cef553420311aca8667a44">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a>, <a class="el" href="a00152.html#8f4f02f530eb3f2e7ea26e06f76aef9d">tbb::blocked_range&lt; Value &gt;</a><li>end()
+: <a class="el" href="a00168.html#cb448bb4977ce366ceb7344085cc7050">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>, <a class="el" href="a00166.html#1e6aa764ce5a1cbd24526f68bc0a2f6b">tbb::concurrent_vector&lt; T, A &gt;</a>, <a class="el" href="a00152.html#8b929d93ddc13f148b11bceef3a3bdf8">tbb::blocked_range&lt; Value &gt;</a>, <a class="el" href="a00146.html#024be075c23c0394c9a2518d993bcd9e">tbb::aligned_space&lt; T, N &gt;</a><li>enqueue()
+: <a class="el" href="a00204.html#8365d063c0cc9d7bd616bca47256b93c">tbb::task</a><li>enumerable_thread_specific()
+: <a class="el" href="a00168.html#7bce6829981c9efe3f59cae2355e383e">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a><li>erase()
+: <a class="el" href="a00160.html#0f500842d0cf791f8fa61662edb1b311">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>exclude()
+: <a class="el" href="a00160.html#faad2108bd2be75e52293486af59f11e">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>execute()
+: <a class="el" href="a00204.html#22c298cd40937a431a06777423f002f6">tbb::task</a></ul>
+<h3><a class="anchor" name="index_f">- f -</a></h3><ul>
+<li>finalize()
+: <a class="el" href="a00169.html#56275eb889c77c4807967133e21401bd">tbb::filter</a><li>find()
+: <a class="el" href="a00160.html#bce7bdf46435115a95cca2aa73c5da83">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>front()
+: <a class="el" href="a00166.html#502615a858eb9fa0390ee59169065e90">tbb::concurrent_vector&lt; T, A &gt;</a></ul>
+<h3><a class="anchor" name="index_g">- g -</a></h3><ul>
+<li>get_allocator()
+: <a class="el" href="a00166.html#2fdba8e90de6a4d2300222236d46758e">tbb::concurrent_vector&lt; T, A &gt;</a>, <a class="el" href="a00159.html#415eb87e53b1c6a266de06ecbc490d16">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#f034f70caef445fe8abc9113ec926a8d">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a>, <a class="el" href="a00160.html#199208eed6f09e200cda364f906be0fe">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>grainsize()
+: <a class="el" href="a00152.html#fcd2e5b8b6c11fd3f20fc0aa9f11bbc2">tbb::blocked_range&lt; Value &gt;</a><li>grow_by()
+: <a class="el" href="a00166.html#473a59a4c9308b93411b898b3110d26c">tbb::concurrent_vector&lt; T, A &gt;</a><li>grow_to_at_least()
+: <a class="el" href="a00166.html#a7e3b67c8ccab16d0aecc80899ae799d">tbb::concurrent_vector&lt; T, A &gt;</a></ul>
+<h3><a class="anchor" name="index_i">- i -</a></h3><ul>
+<li>increment_ref_count()
+: <a class="el" href="a00204.html#f5fb43c7ad0de5a4b95703cebc39e345">tbb::task</a><li>init()
+: <a class="el" href="a00206.html#49a55352084fd44b8863d182e839e6dc">tbb::task_group_context</a><li>initialize()
+: <a class="el" href="a00208.html#d5ed214a8bb53b0466ed91ff4734b9a3">tbb::task_scheduler_init</a><li>insert()
+: <a class="el" href="a00160.html#cfe172677e5987004ef4a03e22fa338a">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>internal_copy()
+: <a class="el" href="a00160.html#3c27779fe66b79505390d084310d997e">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>internal_equal_range()
+: <a class="el" href="a00160.html#976c57edfb7f22b9f91a2e11f141eb4a">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>internal_fast_find()
+: <a class="el" href="a00160.html#2f76ed101a0ccc8875b846c2f747897e">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>interval_t()
+: <a class="el" href="a00216.html#1a21a428e00cced2e6a49e0f5f2258bf">tbb::tick_count::interval_t</a><li>is_active()
+: <a class="el" href="a00208.html#12752282977029f23416642bc03e8b74">tbb::task_scheduler_init</a><li>is_bound()
+: <a class="el" href="a00169.html#15c29cae5d237e6d63dbfe5c94af89d5">tbb::filter</a><li>is_cancelled()
+: <a class="el" href="a00204.html#025f18118c057c4c8db87ff2ce8df975">tbb::task</a><li>is_divisible()
+: <a class="el" href="a00154.html#39d69191721c488e737ae5d9c5336b9c">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a>, <a class="el" href="a00153.html#ad36a9b38e4fef26d376f99552ce2d92">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a>, <a class="el" href="a00152.html#41a58b703d574b6e1ca155df3576f578">tbb::blocked_range&lt; Value &gt;</a><li>is_group_execution_cancelled()
+: <a class="el" href="a00206.html#4db72f16210b0a991b2c134d6763a4cc">tbb::task_group_context</a><li>is_ordered()
+: <a class="el" href="a00169.html#cd53206c4795ef2df5df26b795caf692">tbb::filter</a><li>is_owned_by_current_thread()
+: <a class="el" href="a00204.html#c26718b3b247cd13deb1a741902e7105">tbb::task</a><li>is_serial()
+: <a class="el" href="a00169.html#fcfec27656a69ff2072802ac001e936f">tbb::filter</a><li>is_stolen_task()
+: <a class="el" href="a00204.html#f9169402702f56bf519448aaf34450aa">tbb::task</a><li>is_writer()
+: <a class="el" href="a00162.html#fc194e3a186dc935a5fb513cc9f8e898">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::bucket_accessor</a></ul>
+<h3><a class="anchor" name="index_l">- l -</a></h3><ul>
+<li>local()
+: <a class="el" href="a00168.html#7dc79058d2832f7447de8e691c3455ea">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a><li>lock()
+: <a class="el" href="a00201.html#4007d6e1523dbc3c2bb7f889ab789a8a">tbb::spin_rw_mutex_v3</a>, <a class="el" href="a00199.html#4f748989e19b6045e3a2d2ee73626a28">tbb::spin_mutex</a>, <a class="el" href="a00194.html#4c342c69d47f4bb0b393535dee4015d6">tbb::recursive_mutex</a>, <a class="el" href="a00191.html#2653d1a2d560059a51219a8ceab3ade9">tbb::interface5::reader_writer_lock</a>, <a class="el" href="a00177.html#4470e61c24c129a0299ca6c17240adbb">tbb::mutex</a><li>lock_read()
+: <a class="el" href="a00201.html#13f799708ac4ca437a16be202e263e18">tbb::spin_rw_mutex_v3</a>, <a class="el" href="a00191.html#d9d16a24d9f6c3dada73c6b9ff214f5b">tbb::interface5::reader_writer_lock</a><li>lookup()
+: <a class="el" href="a00160.html#1f22480a290ddc6c145888d8f985531a">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></ul>
+<h3><a class="anchor" name="index_m">- m -</a></h3><ul>
+<li>max_size()
+: <a class="el" href="a00209.html#f059ca2c96243024f0d562ee3a87a3a5">tbb::tbb_allocator&lt; T &gt;</a>, <a class="el" href="a00196.html#880e766f1d913988c21973dbdd874fd5">tbb::scalable_allocator&lt; T &gt;</a>, <a class="el" href="a00166.html#2c248a017f0576df3e7cd99627836fd6">tbb::concurrent_vector&lt; T, A &gt;</a>, <a class="el" href="a00160.html#1e45d3cbd1e2ae06f365f1b48e0df0b5">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>, <a class="el" href="a00155.html#fb23b687b4c0429dab4c7f8017266cf0">tbb::cache_aligned_allocator&lt; T &gt;</a><li>move()
+: <a class="el" href="a00176.html#1aea0ad179d6f0481fe7f3495f66adf9">tbb::movable_exception&lt; ExceptionData &gt;</a>, <a class="el" href="a00157.html#837a50b8f6a800bda225c39d1699643f">tbb::captured_exception</a>, <a class="el" href="a00211.html#3e3482bf264d4ca4dde046cd9c02c766">tbb::tbb_exception</a><li>mutex()
+: <a class="el" href="a00177.html#05313cb77d4f85213103d4dab74ed454">tbb::mutex</a></ul>
+<h3><a class="anchor" name="index_n">- n -</a></h3><ul>
+<li>name()
+: <a class="el" href="a00176.html#bc5f5c4739b17ac5211ac58226c2f5a5">tbb::movable_exception&lt; ExceptionData &gt;</a>, <a class="el" href="a00157.html#5af82fd677449c5ca727fa1d7e16f9f5">tbb::captured_exception</a>, <a class="el" href="a00211.html#d00f6497e552fee978a02bfcbebf46e2">tbb::tbb_exception</a><li>note_affinity()
+: <a class="el" href="a00204.html#713c338c8eeaebdc5a6b10a69c039b06">tbb::task</a><li>now()
+: <a class="el" href="a00215.html#fb7f78ca61cf28398645ace66e284473">tbb::tick_count</a></ul>
+<h3><a class="anchor" name="index_o">- o -</a></h3><ul>
+<li>operator *()
+: <a class="el" href="a00161.html#e8938f0cd1211e88a1d73527ed3636c4">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::accessor</a>, <a class="el" href="a00163.html#30f31106840700a4c3664b9cb1c31ca7">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a><li>operator delete()
+: <a class="el" href="a00211.html#3f2da7f3d8a6e4c1df522af1213afb5a">tbb::tbb_exception</a><li>operator()()
+: <a class="el" href="a00169.html#fa1b3dc1f4f47563ccab7f4d92f5b543">tbb::filter</a>, <a class="el" href="a00162.html#57c6110bd20e95c06de5a199de988941">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::bucket_accessor</a><li>operator+=()
+: <a class="el" href="a00216.html#cd9814947902e26463a69a111530f81b">tbb::tick_count::interval_t</a><li>operator-=()
+: <a class="el" href="a00216.html#35ff7eaf7c2031b4a991402ac9ecb940">tbb::tick_count::interval_t</a><li>operator-&gt;()
+: <a class="el" href="a00161.html#fcebc32c020202cc37e60eadef157569">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::accessor</a>, <a class="el" href="a00163.html#3d03a48ecb8cd9549bd8be64b09c9b0d">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a><li>operator=()
+: <a class="el" href="a00166.html#19f4ab88a01b0fd056af3bba463e7bd6">tbb::concurrent_vector&lt; T, A &gt;</a>, <a class="el" href="a00160.html#2c0c42a2e1b5282b6739157df9ce2304">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>operator[]()
+: <a class="el" href="a00166.html#c6fade5c732cc95274d1d8277ea619d1">tbb::concurrent_vector&lt; T, A &gt;</a></ul>
+<h3><a class="anchor" name="index_p">- p -</a></h3><ul>
+<li>pages()
+: <a class="el" href="a00154.html#cf971430aa12361d3ed245344b7c6764">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a><li>parallel_while()
+: <a class="el" href="a00184.html#36e26ba3880c7bcf804a97ba0cbe133f">tbb::parallel_while&lt; Body &gt;</a><li>parent()
+: <a class="el" href="a00204.html#314e98ee4347ccec83efcb9ee22e8596">tbb::task</a><li>pause()
+: <a class="el" href="a00149.html#a174ea93e3bd3d5cce82389c2f28d037">tbb::internal::atomic_backoff</a><li>pipeline()
+: <a class="el" href="a00185.html#596dc3beba27099c4c8581cb419e1a59">tbb::pipeline</a><li>pop()
+: <a class="el" href="a00159.html#41f4c6bd7a82ab070e840bbf81b0b123">tbb::concurrent_bounded_queue&lt; T, A &gt;</a><li>pop_front()
+: <a class="el" href="a00207.html#5fe85df5ed524418389d34051750347d">tbb::task_list</a><li>pop_if_present()
+: <a class="el" href="a00165.html#48da3536245318af6cb5fd58bac78039">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a><li>process_item()
+: <a class="el" href="a00214.html#5e726bdc7fbd924c0b07bd558b1d4d5d">tbb::thread_bound_filter</a><li>push()
+: <a class="el" href="a00159.html#ceb08c743b11ba88c878e73fff8af20b">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#73c47563ffcc4c2f6452f25a04ebe2e2">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a><li>push_back()
+: <a class="el" href="a00207.html#4cd34756bc4763dafb8c84838a0124ff">tbb::task_list</a>, <a class="el" href="a00166.html#e94e038f915c0268fdf2d3d7f87d81b8">tbb::concurrent_vector&lt; T, A &gt;</a><li>push_if_not_full()
+: <a class="el" href="a00165.html#7c45561bafe71107d09b2bc1b8f4e681">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a></ul>
+<h3><a class="anchor" name="index_q">- q -</a></h3><ul>
+<li>queuing_mutex()
+: <a class="el" href="a00187.html#b389ad9c4db7293e4bdb5b8cda69ec04">tbb::queuing_mutex</a><li>queuing_rw_mutex()
+: <a class="el" href="a00189.html#85c90877c3447690ac4e2ac4ff8dea5e">tbb::queuing_rw_mutex</a></ul>
+<h3><a class="anchor" name="index_r">- r -</a></h3><ul>
+<li>range()
+: <a class="el" href="a00168.html#3b068000cf4dbf9b40f8bb7e3fc53e0b">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>, <a class="el" href="a00166.html#3d09ccfb581b879ae64203741035e193">tbb::concurrent_vector&lt; T, A &gt;</a><li>rbegin()
+: <a class="el" href="a00166.html#9f9c103e18d5f212703805354074ad44">tbb::concurrent_vector&lt; T, A &gt;</a><li>reader_writer_lock()
+: <a class="el" href="a00191.html#c1431c4293e777efd9aab9a95c2a46e1">tbb::interface5::reader_writer_lock</a><li>recursive_mutex()
+: <a class="el" href="a00194.html#d2fceb7f95c24a8cd1457d4527e4b8c6">tbb::recursive_mutex</a><li>recycle_as_child_of()
+: <a class="el" href="a00204.html#db399855177438bbc9cc61d508dae8d2">tbb::task</a><li>recycle_as_continuation()
+: <a class="el" href="a00204.html#a67a79e18f62b43a623a00cfbd76db4c">tbb::task</a><li>recycle_as_safe_continuation()
+: <a class="el" href="a00204.html#3b290d14109704e2b69dc1ac980a7a76">tbb::task</a><li>recycle_to_reexecute()
+: <a class="el" href="a00204.html#4f1be9bbcdb487830dbe298b68d85144">tbb::task</a><li>ref_count()
+: <a class="el" href="a00204.html#ad774f55eaec008ae02b236423209ced">tbb::task</a><li>register_pending_exception()
+: <a class="el" href="a00206.html#d97c8a03615594b71b4ef06ff75cf561">tbb::task_group_context</a><li>rehash()
+: <a class="el" href="a00160.html#13f3f2e8de7564be03882c31559493c9">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>release()
+: <a class="el" href="a00202.html#61b14d00a78185c9b2d206ebfc379124">tbb::spin_rw_mutex_v3::scoped_lock</a>, <a class="el" href="a00200.html#eeb615e68e963e6bf8d9c11402d0ce8e">tbb::spin_mutex::scoped_lock</a>, <a class="el" href="a00195.html#ac480ea0e9d5ea0345a67d57008b6263">tbb::recursive_mutex::scoped_lock</a>, <a class="el" href="a00190.html#67ae221109ddc69510ab593874e435d4">tbb::queuing_rw_mutex::scoped_lock</a>, <a class="el" href="a00188.html#3bf2b8c87ff22115be9b2eac179f2d30">tbb::queuing_mutex::scoped_lock</a>, <a class="el" href="a00178.html#0d51d18cd99df3b2e93bf07378d0992c">tbb::mutex::scoped_lock</a>, <a class="el" href="a00163.html#d5ce4f88d8870290238a8ad621e6f270">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a><li>rend()
+: <a class="el" href="a00166.html#d438b9b32ea3a8ffb703015b6dce055b">tbb::concurrent_vector&lt; T, A &gt;</a><li>reserve()
+: <a class="el" href="a00166.html#5a0ce05026994b010018f72cfdeb72c1">tbb::concurrent_vector&lt; T, A &gt;</a><li>reset()
+: <a class="el" href="a00206.html#6d30d16bf1cd22f86c6afaf29c2b430c">tbb::task_group_context</a><li>resize()
+: <a class="el" href="a00166.html#98ce6b2c6d2622f0c030b46dfac3880c">tbb::concurrent_vector&lt; T, A &gt;</a><li>rows()
+: <a class="el" href="a00154.html#1584623e59ff32a8aa82006827508be4">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a>, <a class="el" href="a00153.html#f496e7348a82652fba581203477cc07c">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a><li>run()
+: <a class="el" href="a00185.html#93d7fec8cd607b803dd2d79fb46bd260">tbb::pipeline</a>, <a class="el" href="a00184.html#b32a0a6e5e09ebb7fad3e6652c19afe5">tbb::parallel_while&lt; Body &gt;</a></ul>
+<h3><a class="anchor" name="index_s">- s -</a></h3><ul>
+<li>scoped_lock()
+: <a class="el" href="a00202.html#42a92d4f8fdde425b111cfa8a9228071">tbb::spin_rw_mutex_v3::scoped_lock</a>, <a class="el" href="a00200.html#5ce6807050a9e8f87bcb4a65dccb12ef">tbb::spin_mutex::scoped_lock</a>, <a class="el" href="a00195.html#dec17713c4c1321ac8fec66816d0c602">tbb::recursive_mutex::scoped_lock</a>, <a class="el" href="a00192.html#cf19f20e082887c1bb0ba6b0911c3583">tbb::interface5::reader_writer_lock::scoped_lock</a>, <a class="el" href="a00190.html#fbb8798792d3aebb136c46fc63d2529e">tbb::queuing_rw_mutex::scoped_lock</a>, <a class="el" href="a00188.html#9b51ef972f5618ac17caadb58841ab6d">tbb::queuing_mutex::scoped_lock</a>, <a class="el" href="a00178.html#605a6b9af0f8cdabdf81825e0de99600">tbb::mutex::scoped_lock</a><li>scoped_lock_read()
+: <a class="el" href="a00193.html#87ab0dc8f7216e6ba0f7acd6aec33064">tbb::interface5::reader_writer_lock::scoped_lock_read</a><li>seconds()
+: <a class="el" href="a00216.html#d5d8429c0bc59cf6131b2abc7929fa59">tbb::tick_count::interval_t</a><li>self()
+: <a class="el" href="a00204.html#bd43e8d6249738efafd12d6a4c72c5e3">tbb::task</a><li>set_affinity()
+: <a class="el" href="a00204.html#dca19d7a45487a7d67a0db517e2b57c9">tbb::task</a><li>set_capacity()
+: <a class="el" href="a00159.html#f3c6c934f85fd02aedbc83a16943193b">tbb::concurrent_bounded_queue&lt; T, A &gt;</a><li>set_ref_count()
+: <a class="el" href="a00204.html#06a4206a57e8e12a439b14d6d41cfd92">tbb::task</a><li>set_state()
+: <a class="el" href="a00177.html#795649a185b0d6af6dc81c5f378616dd">tbb::mutex</a><li>shrink_to_fit()
+: <a class="el" href="a00166.html#03c6f4cf66532bf4cc907ee738a9a186">tbb::concurrent_vector&lt; T, A &gt;</a><li>size()
+: <a class="el" href="a00168.html#33fd6593da1ed14340f10f67d5a69130">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>, <a class="el" href="a00166.html#715fe313c4a9c22731cc404dd80c9ec9">tbb::concurrent_vector&lt; T, A &gt;</a>, <a class="el" href="a00159.html#7dc14d1a579a4cccda9f857585e1768d">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00160.html#17fd8c5fe8c6a86075f34aa4e8412ba3">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>, <a class="el" href="a00152.html#9eaa0b6beff1420f688570bbf6b8c462">tbb::blocked_range&lt; Value &gt;</a><li>spawn_and_wait_for_all()
+: <a class="el" href="a00204.html#894ab68378e502776d8220eea7ce9fa1">tbb::task</a><li>spawn_root_and_wait()
+: <a class="el" href="a00204.html#c33c7edbaec67aa8a56f48986a9dc69f">tbb::task</a><li>spin_mutex()
+: <a class="el" href="a00199.html#3d8fb44644fd8d41ada1fbeba7409be3">tbb::spin_mutex</a><li>spin_rw_mutex_v3()
+: <a class="el" href="a00201.html#61332b2756de89f3f5f69310cbb6e70c">tbb::spin_rw_mutex_v3</a><li>state()
+: <a class="el" href="a00204.html#0af7b2d7e6e8b4333b2accfce3dfb374">tbb::task</a><li>swap()
+: <a class="el" href="a00166.html#96c9c4bd968ed3edb8dd276854d2dae0">tbb::concurrent_vector&lt; T, A &gt;</a>, <a class="el" href="a00160.html#eddb0d2efe0b4f25a85c059e1c3dac15">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a></ul>
+<h3><a class="anchor" name="index_t">- t -</a></h3><ul>
+<li>task()
+: <a class="el" href="a00204.html#2bce8ec6e44706e70128f5cf91b76e67">tbb::task</a><li>task_group_context()
+: <a class="el" href="a00206.html#19fee08fb8ac98adccfe69c1aa63c491">tbb::task_group_context</a><li>task_list()
+: <a class="el" href="a00207.html#416341c2047eaef50417b41eaf7e9de6">tbb::task_list</a><li>task_scheduler_init()
+: <a class="el" href="a00208.html#421600bf9bf9338bcf937063f2ff0e90">tbb::task_scheduler_init</a><li>terminate()
+: <a class="el" href="a00208.html#f73257e04cb7fb9bd5be2b635d9016f1">tbb::task_scheduler_init</a><li>throw_self()
+: <a class="el" href="a00212.html#292832fd5c523e3d8081a22247840a1d">tbb::internal::tbb_exception_ptr</a>, <a class="el" href="a00176.html#17cffba35811c92b7e65d63506b69602">tbb::movable_exception&lt; ExceptionData &gt;</a>, <a class="el" href="a00157.html#2dd1be66ab32fa27e0ddef5707fa67ef">tbb::captured_exception</a>, <a class="el" href="a00211.html#8588e07fa49692f4d734e4f2e4f048f4">tbb::tbb_exception</a><li>tick_count()
+: <a class="el" href="a00215.html#34593326ae4191e02a13c7cbdab9de4c">tbb::tick_count</a><li>try_acquire()
+: <a class="el" href="a00202.html#9879626968d9b9a04cd2ec0fb2e84ae1">tbb::spin_rw_mutex_v3::scoped_lock</a>, <a class="el" href="a00200.html#9297ec188534b45dc0ca48f2f39a0501">tbb::spin_mutex::scoped_lock</a>, <a class="el" href="a00195.html#36bfc3e93e3ef6340abef4901444d340">tbb::recursive_mutex::scoped_lock</a>, <a class="el" href="a00190.html#2e4ff6c9ec2fee6682f95290d1f42baa">tbb::queuing_rw_mutex::scoped_lock</a>, <a class="el" href="a00188.html#e5a014fb817599386a87170cf2cf51a9">tbb::queuing_mutex::scoped_lock</a>, <a class="el" href="a00178.html#591e0c49b82bcedffcbe0923f1b915ec">tbb::mutex::scoped_lock</a><li>try_lock()
+: <a class="el" href="a00201.html#088bb256be794cc47d3b83791632fdfc">tbb::spin_rw_mutex_v3</a>, <a class="el" href="a00199.html#8f9a58fb56a2b4c5efe1a7f7c1ae2074">tbb::spin_mutex</a>, <a class="el" href="a00194.html#86e719b0afee25704af11ab97694d240">tbb::recursive_mutex</a>, <a class="el" href="a00191.html#721eb173e154ab38292273e9266a9b07">tbb::interface5::reader_writer_lock</a>, <a class="el" href="a00177.html#4331652c79dea1c1131bd59ab161b234">tbb::mutex</a><li>try_lock_read()
+: <a class="el" href="a00201.html#b8667415869013f840d976aa406d385a">tbb::spin_rw_mutex_v3</a>, <a class="el" href="a00191.html#595fb23952e3b89426b1f7938dea9b11">tbb::interface5::reader_writer_lock</a><li>try_pop()
+: <a class="el" href="a00159.html#0ca487019bbb00a196442aff78a1e4f7">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#ae31ca0db34ef96ef1e74aa0d28c95f8">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a><li>try_process_item()
+: <a class="el" href="a00214.html#c4f90f2c771bce748beb9be734fa286c">tbb::thread_bound_filter</a><li>try_push()
+: <a class="el" href="a00159.html#2bd6232531279fb3ccbd296bea23066b">tbb::concurrent_bounded_queue&lt; T, A &gt;</a></ul>
+<h3><a class="anchor" name="index_u">- u -</a></h3><ul>
+<li>unlock()
+: <a class="el" href="a00201.html#f9f52ead2098eb5fb12da59d5ae53b55">tbb::spin_rw_mutex_v3</a>, <a class="el" href="a00199.html#0e843ee6265f57f27d228ba91e7308ef">tbb::spin_mutex</a>, <a class="el" href="a00194.html#f0a96e26b7f074588dc31e32524856ae">tbb::recursive_mutex</a>, <a class="el" href="a00191.html#5113b32689305599b2c36b5831547704">tbb::interface5::reader_writer_lock</a>, <a class="el" href="a00177.html#5fc9ef443ae75d966695546be399cc6b">tbb::mutex</a><li>unsafe_size()
+: <a class="el" href="a00164.html#eaa35a5274606779802e9a669a706260">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a><li>upgrade_to_writer()
+: <a class="el" href="a00202.html#3f0b1e3f2efab63336400348bd070226">tbb::spin_rw_mutex_v3::scoped_lock</a>, <a class="el" href="a00190.html#11ba1da4a722c9e6f73339a52c487e82">tbb::queuing_rw_mutex::scoped_lock</a></ul>
+<h3><a class="anchor" name="index_w">- w -</a></h3><ul>
+<li>wait_for_all()
+: <a class="el" href="a00204.html#53d2615ad9c38859b4c8080936600283">tbb::task</a><li>what()
+: <a class="el" href="a00176.html#b33a89bccf0c63106f1270c7bfaaf54f">tbb::movable_exception&lt; ExceptionData &gt;</a>, <a class="el" href="a00157.html#6b5988ef74a1fe2a58998d110b3633e0">tbb::captured_exception</a>, <a class="el" href="a00211.html#e8157689ecb66bc6c72d3618bf3cc371">tbb::tbb_exception</a></ul>
+<h3><a class="anchor" name="index_~">- ~ -</a></h3><ul>
+<li>~combinable()
+: <a class="el" href="a00158.html#2c87e79ae98588a5780f708773388843">tbb::combinable&lt; T &gt;</a><li>~concurrent_bounded_queue()
+: <a class="el" href="a00159.html#acaf5b510dc0dfc7780b8c956cf773cf">tbb::concurrent_bounded_queue&lt; T, A &gt;</a><li>~concurrent_hash_map()
+: <a class="el" href="a00160.html#a1ac58997d8fbf242b266e3691573481">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a><li>~concurrent_queue()
+: <a class="el" href="a00164.html#830b33753d6b149c366344e29b2edd8c">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a><li>~concurrent_vector()
+: <a class="el" href="a00166.html#da2444b28bb840d38f60d0030333a5fc">tbb::concurrent_vector&lt; T, A &gt;</a><li>~const_accessor()
+: <a class="el" href="a00163.html#752b0c1ec74b94786403a75e42917d01">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a><li>~enumerable_thread_specific()
+: <a class="el" href="a00168.html#5a7907d9e3e5b18e7a7b55211ef3213f">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a><li>~filter()
+: <a class="el" href="a00169.html#66d159f362293e3964ba3da8bc1d2604">tbb::filter</a><li>~parallel_while()
+: <a class="el" href="a00184.html#6fcfc973cc56b79c6d0fbb8a31be7e84">tbb::parallel_while&lt; Body &gt;</a><li>~pipeline()
+: <a class="el" href="a00185.html#49513c6c24f9d5bbbb27edca5efe01c9">tbb::pipeline</a><li>~queuing_rw_mutex()
+: <a class="el" href="a00189.html#1ba73e3d95cfdf8323880bc623af9099">tbb::queuing_rw_mutex</a><li>~reader_writer_lock()
+: <a class="el" href="a00191.html#5135f64f7b7339017f33d956445edbee">tbb::interface5::reader_writer_lock</a><li>~scoped_lock()
+: <a class="el" href="a00202.html#d7eaaa3f2e2c5dc11e7005811b1bdd04">tbb::spin_rw_mutex_v3::scoped_lock</a>, <a class="el" href="a00200.html#ac6fa425d1f06c56d8b70abc51aac844">tbb::spin_mutex::scoped_lock</a>, <a class="el" href="a00195.html#c1197ffb8f3cd9d4fed71d7e06265b7c">tbb::recursive_mutex::scoped_lock</a>, <a class="el" href="a00192.html#70246e0260493625ff956fa5926fc71f">tbb::interface5::reader_writer_lock::scoped_lock</a>, <a class="el" href="a00190.html#32c7d67a660d23ebbaab1a1d2826d31a">tbb::queuing_rw_mutex::scoped_lock</a>, <a class="el" href="a00188.html#ac2c576a93570957d694192a5f491443">tbb::queuing_mutex::scoped_lock</a>, <a class="el" href="a00178.html#0ebbbecaf4311e9df7362cb76ceaa368">tbb::mutex::scoped_lock</a><li>~scoped_lock_read()
+: <a class="el" href="a00193.html#bd21c5f3d555d64d1de8658e15bf4966">tbb::interface5::reader_writer_lock::scoped_lock_read</a><li>~spin_rw_mutex_v3()
+: <a class="el" href="a00201.html#9a815fb2759e55072ed413f1b6970cf3">tbb::spin_rw_mutex_v3</a><li>~task()
+: <a class="el" href="a00204.html#98245ee0473f84cb19dbbf8c81134908">tbb::task</a><li>~task_list()
+: <a class="el" href="a00207.html#6d438f1499a02db1e59c24ab6043e5ba">tbb::task_list</a><li>~task_scheduler_init()
+: <a class="el" href="a00208.html#4da6c86292d80c703a66c1f6f5299488">tbb::task_scheduler_init</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/functions_rela.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/functions_rela.html
new file mode 100644 (file)
index 0000000..b6128f3
--- /dev/null
@@ -0,0 +1,49 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Class Members - Related Functions</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li id="current"><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="functions.html"><span>All</span></a></li>
+    <li><a href="functions_func.html"><span>Functions</span></a></li>
+    <li><a href="functions_vars.html"><span>Variables</span></a></li>
+    <li><a href="functions_type.html"><span>Typedefs</span></a></li>
+    <li><a href="functions_enum.html"><span>Enumerations</span></a></li>
+    <li><a href="functions_eval.html"><span>Enumerator</span></a></li>
+    <li id="current"><a href="functions_rela.html"><span>Related&nbsp;Functions</span></a></li>
+  </ul>
+</div>
+&nbsp;
+<p>
+<ul>
+<li>make_filter
+: <a class="el" href="a00170.html#85c2892eff1fddcd06e28911e75838bd">tbb::interface5::filter_t&lt; T, U &gt;</a><li>operator+
+: <a class="el" href="a00216.html#5871ead1ca230efbe52a5008470e6428">tbb::tick_count::interval_t</a><li>operator-
+: <a class="el" href="a00215.html#09dde78a4100800c11bb883d6204b586">tbb::tick_count</a>, <a class="el" href="a00216.html#fa509691e1d689830931e36edd274f76">tbb::tick_count::interval_t</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/functions_type.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/functions_type.html
new file mode 100644 (file)
index 0000000..8e45399
--- /dev/null
@@ -0,0 +1,57 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Class Members - Typedefs</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li id="current"><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="functions.html"><span>All</span></a></li>
+    <li><a href="functions_func.html"><span>Functions</span></a></li>
+    <li><a href="functions_vars.html"><span>Variables</span></a></li>
+    <li id="current"><a href="functions_type.html"><span>Typedefs</span></a></li>
+    <li><a href="functions_enum.html"><span>Enumerations</span></a></li>
+    <li><a href="functions_eval.html"><span>Enumerator</span></a></li>
+    <li><a href="functions_rela.html"><span>Related&nbsp;Functions</span></a></li>
+  </ul>
+</div>
+&nbsp;
+<p>
+<ul>
+<li>affinity_id
+: <a class="el" href="a00204.html#d61bb32389d3857bf7511d69beaafb76">tbb::task</a><li>allocator_type
+: <a class="el" href="a00209.html#78701e7454ef8e1a25b5acd364367080">tbb::tbb_allocator&lt; T &gt;</a>, <a class="el" href="a00168.html#3c03eb40955b933b01987222722ac4bd">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>, <a class="el" href="a00159.html#2e2726fccf6d975dc1071608cc0bbf90">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#5a3956341728eaa558d8827063718cac">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a><li>const_iterator
+: <a class="el" href="a00152.html#1a8d05842c2b3dfc177bc4d347e4cef7">tbb::blocked_range&lt; Value &gt;</a><li>const_reference
+: <a class="el" href="a00159.html#796713d0b9ba93a4721cbe13e4474068">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#4d48e7ff93f81636bca2c74f7da34750">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a><li>difference_type
+: <a class="el" href="a00159.html#4b45c91297e69515d83d5eef85ae1f49">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#068576d16c7e4e05d52f9db7a45b5b65">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a><li>native_handle_type
+: <a class="el" href="a00194.html#889fa8cc32dd707eef7c0f52dda09c0d">tbb::recursive_mutex</a>, <a class="el" href="a00177.html#9f1ec84d5815263ceae853f06ddb4cac">tbb::mutex</a><li>page_range_type
+: <a class="el" href="a00154.html#b8ebf17a552ba47825e9b3887855b719">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a><li>reference
+: <a class="el" href="a00159.html#dcd44ca6a88c0dc7a847a47a10811f0c">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#a8d725c50a9834bb7af5b67c0aff92b8">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a><li>row_range_type
+: <a class="el" href="a00153.html#a807a22fe658ec38b8edfd69521d0383">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a><li>size_type
+: <a class="el" href="a00159.html#a80e4c11dbb324e4b92a24a77bbcde68">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#8fc30e93f8342a1960357f71e4fe8a2b">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a>, <a class="el" href="a00152.html#f5707bffea38eee5c9680f37358afb8e">tbb::blocked_range&lt; Value &gt;</a><li>value_type
+: <a class="el" href="a00184.html#fa297e53d3af2a101e712bc200233e9c">tbb::parallel_while&lt; Body &gt;</a>, <a class="el" href="a00159.html#98245517a931e5893f6601e66c51fc75">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>, <a class="el" href="a00164.html#682c3978d5cb0620000994f11c44a476">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a>, <a class="el" href="a00161.html#49eec74f272bab187d176c0d9d16a7fe">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::accessor</a>, <a class="el" href="a00163.html#48647ca0d79c1233b997f5768403c926">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/functions_vars.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/functions_vars.html
new file mode 100644 (file)
index 0000000..46cc7f4
--- /dev/null
@@ -0,0 +1,49 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Class Members - Variables</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li id="current"><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="functions.html"><span>All</span></a></li>
+    <li><a href="functions_func.html"><span>Functions</span></a></li>
+    <li id="current"><a href="functions_vars.html"><span>Variables</span></a></li>
+    <li><a href="functions_type.html"><span>Typedefs</span></a></li>
+    <li><a href="functions_enum.html"><span>Enumerations</span></a></li>
+    <li><a href="functions_eval.html"><span>Enumerator</span></a></li>
+    <li><a href="functions_rela.html"><span>Related&nbsp;Functions</span></a></li>
+  </ul>
+</div>
+&nbsp;
+<p>
+<ul>
+<li>automatic
+: <a class="el" href="a00208.html#8f5988e2b0fbb2d533fcbb7f2583743f">tbb::task_scheduler_init</a><li>deferred
+: <a class="el" href="a00208.html#e6c860f1e559026ff3ef4599c0d6c514">tbb::task_scheduler_init</a><li>my_exception_data
+: <a class="el" href="a00176.html#a8c0ae2089ae784b28907cf748b89416">tbb::movable_exception&lt; ExceptionData &gt;</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/globals.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/globals.html
new file mode 100644 (file)
index 0000000..f63624d
--- /dev/null
@@ -0,0 +1,49 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Class Members</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li id="current"><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li id="current"><a href="globals.html"><span>All</span></a></li>
+    <li><a href="globals_func.html"><span>Functions</span></a></li>
+  </ul>
+</div>
+Here is a list of all documented file members with links to the documentation:
+<p>
+<ul>
+<li>scalable_aligned_free()
+: <a class="el" href="a00282.html#g65a20e812012f15ec7442d5b45d0cba5">scalable_allocator.h</a><li>scalable_aligned_malloc()
+: <a class="el" href="a00282.html#gc1c7aaa1fe85c17ba5a3a96f7e8d89e7">scalable_allocator.h</a><li>scalable_aligned_realloc()
+: <a class="el" href="a00282.html#gbaea91376be80dfabd7c93eaffd9abaa">scalable_allocator.h</a><li>scalable_calloc()
+: <a class="el" href="a00282.html#g3f5a2fde0bcaa3eda35be32c8658f444">scalable_allocator.h</a><li>scalable_free()
+: <a class="el" href="a00282.html#gca3579c21244dba9f0c351e5984d4565">scalable_allocator.h</a><li>scalable_malloc()
+: <a class="el" href="a00282.html#gc25b8e6c76db0b346a8249796a7a2475">scalable_allocator.h</a><li>scalable_msize()
+: <a class="el" href="a00282.html#g0965ce1b4b7835f92869c7fd867265f7">scalable_allocator.h</a><li>scalable_posix_memalign()
+: <a class="el" href="a00282.html#g05dcec987480bb2c82ecdead6a085899">scalable_allocator.h</a><li>scalable_realloc()
+: <a class="el" href="a00282.html#g951bbbbd2d041acb59ba5fa910b52543">scalable_allocator.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/globals_func.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/globals_func.html
new file mode 100644 (file)
index 0000000..ef8fbd5
--- /dev/null
@@ -0,0 +1,49 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Class Members</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li id="current"><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="files.html"><span>File&nbsp;List</span></a></li>
+    <li id="current"><a href="globals.html"><span>File&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="globals.html"><span>All</span></a></li>
+    <li id="current"><a href="globals_func.html"><span>Functions</span></a></li>
+  </ul>
+</div>
+&nbsp;
+<p>
+<ul>
+<li>scalable_aligned_free()
+: <a class="el" href="a00282.html#g65a20e812012f15ec7442d5b45d0cba5">scalable_allocator.h</a><li>scalable_aligned_malloc()
+: <a class="el" href="a00282.html#gc1c7aaa1fe85c17ba5a3a96f7e8d89e7">scalable_allocator.h</a><li>scalable_aligned_realloc()
+: <a class="el" href="a00282.html#gbaea91376be80dfabd7c93eaffd9abaa">scalable_allocator.h</a><li>scalable_calloc()
+: <a class="el" href="a00282.html#g3f5a2fde0bcaa3eda35be32c8658f444">scalable_allocator.h</a><li>scalable_free()
+: <a class="el" href="a00282.html#gca3579c21244dba9f0c351e5984d4565">scalable_allocator.h</a><li>scalable_malloc()
+: <a class="el" href="a00282.html#gc25b8e6c76db0b346a8249796a7a2475">scalable_allocator.h</a><li>scalable_msize()
+: <a class="el" href="a00282.html#g0965ce1b4b7835f92869c7fd867265f7">scalable_allocator.h</a><li>scalable_posix_memalign()
+: <a class="el" href="a00282.html#g05dcec987480bb2c82ecdead6a085899">scalable_allocator.h</a><li>scalable_realloc()
+: <a class="el" href="a00282.html#g951bbbbd2d041acb59ba5fa910b52543">scalable_allocator.h</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/hierarchy.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/hierarchy.html
new file mode 100644 (file)
index 0000000..6801928
--- /dev/null
@@ -0,0 +1,119 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Hierarchical Index</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li id="current"><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="annotated.html"><span>Class&nbsp;List</span></a></li>
+    <li id="current"><a href="hierarchy.html"><span>Class&nbsp;Hierarchy</span></a></li>
+    <li><a href="functions.html"><span>Class&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>Class Hierarchy</h1>This inheritance list is sorted roughly, but not completely, alphabetically:<ul>
+<li><a class="el" href="a00145.html">tbb::affinity_partitioner</a>
+<li><a class="el" href="a00146.html">tbb::aligned_space&lt; T, N &gt;</a>
+<li><a class="el" href="a00147.html">tbb::atomic&lt; T &gt;</a>
+<li><a class="el" href="a00148.html">tbb::atomic&lt; void * &gt;</a>
+<li><a class="el" href="a00149.html">tbb::internal::atomic_backoff</a>
+<li><a class="el" href="a00150.html">tbb::auto_partitioner</a>
+<li><a class="el" href="a00151.html">tbb::bad_last_alloc</a>
+<li><a class="el" href="a00152.html">tbb::blocked_range&lt; Value &gt;</a>
+<li><a class="el" href="a00153.html">tbb::blocked_range2d&lt; RowValue, ColValue &gt;</a>
+<li><a class="el" href="a00154.html">tbb::blocked_range3d&lt; PageValue, RowValue, ColValue &gt;</a>
+<li><a class="el" href="a00152.html">tbb::blocked_range&lt; I &gt;</a>
+<li><a class="el" href="a00155.html">tbb::cache_aligned_allocator&lt; T &gt;</a>
+<li><a class="el" href="a00156.html">tbb::cache_aligned_allocator&lt; void &gt;</a>
+<li><a class="el" href="a00158.html">tbb::combinable&lt; T &gt;</a>
+<li><a class="el" href="a00159.html">tbb::concurrent_bounded_queue&lt; T, A &gt;</a>
+<ul>
+<li><a class="el" href="a00165.html">tbb::deprecated::concurrent_queue&lt; T, A &gt;</a>
+</ul>
+<li><a class="el" href="a00160.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;</a>
+<li><a class="el" href="a00162.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::bucket_accessor</a>
+<li><a class="el" href="a00163.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::const_accessor</a>
+<ul>
+<li><a class="el" href="a00161.html">tbb::interface4::concurrent_hash_map&lt; Key, T, HashCompare, Allocator &gt;::accessor</a>
+</ul>
+<li><a class="el" href="a00164.html">tbb::strict_ppl::concurrent_queue&lt; T, A &gt;</a>
+<li><a class="el" href="a00166.html">tbb::concurrent_vector&lt; T, A &gt;</a>
+<li><a class="el" href="a00168.html">tbb::interface6::enumerable_thread_specific&lt; T, Allocator, ETS_key_type &gt;</a>
+<li><a class="el" href="a00169.html">tbb::filter</a>
+<ul>
+<li><a class="el" href="a00214.html">tbb::thread_bound_filter</a>
+</ul>
+<li><a class="el" href="a00170.html">tbb::interface5::filter_t&lt; T, U &gt;</a>
+<li><a class="el" href="a00171.html">tbb::final_scan_tag</a>
+<li><a class="el" href="a00172.html">tbb::interface5::flow_control</a>
+<li><a class="el" href="a00173.html">tbb::improper_lock</a>
+<li><b>atomic_impl</b><li><a class="el" href="a00174.html">tbb::invalid_multiple_scheduling</a>
+<li><a class="el" href="a00175.html">tbb::missing_wait</a>
+<li><a class="el" href="a00177.html">tbb::mutex</a>
+<li><a class="el" href="a00178.html">tbb::mutex::scoped_lock</a>
+<li><a class="el" href="a00179.html">tbb::null_mutex</a>
+<li><a class="el" href="a00180.html">tbb::null_mutex::scoped_lock</a>
+<li><a class="el" href="a00181.html">tbb::null_rw_mutex</a>
+<li><a class="el" href="a00182.html">tbb::null_rw_mutex::scoped_lock</a>
+<li><a class="el" href="a00183.html">tbb::parallel_do_feeder&lt; Item &gt;</a>
+<li><a class="el" href="a00184.html">tbb::parallel_while&lt; Body &gt;</a>
+<li><a class="el" href="a00185.html">tbb::pipeline</a>
+<li><a class="el" href="a00186.html">tbb::pre_scan_tag</a>
+<li><a class="el" href="a00187.html">tbb::queuing_mutex</a>
+<li><a class="el" href="a00188.html">tbb::queuing_mutex::scoped_lock</a>
+<li><a class="el" href="a00189.html">tbb::queuing_rw_mutex</a>
+<li><a class="el" href="a00190.html">tbb::queuing_rw_mutex::scoped_lock</a>
+<li><a class="el" href="a00191.html">tbb::interface5::reader_writer_lock</a>
+<li><a class="el" href="a00192.html">tbb::interface5::reader_writer_lock::scoped_lock</a>
+<li><a class="el" href="a00193.html">tbb::interface5::reader_writer_lock::scoped_lock_read</a>
+<li><a class="el" href="a00194.html">tbb::recursive_mutex</a>
+<li><a class="el" href="a00195.html">tbb::recursive_mutex::scoped_lock</a>
+<li><a class="el" href="a00196.html">tbb::scalable_allocator&lt; T &gt;</a>
+<li><a class="el" href="a00197.html">tbb::scalable_allocator&lt; void &gt;</a>
+<li><a class="el" href="a00198.html">tbb::simple_partitioner</a>
+<li><a class="el" href="a00199.html">tbb::spin_mutex</a>
+<li><a class="el" href="a00200.html">tbb::spin_mutex::scoped_lock</a>
+<li><a class="el" href="a00201.html">tbb::spin_rw_mutex_v3</a>
+<li><a class="el" href="a00202.html">tbb::spin_rw_mutex_v3::scoped_lock</a>
+<li><a class="el" href="a00203.html">tbb::split</a>
+<li><a class="el" href="a00204.html">tbb::task</a>
+<ul>
+<li><a class="el" href="a00167.html">tbb::empty_task</a>
+</ul>
+<li><a class="el" href="a00205.html">tbb::interface5::internal::task_base</a>
+<li><a class="el" href="a00206.html">tbb::task_group_context</a>
+<li><a class="el" href="a00207.html">tbb::task_list</a>
+<li><a class="el" href="a00208.html">tbb::task_scheduler_init</a>
+<li><a class="el" href="a00209.html">tbb::tbb_allocator&lt; T &gt;</a>
+<li><a class="el" href="a00210.html">tbb::tbb_allocator&lt; void &gt;</a>
+<li><a class="el" href="a00211.html">tbb::tbb_exception</a>
+<ul>
+<li><a class="el" href="a00157.html">tbb::captured_exception</a>
+<li><a class="el" href="a00176.html">tbb::movable_exception&lt; ExceptionData &gt;</a>
+</ul>
+<li><a class="el" href="a00212.html">tbb::internal::tbb_exception_ptr</a>
+<li><a class="el" href="a00213.html">tbb::tbb_hash_compare&lt; Key &gt;</a>
+<li><a class="el" href="a00215.html">tbb::tick_count</a>
+<li><a class="el" href="a00216.html">tbb::tick_count::interval_t</a>
+<li><a class="el" href="a00217.html">tbb::internal::work_around_alignment_bug&lt; Size, T &gt;</a>
+<li><a class="el" href="a00218.html">tbb::zero_allocator&lt; T, Allocator &gt;</a>
+<li><a class="el" href="a00219.html">tbb::zero_allocator&lt; void, Allocator &gt;</a>
+</ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/index.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/index.html
new file mode 100644 (file)
index 0000000..b41f471
--- /dev/null
@@ -0,0 +1,32 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Main Page</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li id="current"><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<h1>Main Page</h1>
+<p>
+Click the tabs above for information about the<ul>
+<li><a href="./modules.html">Modules</a> (groups of functionality) implemented by the library</li><li><a href="./annotated.html">Classes</a> provided by the library</li><li><a href="./files.html">Files</a> constituting the library.</li></ul>
+Please note that a significant part of TBB functionality is implemented in the form of template functions, descriptions of which are not accessible on the <a href="./annotated.html">Classes</a> tab. Use the <a href="./modules.html">Modules</a> or <a href="./namespacemembers.html">Namespace/Namespace Members</a> tabs to find them.<p>
+Additional pieces of information can be found here<ul>
+<li><a class="el" href="concepts.html">TBB concepts</a></li></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/modules.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/modules.html
new file mode 100644 (file)
index 0000000..d8519f6
--- /dev/null
@@ -0,0 +1,33 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Module Index</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li id="current"><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<h1>Modules</h1>Here is a list of all modules:<ul>
+<li><a class="el" href="a00280.html">Algorithms</a>
+<li><a class="el" href="a00281.html">Containers</a>
+<li><a class="el" href="a00282.html">Memory Allocation</a>
+<li><a class="el" href="a00283.html">Synchronization</a>
+<li><a class="el" href="a00284.html">Timing</a>
+<li><a class="el" href="a00285.html">Task Scheduling</a>
+</ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/namespacemembers.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/namespacemembers.html
new file mode 100644 (file)
index 0000000..7b8f314
--- /dev/null
@@ -0,0 +1,60 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Class Members</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li id="current"><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="namespaces.html"><span>Namespace List</span></a></li>
+    <li id="current"><a href="namespacemembers.html"><span>Namespace&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li id="current"><a href="namespacemembers.html"><span>All</span></a></li>
+    <li><a href="namespacemembers_func.html"><span>Functions</span></a></li>
+    <li><a href="namespacemembers_type.html"><span>Typedefs</span></a></li>
+    <li><a href="namespacemembers_enum.html"><span>Enumerations</span></a></li>
+    <li><a href="namespacemembers_eval.html"><span>Enumerator</span></a></li>
+  </ul>
+</div>
+Here is a list of all documented namespace members with links to the namespaces they belong to:
+<p>
+<ul>
+<li>__TBB_DECL_ATOMIC_ALT()
+: <a class="el" href="a00272.html#ad165cf61abbe349d413df2589679add">tbb</a><li>__TBB_full_fence
+: <a class="el" href="a00272.html#a8686246bb5d3664bd07563749970fefc6db01678b1749dff7554688d079520c">tbb</a><li>acquire
+: <a class="el" href="a00272.html#a8686246bb5d3664bd07563749970fef5f1fafe8d229d348ff91d937f64e79c7">tbb</a><li>assertion_failure()
+: <a class="el" href="a00272.html#3d1252787be39b4aef311f1cadaff9e8">tbb</a><li>assertion_handler_type
+: <a class="el" href="a00272.html#ed375248ff6019a70ca0f9da528e5d0b">tbb</a><li>ets_key_usage_type
+: <a class="el" href="a00272.html#a8622ae61b7e7737dac26542e181178e">tbb</a><li>memory_semantics
+: <a class="el" href="a00272.html#a8686246bb5d3664bd07563749970fef">tbb</a><li>parallel_do()
+: <a class="el" href="a00280.html#g2617dc9b88b3285a7212599d49f74228">tbb</a><li>parallel_for()
+: <a class="el" href="a00279.html#490399525b1e690ec31d6db964c6b272">tbb::strict_ppl</a>, <a class="el" href="a00280.html#g04b4696b67370c01353ff5974c8f1196">tbb</a><li>parallel_for_each()
+: <a class="el" href="a00280.html#gc2d710ca573f0a9bd94379cba3772def">tbb</a><li>parallel_invoke()
+: <a class="el" href="a00280.html#gd3e2998f171494f94c2103f4eb924084">tbb</a><li>parallel_reduce()
+: <a class="el" href="a00280.html#g496bd7eadb3b97495ccb5655ef90319e">tbb</a><li>parallel_scan()
+: <a class="el" href="a00280.html#g62fde400a37bbca1a2fddc8e3d22f556">tbb</a><li>parallel_sort()
+: <a class="el" href="a00280.html#gc7576f82fdedc8a701a6c17ad9415926">tbb</a><li>release
+: <a class="el" href="a00272.html#a8686246bb5d3664bd07563749970fefaa1fa107db0245c41fb109d976ae8d70">tbb</a><li>set_assertion_handler()
+: <a class="el" href="a00272.html#823fa1c15dd829d1d9167157450ddcd9">tbb</a><li>TBB_runtime_interface_version()
+: <a class="el" href="a00272.html#a6858b22e90041c9c4669674ff39b056">tbb</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/namespacemembers_enum.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/namespacemembers_enum.html
new file mode 100644 (file)
index 0000000..6f4b074
--- /dev/null
@@ -0,0 +1,45 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Class Members</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li id="current"><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="namespaces.html"><span>Namespace List</span></a></li>
+    <li id="current"><a href="namespacemembers.html"><span>Namespace&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="namespacemembers.html"><span>All</span></a></li>
+    <li><a href="namespacemembers_func.html"><span>Functions</span></a></li>
+    <li><a href="namespacemembers_type.html"><span>Typedefs</span></a></li>
+    <li id="current"><a href="namespacemembers_enum.html"><span>Enumerations</span></a></li>
+    <li><a href="namespacemembers_eval.html"><span>Enumerator</span></a></li>
+  </ul>
+</div>
+&nbsp;
+<p>
+<ul>
+<li>ets_key_usage_type
+: <a class="el" href="a00272.html#a8622ae61b7e7737dac26542e181178e">tbb</a><li>memory_semantics
+: <a class="el" href="a00272.html#a8686246bb5d3664bd07563749970fef">tbb</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/namespacemembers_eval.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/namespacemembers_eval.html
new file mode 100644 (file)
index 0000000..8e0a92c
--- /dev/null
@@ -0,0 +1,46 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Class Members</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li id="current"><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="namespaces.html"><span>Namespace List</span></a></li>
+    <li id="current"><a href="namespacemembers.html"><span>Namespace&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="namespacemembers.html"><span>All</span></a></li>
+    <li><a href="namespacemembers_func.html"><span>Functions</span></a></li>
+    <li><a href="namespacemembers_type.html"><span>Typedefs</span></a></li>
+    <li><a href="namespacemembers_enum.html"><span>Enumerations</span></a></li>
+    <li id="current"><a href="namespacemembers_eval.html"><span>Enumerator</span></a></li>
+  </ul>
+</div>
+&nbsp;
+<p>
+<ul>
+<li>__TBB_full_fence
+: <a class="el" href="a00272.html#a8686246bb5d3664bd07563749970fefc6db01678b1749dff7554688d079520c">tbb</a><li>acquire
+: <a class="el" href="a00272.html#a8686246bb5d3664bd07563749970fef5f1fafe8d229d348ff91d937f64e79c7">tbb</a><li>release
+: <a class="el" href="a00272.html#a8686246bb5d3664bd07563749970fefaa1fa107db0245c41fb109d976ae8d70">tbb</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/namespacemembers_func.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/namespacemembers_func.html
new file mode 100644 (file)
index 0000000..66f9f4d
--- /dev/null
@@ -0,0 +1,54 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Class Members</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li id="current"><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="namespaces.html"><span>Namespace List</span></a></li>
+    <li id="current"><a href="namespacemembers.html"><span>Namespace&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="namespacemembers.html"><span>All</span></a></li>
+    <li id="current"><a href="namespacemembers_func.html"><span>Functions</span></a></li>
+    <li><a href="namespacemembers_type.html"><span>Typedefs</span></a></li>
+    <li><a href="namespacemembers_enum.html"><span>Enumerations</span></a></li>
+    <li><a href="namespacemembers_eval.html"><span>Enumerator</span></a></li>
+  </ul>
+</div>
+&nbsp;
+<p>
+<ul>
+<li>__TBB_DECL_ATOMIC_ALT()
+: <a class="el" href="a00272.html#ad165cf61abbe349d413df2589679add">tbb</a><li>assertion_failure()
+: <a class="el" href="a00272.html#3d1252787be39b4aef311f1cadaff9e8">tbb</a><li>parallel_do()
+: <a class="el" href="a00280.html#g2617dc9b88b3285a7212599d49f74228">tbb</a><li>parallel_for()
+: <a class="el" href="a00279.html#490399525b1e690ec31d6db964c6b272">tbb::strict_ppl</a>, <a class="el" href="a00280.html#g04b4696b67370c01353ff5974c8f1196">tbb</a><li>parallel_for_each()
+: <a class="el" href="a00280.html#gc2d710ca573f0a9bd94379cba3772def">tbb</a><li>parallel_invoke()
+: <a class="el" href="a00280.html#gd3e2998f171494f94c2103f4eb924084">tbb</a><li>parallel_reduce()
+: <a class="el" href="a00280.html#g496bd7eadb3b97495ccb5655ef90319e">tbb</a><li>parallel_scan()
+: <a class="el" href="a00280.html#g62fde400a37bbca1a2fddc8e3d22f556">tbb</a><li>parallel_sort()
+: <a class="el" href="a00280.html#gc7576f82fdedc8a701a6c17ad9415926">tbb</a><li>set_assertion_handler()
+: <a class="el" href="a00272.html#823fa1c15dd829d1d9167157450ddcd9">tbb</a><li>TBB_runtime_interface_version()
+: <a class="el" href="a00272.html#a6858b22e90041c9c4669674ff39b056">tbb</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/namespacemembers_type.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/namespacemembers_type.html
new file mode 100644 (file)
index 0000000..fa5dce7
--- /dev/null
@@ -0,0 +1,44 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Class Members</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li id="current"><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="namespaces.html"><span>Namespace List</span></a></li>
+    <li id="current"><a href="namespacemembers.html"><span>Namespace&nbsp;Members</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li><a href="namespacemembers.html"><span>All</span></a></li>
+    <li><a href="namespacemembers_func.html"><span>Functions</span></a></li>
+    <li id="current"><a href="namespacemembers_type.html"><span>Typedefs</span></a></li>
+    <li><a href="namespacemembers_enum.html"><span>Enumerations</span></a></li>
+    <li><a href="namespacemembers_eval.html"><span>Enumerator</span></a></li>
+  </ul>
+</div>
+&nbsp;
+<p>
+<ul>
+<li>assertion_handler_type
+: <a class="el" href="a00272.html#ed375248ff6019a70ca0f9da528e5d0b">tbb</a></ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/namespaces.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/namespaces.html
new file mode 100644 (file)
index 0000000..69cdc43
--- /dev/null
@@ -0,0 +1,34 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Namespace Index</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li id="current"><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="tabs">
+  <ul>
+    <li id="current"><a href="namespaces.html"><span>Namespace List</span></a></li>
+    <li><a href="namespacemembers.html"><span>Namespace&nbsp;Members</span></a></li>
+  </ul></div>
+<h1>Namespace List</h1>Here is a list of all documented namespaces with brief descriptions:<table>
+  <tr><td class="indexkey"><a class="el" href="a00272.html">tbb</a></td><td class="indexvalue">The namespace tbb contains all components of the library </td></tr>
+  <tr><td class="indexkey"><a class="el" href="a00279.html">tbb::strict_ppl</a></td><td class="indexvalue">For internal use only </td></tr>
+</table>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/pages.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/pages.html
new file mode 100644 (file)
index 0000000..e2061d1
--- /dev/null
@@ -0,0 +1,31 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Page Index</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li id="current"><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<h1>Related Pages</h1>Here is a list of all related documentation pages:<ul>
+<li><a class="el" href="parallel_reduce_lambda_req.html">Requirements on parallel_reduce anonymous function objects (lambda functions)</a>
+
+<li><a class="el" href="deprecated.html">Deprecated List</a>
+
+</ul>
+<hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/parallel_do_body_req.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/parallel_do_body_req.html
new file mode 100644 (file)
index 0000000..a434c11
--- /dev/null
@@ -0,0 +1,38 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Requirements on parallel_do body</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="index.html">Main Page</a>&nbsp;&raquo&nbsp;<a class="el" href="concepts.html">TBB concepts</a></div>
+<h1><a class="anchor" name="parallel_do_body_req">Requirements on parallel_do body</a></h1>Class <code>Body</code> implementing the concept of parallel_do body must define: -<div class="fragment"><pre class="fragment">        B::operator()( 
+                cv_item_type item,
+                parallel_do_feeder&lt;item_type&gt;&amp; feeder
+        ) <span class="keyword">const</span>
+        
+        OR
+
+        B::operator()( cv_item_type&amp; item ) <span class="keyword">const</span>
+</pre></div> Process item. May be invoked concurrently for the same <code>this</code> but different <code>item</code>.<p>
+-<div class="fragment"><pre class="fragment"> item_type( <span class="keyword">const</span> item_type&amp; ) 
+</pre></div> Copy a work item. -<div class="fragment"><pre class="fragment"> ~item_type() 
+</pre></div> Destroy a work item <hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
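The requirements above describe the parallel_do Body concept only in the abstract. A minimal conforming Body, using the two-argument operator() form so that it can feed extra work back into the loop, might look as follows; the WorkItem type, its integer payload, and the printf output are illustrative assumptions and are not taken from the TBB headers or documentation.

    #include <cstdio>
    #include <vector>
    #include "tbb/parallel_do.h"

    // Hypothetical work item used only for illustration.
    struct WorkItem {
        int value;
        explicit WorkItem( int v ) : value(v) {}
        // The implicitly generated copy constructor and destructor
        // satisfy the item_type requirements listed above.
    };

    // Body implementing the two-argument form of operator(); it may add
    // follow-up items through the feeder while the loop is running.
    struct ProcessItem {
        void operator()( WorkItem item,
                         tbb::parallel_do_feeder<WorkItem>& feeder ) const {
            std::printf( "processing %d\n", item.value );
            if( item.value > 0 )
                feeder.add( WorkItem( item.value - 1 ) ); // feed new work
        }
    };

    int main() {
        std::vector<WorkItem> items;
        items.push_back( WorkItem(3) );
        items.push_back( WorkItem(5) );
        tbb::parallel_do( items.begin(), items.end(), ProcessItem() );
        return 0;
    }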
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/parallel_for_body_req.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/parallel_for_body_req.html
new file mode 100644 (file)
index 0000000..8a7fa48
--- /dev/null
@@ -0,0 +1,30 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Requirements on parallel_for body</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="index.html">Main Page</a>&nbsp;&raquo&nbsp;<a class="el" href="concepts.html">TBB concepts</a></div>
+<h1><a class="anchor" name="parallel_for_body_req">Requirements on parallel_for body</a></h1>Class <code>Body</code> implementing the concept of parallel_for body must define: -<div class="fragment"><pre class="fragment"> Body::Body( <span class="keyword">const</span> Body&amp; ); 
+</pre></div> Copy constructor -<div class="fragment"><pre class="fragment"> Body::~Body(); 
+</pre></div> Destructor -<div class="fragment"><pre class="fragment"> <span class="keywordtype">void</span> Body::operator()( Range&amp; r ) <span class="keyword">const</span>; 
+</pre></div> Function call operator applying the body to range <code>r</code>. <hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
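For illustration, a body meeting these requirements might look like the following sketch; the array-scaling example and the ScaleVector name are assumptions chosen for brevity, not taken from the TBB sources.

    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"
    #include <cstddef>

    // Body that scales a section of an array. The compiler-generated copy
    // constructor and destructor satisfy the Body::Body(const Body&) and
    // Body::~Body() requirements.
    class ScaleVector {
        double* const my_a;
        const double my_factor;
    public:
        ScaleVector( double a[], double factor ) : my_a(a), my_factor(factor) {}
        void operator()( const tbb::blocked_range<size_t>& r ) const {
            for( size_t i = r.begin(); i != r.end(); ++i )
                my_a[i] *= my_factor;      // apply the body to each index in r
        }
    };

    int main() {
        double a[1000];
        for( int i = 0; i < 1000; ++i ) a[i] = i;
        tbb::parallel_for( tbb::blocked_range<size_t>(0, 1000), ScaleVector(a, 0.5) );
        return 0;
    }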
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/parallel_reduce_body_req.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/parallel_reduce_body_req.html
new file mode 100644 (file)
index 0000000..e152812
--- /dev/null
@@ -0,0 +1,31 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Requirements on parallel_reduce body</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="index.html">Main Page</a>&nbsp;&raquo&nbsp;<a class="el" href="concepts.html">TBB concepts</a></div>
+<h1><a class="anchor" name="parallel_reduce_body_req">Requirements on parallel_reduce body</a></h1>Class <code>Body</code> implementing the concept of parallel_reduce body must define: -<div class="fragment"><pre class="fragment"> Body::Body( Body&amp;, split ); 
+</pre></div> Splitting constructor. Must be able to run concurrently with operator() and method <code>join</code> -<div class="fragment"><pre class="fragment"> Body::~Body(); 
+</pre></div> Destructor -<div class="fragment"><pre class="fragment"> <span class="keywordtype">void</span> Body::operator()( Range&amp; r ); 
+</pre></div> Function call operator applying body to range <code>r</code> and accumulating the result -<div class="fragment"><pre class="fragment"> <span class="keywordtype">void</span> Body::join( Body&amp; b ); 
+</pre></div> Join results. The result in <code>b</code> should be merged into the result of <code>this</code> <hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
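A sketch of a body meeting these requirements, here accumulating a sum; the SumBody name and the summation example are illustrative assumptions rather than anything taken from the TBB sources.

    #include "tbb/parallel_reduce.h"
    #include "tbb/blocked_range.h"
    #include <cstddef>

    struct SumBody {
        const double* my_a;
        double my_sum;
        SumBody( const double a[] ) : my_a(a), my_sum(0) {}
        // Splitting constructor: the new body starts with an empty accumulator
        // and may run concurrently with operator() and join on the original.
        SumBody( SumBody& other, tbb::split ) : my_a(other.my_a), my_sum(0) {}
        // Accumulate over subrange r (note: not const, unlike a parallel_for body).
        void operator()( const tbb::blocked_range<size_t>& r ) {
            for( size_t i = r.begin(); i != r.end(); ++i )
                my_sum += my_a[i];
        }
        // Merge the partial result computed by a split-off body.
        void join( SumBody& rhs ) { my_sum += rhs.my_sum; }
    };

    int main() {
        double a[1000];
        for( int i = 0; i < 1000; ++i ) a[i] = 1.0;
        SumBody body( a );
        tbb::parallel_reduce( tbb::blocked_range<size_t>(0, 1000), body );
        return body.my_sum == 1000.0 ? 0 : 1;
    }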
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/parallel_reduce_lambda_req.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/parallel_reduce_lambda_req.html
new file mode 100644 (file)
index 0000000..9892b0c
--- /dev/null
@@ -0,0 +1,25 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Requirements on parallel_reduce anonymous function objects (lambda functions)</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<h1><a class="anchor" name="parallel_reduce_lambda_req">Requirements on parallel_reduce anonymous function objects (lambda functions)</a></h1>TO BE DOCUMENTED <hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
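The page above is still a placeholder, so the following is only a sketch of the functional (lambda) overload of parallel_reduce: it takes a range, an identity value, a function that folds a subrange into a running value, and a reduction that combines two partial results. It assumes a compiler with C++0x lambda support, and the summation example itself is an illustrative assumption.

    #include "tbb/parallel_reduce.h"
    #include "tbb/blocked_range.h"
    #include <vector>
    #include <cstddef>

    int main() {
        std::vector<double> v( 1000, 1.0 );
        double total = tbb::parallel_reduce(
            tbb::blocked_range<size_t>( 0, v.size() ),
            0.0,                                          // identity element for +
            [&]( const tbb::blocked_range<size_t>& r, double init ) -> double {
                for( size_t i = r.begin(); i != r.end(); ++i )
                    init += v[i];                         // fold subrange r into init
                return init;
            },
            []( double x, double y ) { return x + y; }    // combine two partial sums
        );
        return total == 1000.0 ? 0 : 1;
    }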
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/parallel_scan_body_req.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/parallel_scan_body_req.html
new file mode 100644 (file)
index 0000000..861bb6e
--- /dev/null
@@ -0,0 +1,32 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Requirements on parallel_scan body</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="index.html">Main Page</a>&nbsp;&raquo&nbsp;<a class="el" href="concepts.html">TBB concepts</a></div>
+<h1><a class="anchor" name="parallel_scan_body_req">Requirements on parallel_scan body</a></h1>Class <code>Body</code> implementing the concept of parallel_reduce body must define: -<div class="fragment"><pre class="fragment"> Body::Body( Body&amp;, split ); 
+</pre></div> Splitting constructor. Split <code>b</code> so that <code>this</code> and <code>b</code> can accumulate separately -<div class="fragment"><pre class="fragment"> Body::~Body(); 
+</pre></div> Destructor -<div class="fragment"><pre class="fragment"> <span class="keywordtype">void</span> Body::operator()( <span class="keyword">const</span> Range&amp; r, pre_scan_tag ); 
+</pre></div> Preprocess iterations for range <code>r</code> -<div class="fragment"><pre class="fragment"> <span class="keywordtype">void</span> Body::operator()( <span class="keyword">const</span> Range&amp; r, final_scan_tag ); 
+</pre></div> Do final processing for iterations of range <code>r</code> -<div class="fragment"><pre class="fragment"> <span class="keywordtype">void</span> Body::reverse_join( Body&amp; a ); 
+</pre></div> Merge preprocessing state of <code>a</code> into <code>this</code>, where <code>a</code> was created earlier from <code>b</code> by b's splitting constructor <hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
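As an illustration, a body computing a running (prefix) sum might look like the sketch below; the RunningSum name and the example data are assumptions. The parallel_scan template in this library also relies on an assign method on the body, so it is included here even though the page above does not list it.

    #include "tbb/parallel_scan.h"
    #include "tbb/blocked_range.h"
    #include <cstddef>

    // Computes y[i] = x[0] + ... + x[i].
    struct RunningSum {
        const double* my_x;
        double* my_y;
        double my_sum;
        RunningSum( const double x[], double y[] ) : my_x(x), my_y(y), my_sum(0) {}
        // Splitting constructor: *this and the original accumulate separately.
        RunningSum( RunningSum& b, tbb::split ) : my_x(b.my_x), my_y(b.my_y), my_sum(0) {}
        // Pre-scan pass: only accumulate the sum over r.
        void operator()( const tbb::blocked_range<size_t>& r, tbb::pre_scan_tag ) {
            for( size_t i = r.begin(); i != r.end(); ++i )
                my_sum += my_x[i];
        }
        // Final pass: emit output using the prefix accumulated so far.
        void operator()( const tbb::blocked_range<size_t>& r, tbb::final_scan_tag ) {
            for( size_t i = r.begin(); i != r.end(); ++i ) {
                my_sum += my_x[i];
                my_y[i] = my_sum;
            }
        }
        // Merge preprocessing state of a, which covers earlier iterations.
        void reverse_join( RunningSum& a ) { my_sum = a.my_sum + my_sum; }
        // Copy back the final state (used internally by parallel_scan).
        void assign( RunningSum& b ) { my_sum = b.my_sum; }
    };

    int main() {
        double x[100], y[100];
        for( int i = 0; i < 100; ++i ) x[i] = 1.0;
        RunningSum body( x, y );
        tbb::parallel_scan( tbb::blocked_range<size_t>(0, 100), body );
        return y[99] == 100.0 ? 0 : 1;
    }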
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/parallel_sort_iter_req.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/parallel_sort_iter_req.html
new file mode 100644 (file)
index 0000000..df61866
--- /dev/null
@@ -0,0 +1,29 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Requirements on iterators for parallel_sort</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="index.html">Main Page</a>&nbsp;&raquo&nbsp;<a class="el" href="concepts.html">TBB concepts</a></div>
+<h1><a class="anchor" name="parallel_sort_iter_req">Requirements on iterators for parallel_sort</a></h1>Requirements on value type <code>T</code> of <code>RandomAccessIterator</code> for <code>parallel_sort:</code> -<div class="fragment"><pre class="fragment"> <span class="keywordtype">void</span> swap( T&amp; x, T&amp; y ) 
+</pre></div> Swaps <code>x</code> and <code>y</code> -<div class="fragment"><pre class="fragment"> <span class="keywordtype">bool</span> Compare::operator()( <span class="keyword">const</span> T&amp; x, <span class="keyword">const</span> T&amp; y ) 
+</pre></div> True if <code>x</code> comes before <code>y</code>. <hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
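A short sketch of a value type and comparator satisfying these requirements; the Record type and ByKey comparator are illustrative assumptions, not part of the TBB sources.

    #include "tbb/parallel_sort.h"
    #include <vector>

    // Swappable via std::swap because it is copy-constructible and assignable.
    struct Record {
        int key;
        Record( int k = 0 ) : key(k) {}
    };

    // Defines a strict weak ordering: true if x should come before y.
    struct ByKey {
        bool operator()( const Record& x, const Record& y ) const {
            return x.key < y.key;
        }
    };

    int main() {
        std::vector<Record> v;
        for( int i = 0; i < 100; ++i ) v.push_back( Record( 100 - i ) );
        tbb::parallel_sort( v.begin(), v.end(), ByKey() );   // random-access iterators
        return v.front().key == 1 ? 0 : 1;
    }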
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/range_req.html b/deal.II/contrib/tbb/tbb30_104oss/doc/html/range_req.html
new file mode 100644 (file)
index 0000000..a8f667d
--- /dev/null
@@ -0,0 +1,32 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html><head><meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
+<title>Requirements on range concept</title>
+<link href="doxygen.css" rel="stylesheet" type="text/css">
+<link href="tabs.css" rel="stylesheet" type="text/css">
+</head><body>
+<!-- Generated by Doxygen 1.4.7 -->
+<div class="tabs">
+  <ul>
+    <li><a href="index.html"><span>Main&nbsp;Page</span></a></li>
+    <li><a href="modules.html"><span>Modules</span></a></li>
+    <li><a href="namespaces.html"><span>Namespaces</span></a></li>
+    <li><a href="annotated.html"><span>Classes</span></a></li>
+    <li><a href="files.html"><span>Files</span></a></li>
+    <li><a href="pages.html"><span>Related&nbsp;Pages</span></a></li>
+  </ul></div>
+<div class="nav">
+<a class="el" href="index.html">Main Page</a>&nbsp;&raquo&nbsp;<a class="el" href="concepts.html">TBB concepts</a></div>
+<h1><a class="anchor" name="range_req">Requirements on range concept</a></h1>Class <code>R</code> implementing the concept of range must define: -<div class="fragment"><pre class="fragment"> R::R( <span class="keyword">const</span> R&amp; ); 
+</pre></div> Copy constructor -<div class="fragment"><pre class="fragment"> R::~R(); 
+</pre></div> Destructor -<div class="fragment"><pre class="fragment"> <span class="keywordtype">bool</span> R::is_divisible() <span class="keyword">const</span>; 
+</pre></div> True if range can be partitioned into two subranges -<div class="fragment"><pre class="fragment"> <span class="keywordtype">bool</span> R::empty() <span class="keyword">const</span>; 
+</pre></div> True if range is empty -<div class="fragment"><pre class="fragment"> R::R( R&amp; r, split ); 
+</pre></div> Split range <code>r</code> into two subranges. <hr>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
+registered trademarks or trademarks of Intel Corporation or its
+subsidiaries in the United States and other countries.
+<p></p>
+* Other names and brands may be claimed as the property of others.
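To make the five requirements concrete, here is a sketch of a minimal index range modeled on tbb::blocked_range; the IndexRange name, the grain-size policy, and the Touch body are assumptions, and in practice tbb::blocked_range already provides all of this.

    #include "tbb/parallel_for.h"
    #include <cstddef>

    class IndexRange {
        size_t my_begin, my_end, my_grainsize;
    public:
        IndexRange( size_t b, size_t e, size_t grain = 1 )
            : my_begin(b), my_end(e), my_grainsize(grain) {}
        // Compiler-generated copy constructor and destructor satisfy
        // R::R(const R&) and R::~R().

        bool empty() const { return my_begin == my_end; }
        bool is_divisible() const { return my_end - my_begin > my_grainsize; }

        // Splitting constructor: r keeps the first half, *this takes the second.
        IndexRange( IndexRange& r, tbb::split )
            : my_begin( r.my_begin + (r.my_end - r.my_begin) / 2 ),
              my_end( r.my_end ),
              my_grainsize( r.my_grainsize )
        {
            r.my_end = my_begin;
        }

        size_t begin() const { return my_begin; }
        size_t end() const { return my_end; }
    };

    // Trivial body used only to exercise the range with parallel_for.
    struct Touch {
        void operator()( const IndexRange& r ) const {
            for( size_t i = r.begin(); i != r.end(); ++i ) { /* work on index i */ }
        }
    };

    int main() {
        tbb::parallel_for( IndexRange( 0, 1000, 16 ), Touch() );
        return 0;
    }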
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/tab_b.gif b/deal.II/contrib/tbb/tbb30_104oss/doc/html/tab_b.gif
new file mode 100644 (file)
index 0000000..0d62348
Binary files /dev/null and b/deal.II/contrib/tbb/tbb30_104oss/doc/html/tab_b.gif differ
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/tab_l.gif b/deal.II/contrib/tbb/tbb30_104oss/doc/html/tab_l.gif
new file mode 100644 (file)
index 0000000..9b1e633
Binary files /dev/null and b/deal.II/contrib/tbb/tbb30_104oss/doc/html/tab_l.gif differ
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/tab_r.gif b/deal.II/contrib/tbb/tbb30_104oss/doc/html/tab_r.gif
new file mode 100644 (file)
index 0000000..ce9dd9f
Binary files /dev/null and b/deal.II/contrib/tbb/tbb30_104oss/doc/html/tab_r.gif differ
diff --git a/deal.II/contrib/tbb/tbb30_104oss/doc/html/tabs.css b/deal.II/contrib/tbb/tbb30_104oss/doc/html/tabs.css
new file mode 100644 (file)
index 0000000..a61552a
--- /dev/null
@@ -0,0 +1,102 @@
+/* tabs styles, based on http://www.alistapart.com/articles/slidingdoors */
+
+DIV.tabs
+{
+   float            : left;
+   width            : 100%;
+   background       : url("tab_b.gif") repeat-x bottom;
+   margin-bottom    : 4px;
+}
+
+DIV.tabs UL
+{
+   margin           : 0px;
+   padding-left     : 10px;
+   list-style       : none;
+}
+
+DIV.tabs LI, DIV.tabs FORM
+{
+   display          : inline;
+   margin           : 0px;
+   padding          : 0px;
+}
+
+DIV.tabs FORM
+{
+   float            : right;
+}
+
+DIV.tabs A
+{
+   float            : left;
+   background       : url("tab_r.gif") no-repeat right top;
+   border-bottom    : 1px solid #84B0C7;
+   font-size        : x-small;
+   font-weight      : bold;
+   text-decoration  : none;
+}
+
+DIV.tabs A:hover
+{
+   background-position: 100% -150px;
+}
+
+DIV.tabs A:link, DIV.tabs A:visited,
+DIV.tabs A:active, DIV.tabs A:hover
+{
+       color: #1A419D;
+}
+
+DIV.tabs SPAN
+{
+   float            : left;
+   display          : block;
+   background       : url("tab_l.gif") no-repeat left top;
+   padding          : 5px 9px;
+   white-space      : nowrap;
+}
+
+DIV.tabs INPUT
+{
+   float            : right;
+   display          : inline;
+   font-size        : 1em;
+}
+
+DIV.tabs TD
+{
+   font-size        : x-small;
+   font-weight      : bold;
+   text-decoration  : none;
+}
+
+
+
+/* Commented Backslash Hack hides rule from IE5-Mac \*/
+DIV.tabs SPAN {float : none;}
+/* End IE5-Mac hack */
+
+DIV.tabs A:hover SPAN
+{
+   background-position: 0% -150px;
+}
+
+DIV.tabs LI#current A
+{
+   background-position: 100% -150px;
+   border-width     : 0px;
+}
+
+DIV.tabs LI#current SPAN
+{
+   background-position: 0% -150px;
+   padding-bottom   : 6px;
+}
+
+DIV.nav
+{
+   background       : none;
+   border           : none;
+   border-bottom    : 1px solid #84B0C7;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/index.html b/deal.II/contrib/tbb/tbb30_104oss/include/index.html
new file mode 100644 (file)
index 0000000..dddfb9d
--- /dev/null
@@ -0,0 +1,24 @@
+<HTML>
+<BODY>
+
+<H2>Overview</H2>
+Include files for Threading Building Blocks.
+
+<H2>Directories</H2>
+<DL>
+<DT><A HREF="tbb/index.html">tbb</A>
+<DD>Include files for Threading Building Blocks classes and functions.
+</DL>
+
+<HR>
+<A HREF="../index.html">Up to parent directory</A>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are 
+registered trademarks or trademarks of Intel Corporation or its 
+subsidiaries in the United States and other countries. 
+<p></p>
+* Other names and brands may be claimed as the property of others.
+</BODY>
+</HTML>
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/_concurrent_queue_internal.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/_concurrent_queue_internal.h
new file mode 100644 (file)
index 0000000..c1ebc9f
--- /dev/null
@@ -0,0 +1,1016 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_concurrent_queue_internal_H
+#define __TBB_concurrent_queue_internal_H
+
+#include "tbb_stddef.h"
+#include "tbb_machine.h"
+#include "atomic.h"
+#include "spin_mutex.h"
+#include "cache_aligned_allocator.h"
+#include "tbb_exception.h"
+#include <new>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <iterator>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+
+namespace tbb {
+
+#if !__TBB_TEMPLATE_FRIENDS_BROKEN
+
+// forward declaration
+namespace strict_ppl {
+template<typename T, typename A> class concurrent_queue;
+}
+
+template<typename T, typename A> class concurrent_bounded_queue;
+
+namespace deprecated {
+template<typename T, typename A> class concurrent_queue;
+}
+#endif
+
+//! For internal use only.
+namespace strict_ppl {
+
+//! @cond INTERNAL
+namespace internal {
+
+using namespace tbb::internal;
+
+typedef size_t ticket;
+
+template<typename T> class micro_queue ;
+template<typename T> class micro_queue_pop_finalizer ;
+template<typename T> class concurrent_queue_base_v3;
+
+//! parts of concurrent_queue_rep that do not have references to micro_queue
+/**
+ * For internal use only.
+ */
+struct concurrent_queue_rep_base : no_copy {
+    template<typename T> friend class micro_queue;
+    template<typename T> friend class concurrent_queue_base_v3;
+
+protected:
+    //! Approximately n_queue/golden ratio
+    static const size_t phi = 3;
+
+public:
+    // must be power of 2
+    static const size_t n_queue = 8;
+
+    //! Prefix on a page
+    struct page {
+        page* next;
+        uintptr_t mask; 
+    };
+
+    atomic<ticket> head_counter;
+    char pad1[NFS_MaxLineSize-sizeof(atomic<ticket>)];
+    atomic<ticket> tail_counter;
+    char pad2[NFS_MaxLineSize-sizeof(atomic<ticket>)];
+
+    //! Always a power of 2
+    size_t items_per_page;
+
+    //! Size of an item
+    size_t item_size;
+
+    //! number of invalid entries in the queue
+    atomic<size_t> n_invalid_entries;
+
+    char pad3[NFS_MaxLineSize-sizeof(size_t)-sizeof(size_t)-sizeof(atomic<size_t>)];
+} ;
+
+inline bool is_valid_page(const concurrent_queue_rep_base::page* p) {
+    return uintptr_t(p)>1;
+}
+
+//! Abstract class to define interface for page allocation/deallocation
+/**
+ * For internal use only.
+ */
+class concurrent_queue_page_allocator
+{
+    template<typename T> friend class micro_queue ;
+    template<typename T> friend class micro_queue_pop_finalizer ;
+protected:
+    virtual ~concurrent_queue_page_allocator() {}
+private:
+    virtual concurrent_queue_rep_base::page* allocate_page() = 0;
+    virtual void deallocate_page( concurrent_queue_rep_base::page* p ) = 0;
+} ;
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+// unary minus operator applied to unsigned type, result still unsigned
+#pragma warning( push )
+#pragma warning( disable: 4146 )
+#endif
+
+//! A queue using simple locking.
+/** For efficiency, this class has no constructor.  
+    The caller is expected to zero-initialize it. */
+template<typename T>
+class micro_queue : no_copy {
+    typedef concurrent_queue_rep_base::page page;
+
+    //! Class used to ensure exception-safety of method "pop" 
+    class destroyer: no_copy {
+        T& my_value;
+    public:
+        destroyer( T& value ) : my_value(value) {}
+        ~destroyer() {my_value.~T();}          
+    };
+
+    void copy_item( page& dst, size_t index, const void* src ) {
+        new( &get_ref(dst,index) ) T(*static_cast<const T*>(src)); 
+    }
+
+    void copy_item( page& dst, size_t dindex, const page& src, size_t sindex ) {
+        new( &get_ref(dst,dindex) ) T( get_ref(const_cast<page&>(src),sindex) );
+    }
+
+    void assign_and_destroy_item( void* dst, page& src, size_t index ) {
+        T& from = get_ref(src,index);
+        destroyer d(from);
+        *static_cast<T*>(dst) = from;
+    }
+
+    void spin_wait_until_my_turn( atomic<ticket>& counter, ticket k, concurrent_queue_rep_base& rb ) const ;
+
+public:
+    friend class micro_queue_pop_finalizer<T>;
+
+    struct padded_page: page {
+        //! Not defined anywhere - exists to quiet warnings.
+        padded_page(); 
+        //! Not defined anywhere - exists to quiet warnings.
+        void operator=( const padded_page& );
+        //! Must be last field.
+        T last;
+    };
+
+    static T& get_ref( page& p, size_t index ) {
+        return (&static_cast<padded_page*>(static_cast<void*>(&p))->last)[index];
+    }
+
+    atomic<page*> head_page;
+    atomic<ticket> head_counter;
+
+    atomic<page*> tail_page;
+    atomic<ticket> tail_counter;
+
+    spin_mutex page_mutex;
+    
+    void push( const void* item, ticket k, concurrent_queue_base_v3<T>& base ) ;
+
+    bool pop( void* dst, ticket k, concurrent_queue_base_v3<T>& base ) ;
+
+    micro_queue& assign( const micro_queue& src, concurrent_queue_base_v3<T>& base ) ;
+
+    page* make_copy( concurrent_queue_base_v3<T>& base, const page* src_page, size_t begin_in_page, size_t end_in_page, ticket& g_index ) ;
+
+    void invalidate_page_and_rethrow( ticket k ) ;
+};
+
+template<typename T>
+void micro_queue<T>::spin_wait_until_my_turn( atomic<ticket>& counter, ticket k, concurrent_queue_rep_base& rb ) const {
+    atomic_backoff backoff;
+    do {
+        backoff.pause();
+        if( counter&1 ) {
+            ++rb.n_invalid_entries;
+            throw_exception( eid_bad_last_alloc );
+        }
+    } while( counter!=k ) ;
+}
+
+template<typename T>
+void micro_queue<T>::push( const void* item, ticket k, concurrent_queue_base_v3<T>& base ) {
+    k &= -concurrent_queue_rep_base::n_queue;
+    page* p = NULL;
+    size_t index = k/concurrent_queue_rep_base::n_queue & (base.my_rep->items_per_page-1);
+    if( !index ) {
+        __TBB_TRY {
+            concurrent_queue_page_allocator& pa = base;
+            p = pa.allocate_page();
+        } __TBB_CATCH (...) {
+            ++base.my_rep->n_invalid_entries;
+            invalidate_page_and_rethrow( k );
+        }
+        p->mask = 0;
+        p->next = NULL;
+    }
+    
+    if( tail_counter!=k ) spin_wait_until_my_turn( tail_counter, k, *base.my_rep );
+        
+    if( p ) {
+        spin_mutex::scoped_lock lock( page_mutex );
+        page* q = tail_page;
+        if( is_valid_page(q) )
+            q->next = p;
+        else
+            head_page = p; 
+        tail_page = p;
+    } else {
+        p = tail_page;
+    }
+   
+    __TBB_TRY {
+        copy_item( *p, index, item );
+        // If no exception was thrown, mark item as present.
+        p->mask |= uintptr_t(1)<<index;
+        tail_counter += concurrent_queue_rep_base::n_queue; 
+    } __TBB_CATCH (...) {
+        ++base.my_rep->n_invalid_entries;
+        tail_counter += concurrent_queue_rep_base::n_queue; 
+        __TBB_RETHROW();
+    }
+}
+
+template<typename T>
+bool micro_queue<T>::pop( void* dst, ticket k, concurrent_queue_base_v3<T>& base ) {
+    k &= -concurrent_queue_rep_base::n_queue;
+    if( head_counter!=k ) spin_wait_until_eq( head_counter, k );
+    if( tail_counter==k ) spin_wait_while_eq( tail_counter, k );
+    page& p = *head_page;
+    __TBB_ASSERT( &p, NULL );
+    size_t index = k/concurrent_queue_rep_base::n_queue & (base.my_rep->items_per_page-1);
+    bool success = false; 
+    {
+        micro_queue_pop_finalizer<T> finalizer( *this, base, k+concurrent_queue_rep_base::n_queue, index==base.my_rep->items_per_page-1 ? &p : NULL ); 
+        if( p.mask & uintptr_t(1)<<index ) {
+            success = true;
+            assign_and_destroy_item( dst, p, index );
+        } else {
+            --base.my_rep->n_invalid_entries;
+        }
+    }
+    return success;
+}
+
+template<typename T>
+micro_queue<T>& micro_queue<T>::assign( const micro_queue<T>& src, concurrent_queue_base_v3<T>& base ) {
+    head_counter = src.head_counter;
+    tail_counter = src.tail_counter;
+    page_mutex   = src.page_mutex;
+
+    const page* srcp = src.head_page;
+    if( is_valid_page(srcp) ) {
+        ticket g_index = head_counter;
+        __TBB_TRY {
+            size_t n_items  = (tail_counter-head_counter)/concurrent_queue_rep_base::n_queue;
+            size_t index = head_counter/concurrent_queue_rep_base::n_queue & (base.my_rep->items_per_page-1);
+            size_t end_in_first_page = (index+n_items<base.my_rep->items_per_page)?(index+n_items):base.my_rep->items_per_page;
+
+            head_page = make_copy( base, srcp, index, end_in_first_page, g_index );
+            page* cur_page = head_page;
+
+            if( srcp != src.tail_page ) {
+                for( srcp = srcp->next; srcp!=src.tail_page; srcp=srcp->next ) {
+                    cur_page->next = make_copy( base, srcp, 0, base.my_rep->items_per_page, g_index );
+                    cur_page = cur_page->next;
+                }
+
+                __TBB_ASSERT( srcp==src.tail_page, NULL );
+                size_t last_index = tail_counter/concurrent_queue_rep_base::n_queue & (base.my_rep->items_per_page-1);
+                if( last_index==0 ) last_index = base.my_rep->items_per_page;
+
+                cur_page->next = make_copy( base, srcp, 0, last_index, g_index );
+                cur_page = cur_page->next;
+            }
+            tail_page = cur_page;
+        } __TBB_CATCH (...) {
+            invalidate_page_and_rethrow( g_index );
+        }
+    } else {
+        head_page = tail_page = NULL;
+    }
+    return *this;
+}
+
+template<typename T>
+void micro_queue<T>::invalidate_page_and_rethrow( ticket k ) {
+    // Append an invalid page at address 1 so that no more pushes are allowed.
+    page* invalid_page = (page*)uintptr_t(1);
+    {
+        spin_mutex::scoped_lock lock( page_mutex );
+        tail_counter = k+concurrent_queue_rep_base::n_queue+1;
+        page* q = tail_page;
+        if( is_valid_page(q) )
+            q->next = invalid_page;
+        else
+            head_page = invalid_page;
+        tail_page = invalid_page;
+    }
+    __TBB_RETHROW();
+}
+
+template<typename T>
+concurrent_queue_rep_base::page* micro_queue<T>::make_copy( concurrent_queue_base_v3<T>& base, const concurrent_queue_rep_base::page* src_page, size_t begin_in_page, size_t end_in_page, ticket& g_index ) {
+    concurrent_queue_page_allocator& pa = base;
+    page* new_page = pa.allocate_page();
+    new_page->next = NULL;
+    new_page->mask = src_page->mask;
+    for( ; begin_in_page!=end_in_page; ++begin_in_page, ++g_index )
+        if( new_page->mask & uintptr_t(1)<<begin_in_page )
+            copy_item( *new_page, begin_in_page, *src_page, begin_in_page );
+    return new_page;
+}
+
+template<typename T>
+class micro_queue_pop_finalizer: no_copy {
+    typedef concurrent_queue_rep_base::page page;
+    ticket my_ticket;
+    micro_queue<T>& my_queue;
+    page* my_page; 
+    concurrent_queue_page_allocator& allocator;
+public:
+    micro_queue_pop_finalizer( micro_queue<T>& queue, concurrent_queue_base_v3<T>& b, ticket k, page* p ) :
+        my_ticket(k), my_queue(queue), my_page(p), allocator(b)
+    {}
+    ~micro_queue_pop_finalizer() ;
+};
+
+template<typename T>
+micro_queue_pop_finalizer<T>::~micro_queue_pop_finalizer() {
+    page* p = my_page;
+    if( is_valid_page(p) ) {
+        spin_mutex::scoped_lock lock( my_queue.page_mutex );
+        page* q = p->next;
+        my_queue.head_page = q;
+        if( !is_valid_page(q) ) {
+            my_queue.tail_page = NULL;
+        }
+    }
+    my_queue.head_counter = my_ticket;
+    if( is_valid_page(p) ) {
+        allocator.deallocate_page( p );
+    }
+}
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+#pragma warning( pop )
+#endif // warning 4146 is back
+
+template<typename T> class concurrent_queue_iterator_rep ;
+template<typename T> class concurrent_queue_iterator_base_v3;
+
+//! representation of concurrent_queue_base
+/**
+ * the class inherits from concurrent_queue_rep_base and defines an array of micro_queue<T>'s
+ */
+template<typename T>
+struct concurrent_queue_rep : public concurrent_queue_rep_base {
+    micro_queue<T> array[n_queue];
+
+    //! Map ticket to an array index
+    static size_t index( ticket k ) {
+        return k*phi%n_queue;
+    }
+
+    micro_queue<T>& choose( ticket k ) {
+        // The formula here approximates LRU in a cache-oblivious way.
+        return array[index(k)];
+    }
+};
+
+//! base class of concurrent_queue
+/**
+ * The class implements the interface defined by concurrent_queue_page_allocator
+ * and has a pointer to an instance of concurrent_queue_rep.
+ */
+template<typename T>
+class concurrent_queue_base_v3: public concurrent_queue_page_allocator {
+    //! Internal representation
+    concurrent_queue_rep<T>* my_rep;
+
+    friend struct concurrent_queue_rep<T>;
+    friend class micro_queue<T>;
+    friend class concurrent_queue_iterator_rep<T>;
+    friend class concurrent_queue_iterator_base_v3<T>;
+
+protected:
+    typedef typename concurrent_queue_rep<T>::page page;
+
+private:
+    typedef typename micro_queue<T>::padded_page padded_page;
+
+    /* override */ virtual page *allocate_page() {
+        concurrent_queue_rep<T>& r = *my_rep;
+        size_t n = sizeof(padded_page) + (r.items_per_page-1)*sizeof(T);
+        return reinterpret_cast<page*>(allocate_block ( n ));
+    }
+
+    /* override */ virtual void deallocate_page( concurrent_queue_rep_base::page *p ) {
+        concurrent_queue_rep<T>& r = *my_rep;
+        size_t n = sizeof(padded_page) + (r.items_per_page-1)*sizeof(T);
+        deallocate_block( reinterpret_cast<void*>(p), n );
+    }
+
+    //! custom allocator
+    virtual void *allocate_block( size_t n ) = 0;
+
+    //! custom de-allocator
+    virtual void deallocate_block( void *p, size_t n ) = 0;
+
+protected:
+    concurrent_queue_base_v3();
+
+    /* override */ virtual ~concurrent_queue_base_v3() {
+#if __TBB_USE_ASSERT
+        size_t nq = my_rep->n_queue;
+        for( size_t i=0; i<nq; i++ )
+            __TBB_ASSERT( my_rep->array[i].tail_page==NULL, "pages were not freed properly" );
+#endif /* __TBB_USE_ASSERT */
+        cache_aligned_allocator<concurrent_queue_rep<T> >().deallocate(my_rep,1);
+    }
+
+    //! Enqueue item at tail of queue
+    void internal_push( const void* src ) {
+        concurrent_queue_rep<T>& r = *my_rep;
+        ticket k = r.tail_counter++;
+        r.choose(k).push( src, k, *this );
+    }
+
+    //! Attempt to dequeue item from queue.
+    /** Returns false if there was no item to dequeue. */
+    bool internal_try_pop( void* dst ) ;
+
+    //! Get size of queue; result may be invalid if queue is modified concurrently
+    size_t internal_size() const ;
+
+    //! check if the queue is empty; thread safe
+    bool internal_empty() const ;
+
+    //! free any remaining pages
+    /* note that the name may be misleading, but it remains so due to a historical accident. */
+    void internal_finish_clear() ;
+
+    //! Obsolete
+    void internal_throw_exception() const {
+        throw_exception( eid_bad_alloc );
+    }
+
+    //! copy internal representation
+    void assign( const concurrent_queue_base_v3& src ) ;
+};
+
+template<typename T>
+concurrent_queue_base_v3<T>::concurrent_queue_base_v3() {
+    const size_t item_size = sizeof(T);
+    my_rep = cache_aligned_allocator<concurrent_queue_rep<T> >().allocate(1);
+    __TBB_ASSERT( (size_t)my_rep % NFS_GetLineSize()==0, "alignment error" );
+    __TBB_ASSERT( (size_t)&my_rep->head_counter % NFS_GetLineSize()==0, "alignment error" );
+    __TBB_ASSERT( (size_t)&my_rep->tail_counter % NFS_GetLineSize()==0, "alignment error" );
+    __TBB_ASSERT( (size_t)&my_rep->array % NFS_GetLineSize()==0, "alignment error" );
+    memset(my_rep,0,sizeof(concurrent_queue_rep<T>));
+    my_rep->item_size = item_size;
+    my_rep->items_per_page = item_size<=8 ? 32 :
+                             item_size<=16 ? 16 : 
+                             item_size<=32 ? 8 :
+                             item_size<=64 ? 4 :
+                             item_size<=128 ? 2 :
+                             1;
+}
+
+template<typename T>
+bool concurrent_queue_base_v3<T>::internal_try_pop( void* dst ) {
+    concurrent_queue_rep<T>& r = *my_rep;
+    ticket k;
+    do {
+        k = r.head_counter;
+        for(;;) {
+            if( r.tail_counter<=k ) {
+                // Queue is empty 
+                return false;
+            }
+            // Queue had item with ticket k when we looked.  Attempt to get that item.
+            ticket tk=k;
+#if defined(_MSC_VER) && defined(_Wp64)
+    #pragma warning (push)
+    #pragma warning (disable: 4267)
+#endif
+            k = r.head_counter.compare_and_swap( tk+1, tk );
+#if defined(_MSC_VER) && defined(_Wp64)
+    #pragma warning (pop)
+#endif
+            if( k==tk )
+                break;
+            // Another thread snatched the item, retry.
+        }
+    } while( !r.choose( k ).pop( dst, k, *this ) );
+    return true;
+}
+
+template<typename T>
+size_t concurrent_queue_base_v3<T>::internal_size() const {
+    concurrent_queue_rep<T>& r = *my_rep;
+    __TBB_ASSERT( sizeof(ptrdiff_t)<=sizeof(size_t), NULL );
+    ticket hc = r.head_counter;
+    size_t nie = r.n_invalid_entries;
+    ticket tc = r.tail_counter;
+    __TBB_ASSERT( hc!=tc || !nie, NULL );
+    ptrdiff_t sz = tc-hc-nie;
+    return sz<0 ? 0 :  size_t(sz);
+}
+
+template<typename T>
+bool concurrent_queue_base_v3<T>::internal_empty() const {
+    concurrent_queue_rep<T>& r = *my_rep;
+    ticket tc = r.tail_counter;
+    ticket hc = r.head_counter;
+    // if tc!=r.tail_counter, the queue was not empty at some point between the two reads.
+    return tc==r.tail_counter && tc==hc+r.n_invalid_entries ;
+}
+
+template<typename T>
+void concurrent_queue_base_v3<T>::internal_finish_clear() {
+    concurrent_queue_rep<T>& r = *my_rep;
+    size_t nq = r.n_queue;
+    for( size_t i=0; i<nq; ++i ) {
+        page* tp = r.array[i].tail_page;
+        if( is_valid_page(tp) ) {
+            __TBB_ASSERT( r.array[i].head_page==tp, "at most one page should remain" );
+            deallocate_page( tp );
+            r.array[i].tail_page = NULL;
+        } else 
+            __TBB_ASSERT( !is_valid_page(r.array[i].head_page), "head page pointer corrupt?" );
+    }
+}
+
+template<typename T>
+void concurrent_queue_base_v3<T>::assign( const concurrent_queue_base_v3& src ) {
+    concurrent_queue_rep<T>& r = *my_rep;
+    r.items_per_page = src.my_rep->items_per_page;
+
+    // copy concurrent_queue_rep.
+    r.head_counter = src.my_rep->head_counter;
+    r.tail_counter = src.my_rep->tail_counter;
+    r.n_invalid_entries = src.my_rep->n_invalid_entries;
+
+    // copy micro_queues
+    for( size_t i = 0; i<r.n_queue; ++i )
+        r.array[i].assign( src.my_rep->array[i], *this);
+
+    __TBB_ASSERT( r.head_counter==src.my_rep->head_counter && r.tail_counter==src.my_rep->tail_counter, 
+            "the source concurrent queue should not be concurrently modified." );
+}
+
+template<typename Container, typename Value> class concurrent_queue_iterator;
+
+template<typename T>
+class concurrent_queue_iterator_rep: no_assign {
+    typedef typename micro_queue<T>::padded_page padded_page;
+public:
+    ticket head_counter;
+    const concurrent_queue_base_v3<T>& my_queue;
+    typename concurrent_queue_base_v3<T>::page* array[concurrent_queue_rep<T>::n_queue];
+    concurrent_queue_iterator_rep( const concurrent_queue_base_v3<T>& queue ) :
+        head_counter(queue.my_rep->head_counter),
+        my_queue(queue)
+    {
+        for( size_t k=0; k<concurrent_queue_rep<T>::n_queue; ++k )
+            array[k] = queue.my_rep->array[k].head_page;
+    }
+
+    //! Set item to point to kth element.  Return true if at end of queue or item is marked valid; false otherwise.
+    bool get_item( T*& item, size_t k ) ;
+};
+
+template<typename T>
+bool concurrent_queue_iterator_rep<T>::get_item( T*& item, size_t k ) {
+    if( k==my_queue.my_rep->tail_counter ) {
+        item = NULL;
+        return true;
+    } else {
+        typename concurrent_queue_base_v3<T>::page* p = array[concurrent_queue_rep<T>::index(k)];
+        __TBB_ASSERT(p,NULL);
+        size_t i = k/concurrent_queue_rep<T>::n_queue & (my_queue.my_rep->items_per_page-1);
+        item = &micro_queue<T>::get_ref(*p,i);
+        return (p->mask & uintptr_t(1)<<i)!=0;
+    }
+}
+
+//! Constness-independent portion of concurrent_queue_iterator.
+/** @ingroup containers */
+template<typename Value>
+class concurrent_queue_iterator_base_v3 : no_assign {
+    //! Represents concurrent_queue over which we are iterating.
+    /** NULL if one past last element in queue. */
+    concurrent_queue_iterator_rep<Value>* my_rep;
+
+    template<typename C, typename T, typename U>
+    friend bool operator==( const concurrent_queue_iterator<C,T>& i, const concurrent_queue_iterator<C,U>& j );
+
+    template<typename C, typename T, typename U>
+    friend bool operator!=( const concurrent_queue_iterator<C,T>& i, const concurrent_queue_iterator<C,U>& j );
+protected:
+    //! Pointer to current item
+    Value* my_item;
+
+    //! Default constructor
+    concurrent_queue_iterator_base_v3() : my_rep(NULL), my_item(NULL) {
+#if __GNUC__==4&&__GNUC_MINOR__==3
+        // to get around a possible gcc 4.3 bug
+        __TBB_release_consistency_helper();
+#endif
+    }
+
+    //! Copy constructor
+    concurrent_queue_iterator_base_v3( const concurrent_queue_iterator_base_v3& i )
+    : no_assign(), my_rep(NULL), my_item(NULL) {
+        assign(i);
+    }
+
+    //! Construct iterator pointing to head of queue.
+    concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3<Value>& queue ) ;
+
+    //! Assignment
+    void assign( const concurrent_queue_iterator_base_v3<Value>& other ) ;
+
+    //! Advance iterator one step towards tail of queue.
+    void advance() ;
+
+    //! Destructor
+    ~concurrent_queue_iterator_base_v3() {
+        cache_aligned_allocator<concurrent_queue_iterator_rep<Value> >().deallocate(my_rep, 1);
+        my_rep = NULL;
+    }
+};
+
+template<typename Value>
+concurrent_queue_iterator_base_v3<Value>::concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3<Value>& queue ) {
+    my_rep = cache_aligned_allocator<concurrent_queue_iterator_rep<Value> >().allocate(1);
+    new( my_rep ) concurrent_queue_iterator_rep<Value>(queue);
+    size_t k = my_rep->head_counter;
+    if( !my_rep->get_item(my_item, k) ) advance();
+}
+
+template<typename Value>
+void concurrent_queue_iterator_base_v3<Value>::assign( const concurrent_queue_iterator_base_v3<Value>& other ) {
+    if( my_rep!=other.my_rep ) {
+        if( my_rep ) {
+            cache_aligned_allocator<concurrent_queue_iterator_rep<Value> >().deallocate(my_rep, 1);
+            my_rep = NULL;
+        }
+        if( other.my_rep ) {
+            my_rep = cache_aligned_allocator<concurrent_queue_iterator_rep<Value> >().allocate(1);
+            new( my_rep ) concurrent_queue_iterator_rep<Value>( *other.my_rep );
+        }
+    }
+    my_item = other.my_item;
+}
+
+template<typename Value>
+void concurrent_queue_iterator_base_v3<Value>::advance() {
+    __TBB_ASSERT( my_item, "attempt to increment iterator past end of queue" );  
+    size_t k = my_rep->head_counter;
+    const concurrent_queue_base_v3<Value>& queue = my_rep->my_queue;
+#if TBB_USE_ASSERT
+    Value* tmp;
+    my_rep->get_item(tmp,k);
+    __TBB_ASSERT( my_item==tmp, NULL );
+#endif /* TBB_USE_ASSERT */
+    size_t i = k/concurrent_queue_rep<Value>::n_queue & (queue.my_rep->items_per_page-1);
+    if( i==queue.my_rep->items_per_page-1 ) {
+        typename concurrent_queue_base_v3<Value>::page*& root = my_rep->array[concurrent_queue_rep<Value>::index(k)];
+        root = root->next;
+    }
+    // advance k
+    my_rep->head_counter = ++k;
+    if( !my_rep->get_item(my_item, k) ) advance();
+}
+
+//! Similar to C++0x std::remove_cv
+/** "tbb_" prefix added to avoid overload confusion with C++0x implementations. */
+template<typename T> struct tbb_remove_cv {typedef T type;};
+template<typename T> struct tbb_remove_cv<const T> {typedef T type;};
+template<typename T> struct tbb_remove_cv<volatile T> {typedef T type;};
+template<typename T> struct tbb_remove_cv<const volatile T> {typedef T type;};
+
+//! Meets requirements of a forward iterator for STL.
+/** Value is either the T or const T type of the container.
+    @ingroup containers */
+template<typename Container, typename Value>
+class concurrent_queue_iterator: public concurrent_queue_iterator_base_v3<typename tbb_remove_cv<Value>::type>,
+        public std::iterator<std::forward_iterator_tag,Value> {
+#if !__TBB_TEMPLATE_FRIENDS_BROKEN
+    template<typename T, class A>
+    friend class ::tbb::strict_ppl::concurrent_queue;
+#else
+public: // workaround for MSVC
+#endif 
+    //! Construct iterator pointing to head of queue.
+    concurrent_queue_iterator( const concurrent_queue_base_v3<Value>& queue ) :
+        concurrent_queue_iterator_base_v3<typename tbb_remove_cv<Value>::type>(queue)
+    {
+    }
+
+public:
+    concurrent_queue_iterator() {}
+
+    concurrent_queue_iterator( const concurrent_queue_iterator<Container,typename Container::value_type>& other ) :
+        concurrent_queue_iterator_base_v3<typename tbb_remove_cv<Value>::type>(other)
+    {}
+
+    //! Iterator assignment
+    concurrent_queue_iterator& operator=( const concurrent_queue_iterator& other ) {
+        this->assign(other);
+        return *this;
+    }
+
+    //! Reference to current item 
+    Value& operator*() const {
+        return *static_cast<Value*>(this->my_item);
+    }
+
+    Value* operator->() const {return &operator*();}
+
+    //! Advance to next item in queue
+    concurrent_queue_iterator& operator++() {
+        this->advance();
+        return *this;
+    }
+
+    //! Post increment
+    Value* operator++(int) {
+        Value* result = &operator*();
+        operator++();
+        return result;
+    }
+}; // concurrent_queue_iterator
+
+
+template<typename C, typename T, typename U>
+bool operator==( const concurrent_queue_iterator<C,T>& i, const concurrent_queue_iterator<C,U>& j ) {
+    return i.my_item==j.my_item;
+}
+
+template<typename C, typename T, typename U>
+bool operator!=( const concurrent_queue_iterator<C,T>& i, const concurrent_queue_iterator<C,U>& j ) {
+    return i.my_item!=j.my_item;
+}
+
+} // namespace internal
+
+//! @endcond
+
+} // namespace strict_ppl
+
+//! @cond INTERNAL
+namespace internal {
+
+class concurrent_queue_rep;
+class concurrent_queue_iterator_rep;
+class concurrent_queue_iterator_base_v3;
+template<typename Container, typename Value> class concurrent_queue_iterator;
+
+//! For internal use only.
+/** Type-independent portion of concurrent_queue.
+    @ingroup containers */
+class concurrent_queue_base_v3: no_copy {
+    //! Internal representation
+    concurrent_queue_rep* my_rep;
+
+    friend class concurrent_queue_rep;
+    friend struct micro_queue;
+    friend class micro_queue_pop_finalizer;
+    friend class concurrent_queue_iterator_rep;
+    friend class concurrent_queue_iterator_base_v3;
+protected:
+    //! Prefix on a page
+    struct page {
+        page* next;
+        uintptr_t mask; 
+    };
+
+    //! Capacity of the queue
+    ptrdiff_t my_capacity;
+   
+    //! Always a power of 2
+    size_t items_per_page;
+
+    //! Size of an item
+    size_t item_size;
+
+#if __TBB_GCC_3_3_PROTECTED_BROKEN
+public:
+#endif
+    template<typename T>
+    struct padded_page: page {
+        //! Not defined anywhere - exists to quiet warnings.
+        padded_page(); 
+        //! Not defined anywhere - exists to quiet warnings.
+        void operator=( const padded_page& );
+        //! Must be last field.
+        T last;
+    };
+
+private:
+    virtual void copy_item( page& dst, size_t index, const void* src ) = 0;
+    virtual void assign_and_destroy_item( void* dst, page& src, size_t index ) = 0;
+protected:
+    __TBB_EXPORTED_METHOD concurrent_queue_base_v3( size_t item_size );
+    virtual __TBB_EXPORTED_METHOD ~concurrent_queue_base_v3();
+
+    //! Enqueue item at tail of queue
+    void __TBB_EXPORTED_METHOD internal_push( const void* src );
+
+    //! Dequeue item from head of queue
+    void __TBB_EXPORTED_METHOD internal_pop( void* dst );
+
+    //! Attempt to enqueue item onto queue.
+    bool __TBB_EXPORTED_METHOD internal_push_if_not_full( const void* src );
+
+    //! Attempt to dequeue item from queue.
+    /** Returns false if there was no item to dequeue. */
+    bool __TBB_EXPORTED_METHOD internal_pop_if_present( void* dst );
+
+    //! Get size of queue
+    ptrdiff_t __TBB_EXPORTED_METHOD internal_size() const;
+
+    //! Check if the queue is empty
+    bool __TBB_EXPORTED_METHOD internal_empty() const;
+
+    //! Set the queue capacity
+    void __TBB_EXPORTED_METHOD internal_set_capacity( ptrdiff_t capacity, size_t element_size );
+
+    //! custom allocator
+    virtual page *allocate_page() = 0;
+
+    //! custom de-allocator
+    virtual void deallocate_page( page *p ) = 0;
+
+    //! free any remaining pages
+    /* note that the name may be misleading, but it remains so due to a historical accident. */
+    void __TBB_EXPORTED_METHOD internal_finish_clear() ;
+
+    //! throw an exception
+    void __TBB_EXPORTED_METHOD internal_throw_exception() const;
+
+    //! copy internal representation
+    void __TBB_EXPORTED_METHOD assign( const concurrent_queue_base_v3& src ) ;
+
+private:
+    virtual void copy_page_item( page& dst, size_t dindex, const page& src, size_t sindex ) = 0;
+};
+
+//! Type-independent portion of concurrent_queue_iterator.
+/** @ingroup containers */
+class concurrent_queue_iterator_base_v3 {
+    //! concurrent_queue over which we are iterating.
+    /** NULL if one past last element in queue. */
+    concurrent_queue_iterator_rep* my_rep;
+
+    template<typename C, typename T, typename U>
+    friend bool operator==( const concurrent_queue_iterator<C,T>& i, const concurrent_queue_iterator<C,U>& j );
+
+    template<typename C, typename T, typename U>
+    friend bool operator!=( const concurrent_queue_iterator<C,T>& i, const concurrent_queue_iterator<C,U>& j );
+
+    void initialize( const concurrent_queue_base_v3& queue, size_t offset_of_data );
+protected:
+    //! Pointer to current item
+    void* my_item;
+
+    //! Default constructor
+    concurrent_queue_iterator_base_v3() : my_rep(NULL), my_item(NULL) {}
+
+    //! Copy constructor
+    concurrent_queue_iterator_base_v3( const concurrent_queue_iterator_base_v3& i ) : my_rep(NULL), my_item(NULL) {
+        assign(i);
+    }
+
+    //! Obsolete entry point for constructing iterator pointing to head of queue.
+    /** Does not work correctly for SSE types. */
+    __TBB_EXPORTED_METHOD concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue );
+
+    //! Construct iterator pointing to head of queue.
+    __TBB_EXPORTED_METHOD concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue, size_t offset_of_data );
+
+    //! Assignment
+    void __TBB_EXPORTED_METHOD assign( const concurrent_queue_iterator_base_v3& i );
+
+    //! Advance iterator one step towards tail of queue.
+    void __TBB_EXPORTED_METHOD advance();
+
+    //! Destructor
+    __TBB_EXPORTED_METHOD ~concurrent_queue_iterator_base_v3();
+};
+
+typedef concurrent_queue_iterator_base_v3 concurrent_queue_iterator_base;
+
+//! Meets requirements of a forward iterator for STL.
+/** Value is either the T or const T type of the container.
+    @ingroup containers */
+template<typename Container, typename Value>
+class concurrent_queue_iterator: public concurrent_queue_iterator_base,
+        public std::iterator<std::forward_iterator_tag,Value> {
+
+#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
+    template<typename T, class A>
+    friend class ::tbb::concurrent_bounded_queue;
+
+    template<typename T, class A>
+    friend class ::tbb::deprecated::concurrent_queue;
+#else
+public: // workaround for MSVC
+#endif 
+    //! Construct iterator pointing to head of queue.
+    concurrent_queue_iterator( const concurrent_queue_base_v3& queue ) :
+        concurrent_queue_iterator_base_v3(queue,__TBB_offsetof(concurrent_queue_base_v3::padded_page<Value>,last))
+    {
+    }
+
+public:
+    concurrent_queue_iterator() {}
+
+    /** If Value==Container::value_type, then this routine is the copy constructor. 
+        If Value==const Container::value_type, then this routine is a conversion constructor. */
+    concurrent_queue_iterator( const concurrent_queue_iterator<Container,typename Container::value_type>& other ) :
+        concurrent_queue_iterator_base_v3(other)
+    {}
+
+    //! Iterator assignment
+    concurrent_queue_iterator& operator=( const concurrent_queue_iterator& other ) {
+        assign(other);
+        return *this;
+    }
+
+    //! Reference to current item 
+    Value& operator*() const {
+        return *static_cast<Value*>(my_item);
+    }
+
+    Value* operator->() const {return &operator*();}
+
+    //! Advance to next item in queue
+    concurrent_queue_iterator& operator++() {
+        advance();
+        return *this;
+    }
+
+    //! Post increment
+    Value* operator++(int) {
+        Value* result = &operator*();
+        operator++();
+        return result;
+    }
+}; // concurrent_queue_iterator
+
+
+template<typename C, typename T, typename U>
+bool operator==( const concurrent_queue_iterator<C,T>& i, const concurrent_queue_iterator<C,U>& j ) {
+    return i.my_item==j.my_item;
+}
+
+template<typename C, typename T, typename U>
+bool operator!=( const concurrent_queue_iterator<C,T>& i, const concurrent_queue_iterator<C,U>& j ) {
+    return i.my_item!=j.my_item;
+}
+
+} // namespace internal;
+
+//! @endcond
+
+} // namespace tbb
+
+#endif /* __TBB_concurrent_queue_internal_H */
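The template machinery above backs the public tbb::concurrent_queue container. A minimal usage sketch follows, assuming the public tbb/concurrent_queue.h header that accompanies this file in the import; the element type and counts are illustrative.

    #include "tbb/concurrent_queue.h"
    #include <cassert>

    int main() {
        tbb::concurrent_queue<int> q;      // unbounded queue built on the strict_ppl internals
        for( int i = 0; i < 10; ++i )
            q.push( i );                   // thread-safe enqueue (internal_push above)

        int x;
        int popped = 0;
        while( q.try_pop( x ) )            // non-blocking dequeue (internal_try_pop above)
            ++popped;
        assert( popped == 10 );
        assert( q.empty() );
        return 0;
    }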
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/_concurrent_unordered_internal.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/_concurrent_unordered_internal.h
new file mode 100644 (file)
index 0000000..1a9a9a5
--- /dev/null
@@ -0,0 +1,1408 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+/* Container implementations in this header are based on PPL implementations 
+   provided by Microsoft. */
+
+#ifndef __TBB_concurrent_unordered_internal_H
+#define __TBB_concurrent_unordered_internal_H
+
+#include "tbb_stddef.h"
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <iterator>
+#include <utility>      // Need std::pair
+#include <functional>
+#include <string>       // For tbb_hasher
+#include <cstring>      // Need std::memset
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+#include "atomic.h"
+#include "tbb_exception.h"
+#include "tbb_allocator.h"
+
+namespace tbb {
+namespace interface5 {
+//! @cond INTERNAL
+namespace internal {
+
+template <typename T, typename Allocator>
+class split_ordered_list;
+template <typename Traits>
+class concurrent_unordered_base;
+
+// Forward list iterators (without skipping dummy elements)
+template<class Solist, typename Value>
+class flist_iterator : public std::iterator<std::forward_iterator_tag, Value>
+{
+    template <typename T, typename Allocator>
+    friend class split_ordered_list;
+    template <typename Traits>
+    friend class concurrent_unordered_base;
+    template<class M, typename V>
+    friend class flist_iterator;
+
+    typedef typename Solist::nodeptr_t nodeptr_t;
+public:
+    typedef typename Solist::value_type value_type;
+    typedef typename Solist::difference_type difference_type;
+    typedef typename Solist::pointer pointer;
+    typedef typename Solist::reference reference;
+
+    flist_iterator() : my_node_ptr(0) {}
+    flist_iterator( const flist_iterator<Solist, typename Solist::value_type> &other )
+        : my_node_ptr(other.my_node_ptr) {}
+
+    reference operator*() const { return my_node_ptr->my_element; }
+    pointer operator->() const { return &**this; }
+
+    flist_iterator& operator++() {
+        my_node_ptr = my_node_ptr->my_next;
+        return *this;
+    }
+
+    flist_iterator operator++(int) {
+        flist_iterator tmp = *this;
+        ++*this;
+        return tmp;
+    }
+
+protected:
+    flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {}
+    nodeptr_t get_node_ptr() const { return my_node_ptr; }
+
+    nodeptr_t my_node_ptr;
+
+    template<typename M, typename T, typename U>
+    friend bool operator==( const flist_iterator<M,T> &i, const flist_iterator<M,U> &j );
+    template<typename M, typename T, typename U>
+    friend bool operator!=( const flist_iterator<M,T>& i, const flist_iterator<M,U>& j );
+};
+
+template<typename Solist, typename T, typename U>
+bool operator==( const flist_iterator<Solist,T> &i, const flist_iterator<Solist,U> &j ) {
+    return i.my_node_ptr == j.my_node_ptr;
+}
+template<typename Solist, typename T, typename U>
+bool operator!=( const flist_iterator<Solist,T>& i, const flist_iterator<Solist,U>& j ) {
+    return i.my_node_ptr != j.my_node_ptr;
+}
+
+// Split-order list iterators, needed to skip dummy elements
+template<class Solist, typename Value>
+class solist_iterator : public flist_iterator<Solist, Value>
+{
+    typedef flist_iterator<Solist, Value> base_type;
+    typedef typename Solist::nodeptr_t nodeptr_t;
+    using base_type::get_node_ptr;
+    template <typename T, typename Allocator>
+    friend class split_ordered_list;
+    template<class M, typename V>
+    friend class solist_iterator;
+    template<typename M, typename T, typename U>
+    friend bool operator==( const solist_iterator<M,T> &i, const solist_iterator<M,U> &j );
+    template<typename M, typename T, typename U>
+    friend bool operator!=( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );
+
+    const Solist *my_list_ptr;
+    solist_iterator(nodeptr_t pnode, const Solist *plist) : base_type(pnode), my_list_ptr(plist) {}
+
+public:
+    typedef typename Solist::value_type value_type;
+    typedef typename Solist::difference_type difference_type;
+    typedef typename Solist::pointer pointer;
+    typedef typename Solist::reference reference;
+
+    solist_iterator() {}
+    solist_iterator(const solist_iterator<Solist, typename Solist::value_type> &other )
+        : base_type(other), my_list_ptr(other.my_list_ptr) {}
+
+    reference operator*() const {
+        return this->base_type::operator*();
+    }
+
+    pointer operator->() const {
+        return (&**this);
+    }
+
+    solist_iterator& operator++() {
+        do ++(*(base_type *)this);
+        while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
+
+        return (*this);
+    }
+
+    solist_iterator operator++(int) {
+        solist_iterator tmp = *this;
+        do ++*this;
+        while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
+
+        return (tmp);
+    }
+};
+
+template<typename Solist, typename T, typename U>
+bool operator==( const solist_iterator<Solist,T> &i, const solist_iterator<Solist,U> &j ) {
+    return i.my_node_ptr == j.my_node_ptr && i.my_list_ptr == j.my_list_ptr;
+}
+template<typename Solist, typename T, typename U>
+bool operator!=( const solist_iterator<Solist,T>& i, const solist_iterator<Solist,U>& j ) {
+    return i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr;
+}
+
+// Forward type and class definitions
+typedef size_t sokey_t;
+
+// Forward list in which elements are sorted in a split-order
+template <typename T, typename Allocator>
+class split_ordered_list
+{
+public:
+    typedef split_ordered_list<T, Allocator> self_type;
+    typedef typename Allocator::template rebind<T>::other allocator_type;
+    struct node;
+    typedef node *nodeptr_t;
+
+    typedef typename allocator_type::size_type size_type;
+    typedef typename allocator_type::difference_type difference_type;
+    typedef typename allocator_type::pointer pointer;
+    typedef typename allocator_type::const_pointer const_pointer;
+    typedef typename allocator_type::reference reference;
+    typedef typename allocator_type::const_reference const_reference;
+    typedef typename allocator_type::value_type value_type;
+
+    typedef solist_iterator<self_type, const value_type> const_iterator;
+    typedef solist_iterator<self_type, value_type> iterator;
+    typedef flist_iterator<self_type, const value_type> raw_const_iterator;
+    typedef flist_iterator<self_type, value_type> raw_iterator;
+
+    // Node that holds the element in a split-ordered list
+    struct node : tbb::internal::no_assign
+    {
+        // Initialize the node with the given order key
+        void init(sokey_t order_key) {
+            my_order_key = order_key;
+            my_next = NULL;
+        }
+
+        // Return the order key (needed for hashing)
+        sokey_t get_order_key() const { // TODO: remove
+            return my_order_key;
+        }
+
+        // Inserts the new element in the list in an atomic fashion
+        nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node)
+        {
+            // Try to change the next pointer on the current element to a new element, only if it still points to the cached next
+            nodeptr_t exchange_node = (nodeptr_t) __TBB_CompareAndSwapW((void *) &my_next, (uintptr_t)new_node, (uintptr_t)current_node);
+
+            if (exchange_node == current_node) // TODO: why this branch?
+            {
+                // Operation succeeded, return the new node
+                return new_node;
+            }
+            else
+            {
+                // Operation failed, return the "interfering" node
+                return exchange_node;
+            }
+        }
+
+        // Checks if this element in the list is a dummy, order enforcing node. Dummy nodes are used by buckets
+        // in the hash table to quickly index into the right subsection of the split-ordered list.
+        bool is_dummy() const {
+            return (my_order_key & 0x1) == 0;
+        }
+
+
+        nodeptr_t  my_next;      // Next element in the list
+        value_type my_element;   // Element storage
+        sokey_t    my_order_key; // Order key for this element
+    };
+
+    // Allocate a new node with the given order key and value
+    nodeptr_t create_node(sokey_t order_key, const T &value) {
+        nodeptr_t pnode = my_node_allocator.allocate(1);
+
+        __TBB_TRY {
+            new(static_cast<void*>(&pnode->my_element)) T(value);
+            pnode->init(order_key);
+        } __TBB_CATCH(...) {
+            my_node_allocator.deallocate(pnode, 1);
+            __TBB_RETHROW();
+        }
+
+        return (pnode);
+    }
+
+    // Allocate a new node with the given order key; used to allocate dummy nodes
+    nodeptr_t create_node(sokey_t order_key) {
+        nodeptr_t pnode = my_node_allocator.allocate(1);
+
+        __TBB_TRY {
+            new(static_cast<void*>(&pnode->my_element)) T();
+            pnode->init(order_key);
+        } __TBB_CATCH(...) {
+            my_node_allocator.deallocate(pnode, 1);
+            __TBB_RETHROW();
+        }
+
+        return (pnode);
+    }
+
+    split_ordered_list(allocator_type a = allocator_type())
+        : my_node_allocator(a), my_element_count(0)
+    {
+        // Immediately allocate a dummy node with order key of 0. This node
+        // will always be the head of the list.
+        my_head = create_node(0);
+    }
+
+    ~split_ordered_list()
+    {
+        // Clear the list
+        clear();
+
+        // Remove the head element which is not cleared by clear()
+        nodeptr_t pnode = my_head;
+        my_head = NULL;
+
+        __TBB_ASSERT(pnode != NULL && pnode->my_next == NULL, "Invalid head list node");
+
+        destroy_node(pnode);
+    }
+
+    // Common forward list functions
+
+    allocator_type get_allocator() const {
+        return (my_node_allocator);
+    }
+
+    void clear() {
+        nodeptr_t pnext;
+        nodeptr_t pnode = my_head;
+
+        __TBB_ASSERT(my_head != NULL, "Invalid head list node");
+        pnext = pnode->my_next;
+        pnode->my_next = NULL;
+        pnode = pnext;
+
+        while (pnode != NULL)
+        {
+            pnext = pnode->my_next;
+            destroy_node(pnode);
+            pnode = pnext;
+        }
+
+        my_element_count = 0;
+    }
+
+    // Returns the first non-dummy element in the SOL
+    iterator begin() {
+        return first_real_iterator(raw_begin());
+    }
+
+    // Returns the first non-dummy element in the SOL
+    const_iterator begin() const {
+        return first_real_iterator(raw_begin());
+    }
+
+    iterator end() {
+        return (iterator(0, this));
+    }
+
+    const_iterator end() const {
+        return (const_iterator(0, this));
+    }
+
+    const_iterator cbegin() const {
+        return (((const self_type *)this)->begin());
+    }
+
+    const_iterator cend() const {
+        return (((const self_type *)this)->end());
+    }
+
+    // Checks if the number of elements (non-dummy) is 0
+    bool empty() const {
+        return (my_element_count == 0);
+    }
+
+    // Returns the number of non-dummy elements in the list
+    size_type size() const {
+        return my_element_count;
+    }
+
+    // Returns the maximum size of the list, determined by the allocator
+    size_type max_size() const {
+        return my_node_allocator.max_size();
+    }
+
+    // Swaps 'this' list with the passed in one
+    void swap(self_type& other)
+    {
+        if (this == &other)
+        {
+            // Nothing to do
+            return;
+        }
+
+        std::swap(my_element_count, other.my_element_count);
+        std::swap(my_head, other.my_head);
+    }
+
+    // Split-order list functions
+
+    // Returns the first element in the SOL, which is always a dummy
+    raw_iterator raw_begin() {
+        return raw_iterator(my_head);
+    }
+
+    // Returns the first element in the SOL, which is always a dummy
+    raw_const_iterator raw_begin() const {
+        return raw_const_iterator(my_head);
+    }
+
+    raw_iterator raw_end() {
+        return raw_iterator(0);
+    }
+
+    raw_const_iterator raw_end() const {
+        return raw_const_iterator(0);
+    }
+
+    static sokey_t get_order_key(const raw_const_iterator& it) {
+        return it.get_node_ptr()->get_order_key();
+    }
+
+    static sokey_t get_safe_order_key(const raw_const_iterator& it) {
+        if( !it.get_node_ptr() ) return sokey_t(~0U);
+        return it.get_node_ptr()->get_order_key();
+    }
+
+    // Returns a public iterator version of the internal iterator. The public iterator must not
+    // point to a dummy node.
+    iterator get_iterator(raw_iterator it) {
+        __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
+        return iterator(it.get_node_ptr(), this);
+    }
+
+    // Returns a public iterator version of the internal iterator. The public iterator must not
+    // point to a dummy node.
+    const_iterator get_iterator(raw_const_iterator it) const {
+        __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
+        return const_iterator(it.get_node_ptr(), this);
+    }
+
+    // Returns a non-const version of the raw_iterator
+    raw_iterator get_iterator(raw_const_iterator it) {
+        return raw_iterator(it.get_node_ptr());
+    }
+
+    // Returns a non-const version of the iterator
+    static iterator get_iterator(const_iterator it) {
+        return iterator(it.my_node_ptr, it.my_list_ptr);
+    }
+
+    // Returns a public iterator version of the first non-dummy internal iterator at or after
+    // the passed-in internal iterator.
+    iterator first_real_iterator(raw_iterator it)
+    {
+        // Skip all dummy, internal only iterators
+        while (it != raw_end() && it.get_node_ptr()->is_dummy())
+            ++it;
+
+        return iterator(it.get_node_ptr(), this);
+    }
+
+    // Returns a public iterator version of the first non-dummy internal iterator at or after
+    // the passed-in internal iterator.
+    const_iterator first_real_iterator(raw_const_iterator it) const
+    {
+        // Skip all dummy, internal only iterators
+        while (it != raw_end() && it.get_node_ptr()->is_dummy())
+            ++it;
+
+        return const_iterator(it.get_node_ptr(), this);
+    }
+
+    // Destroy a node and release its storage through the allocator
+    void destroy_node(nodeptr_t pnode) {
+        my_node_allocator.destroy(pnode);
+        my_node_allocator.deallocate(pnode, 1);
+    }
+
+    // Try to insert a new element into the list. Returns the new node on success;
+    // on failure, returns the interfering node that now occupies that position.
+    nodeptr_t try_insert(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) {
+        new_node->my_next = current_node;
+        return previous->atomic_set_next(new_node, current_node);
+    }
+
+    // Insert a new element between passed in iterators
+    std::pair<iterator, bool> try_insert(raw_iterator it, raw_iterator next, const value_type &value, sokey_t order_key, size_type *new_count)
+    {
+        nodeptr_t pnode = create_node(order_key, value);
+        nodeptr_t inserted_node = try_insert(it.get_node_ptr(), pnode, next.get_node_ptr());
+
+        if (inserted_node == pnode)
+        {
+            // If the insert succeeded, check that the order is correct and increment the element count
+            check_range();
+            *new_count = __TBB_FetchAndAddW((uintptr_t*)&my_element_count, uintptr_t(1));
+            return std::pair<iterator, bool>(iterator(pnode, this), true);
+        }
+        else
+        {
+            // If the insert failed (element already there), then delete the new one
+            destroy_node(pnode);
+            return std::pair<iterator, bool>(end(), false);
+        }
+    }
+
+    // Insert a new dummy element, starting search at a parent dummy element
+    raw_iterator insert_dummy(raw_iterator it, sokey_t order_key)
+    {
+        raw_iterator last = raw_end();
+        raw_iterator where = it;
+
+        __TBB_ASSERT(where != last, "Invalid head node");
+
+        ++where;
+
+        // Create a dummy element up front, even though it may be discarded (due to concurrent insertion)
+        nodeptr_t dummy_node = create_node(order_key);
+
+        for (;;)
+        {
+            __TBB_ASSERT(it != last, "Invalid head list node");
+
+            // If the head iterator is at the end of the list, or past the point where this dummy
+            // node needs to be inserted, then try to insert it.
+            if (where == last || get_order_key(where) > order_key)
+            {
+                __TBB_ASSERT(get_order_key(it) < order_key, "Invalid node order in the list");
+
+                // Try to insert it in the right place
+                nodeptr_t inserted_node = try_insert(it.get_node_ptr(), dummy_node, where.get_node_ptr());
+
+                if (inserted_node == dummy_node)
+                {
+                    // Insertion succeeded, check the list for order violations
+                    check_range();
+                    return raw_iterator(dummy_node);
+                }
+                else
+                {
+                    // Insertion failed: either dummy node was inserted by another thread, or
+                    // a real element was inserted at exactly the same place as dummy node.
+                    // Proceed with the search from the previous location where order key was
+                    // known to be larger (note: this is legal only because there is no safe
+                    // concurrent erase operation supported).
+                    where = it;
+                    ++where;
+                    continue;
+                }
+            }
+            else if (get_order_key(where) == order_key)
+            {
+                // Another dummy node with the same value found, discard the new one.
+                destroy_node(dummy_node);
+                return where;
+            }
+
+            // Move the iterator forward
+            it = where;
+            ++where;
+        }
+
+    }
+
+    // This erase function can handle both real and dummy nodes
+    void erase_node(raw_iterator previous, raw_const_iterator& where)
+    {
+        nodeptr_t pnode = (where++).get_node_ptr();
+        nodeptr_t prevnode = previous.get_node_ptr();
+        __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators");
+        prevnode->my_next = pnode->my_next;
+
+        destroy_node(pnode);
+    }
+
+    // Erase the element (the previous node must be passed in because this is a forward-only list)
+    iterator erase_node(raw_iterator previous, const_iterator where)
+    {
+        raw_const_iterator it = where;
+        erase_node(previous, it);
+        my_element_count--;
+
+        return get_iterator(first_real_iterator(it));
+    }
+
+    // Move all elements from the passed-in split-ordered list to this one
+    void move_all(self_type& source)
+    {
+        raw_const_iterator first = source.raw_begin();
+        raw_const_iterator last = source.raw_end();
+
+        if (first == last)
+            return;
+
+        nodeptr_t previous_node = my_head;
+        raw_const_iterator begin_iterator = first++;
+
+        // Move all elements one by one, including dummy ones
+        for (raw_const_iterator it = first; it != last;)
+        {
+            nodeptr_t pnode = it.get_node_ptr();
+
+            nodeptr_t dummy_node = pnode->is_dummy() ? create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element);
+            previous_node = try_insert(previous_node, dummy_node, NULL);
+            __TBB_ASSERT(previous_node != NULL, "Insertion must succeed");
+            raw_const_iterator where = it++;
+            source.erase_node(get_iterator(begin_iterator), where);
+        }
+        check_range();
+    }
+
+
+private:
+
+    // Check the list for order violations
+    void check_range()
+    {
+#if TBB_USE_ASSERT
+        for (raw_iterator it = raw_begin(); it != raw_end(); ++it)
+        {
+            raw_iterator next_iterator = it;
+            ++next_iterator;
+
+            __TBB_ASSERT(next_iterator == end() || next_iterator.get_node_ptr()->get_order_key() >= it.get_node_ptr()->get_order_key(), "!!! List order inconsistency !!!");
+        }
+#endif
+    }
+
+    typename allocator_type::template rebind<node>::other my_node_allocator;  // allocator object for nodes
+    size_type                                             my_element_count;   // Total item count, not counting dummy nodes
+    nodeptr_t                                             my_head;            // pointer to head node
+};
+
+// Template class for hash compare
+template<typename Key, typename Hasher, typename Key_equality>
+class hash_compare
+{
+public:
+    hash_compare() {}
+
+    hash_compare(Hasher a_hasher) : my_hash_object(a_hasher) {}
+
+    hash_compare(Hasher a_hasher, Key_equality a_keyeq) : my_hash_object(a_hasher), my_key_compare_object(a_keyeq) {}
+
+    size_t operator()(const Key& key) const {
+        return ((size_t)my_hash_object(key));
+    }
+
+    bool operator()(const Key& key1, const Key& key2) const {
+        return (!my_key_compare_object(key1, key2));
+    }
+
+    Hasher       my_hash_object;        // The hash object
+    Key_equality my_key_compare_object; // The equality comparator object
+};
+
+#if _MSC_VER
+#pragma warning(push)
+#pragma warning(disable: 4127) // warning 4127 -- while (true) has a constant expression in it (for allow_multimapping)
+#endif
+
+template <typename Traits>
+class concurrent_unordered_base : public Traits
+{
+protected:
+    // Type definitions
+    typedef concurrent_unordered_base<Traits> self_type;
+    typedef typename Traits::value_type value_type;
+    typedef typename Traits::key_type key_type;
+    typedef typename Traits::hash_compare hash_compare;
+    typedef typename Traits::value_compare value_compare;
+    typedef typename Traits::allocator_type allocator_type;
+    typedef typename allocator_type::pointer pointer;
+    typedef typename allocator_type::const_pointer const_pointer;
+    typedef typename allocator_type::reference reference;
+    typedef typename allocator_type::const_reference const_reference;
+    typedef typename allocator_type::size_type size_type;
+    typedef typename allocator_type::difference_type difference_type;
+    typedef split_ordered_list<value_type, typename Traits::allocator_type> solist_t;
+    typedef typename solist_t::nodeptr_t nodeptr_t;
+    // Iterators that walk the entire split-order list, including dummy nodes
+    typedef typename solist_t::raw_iterator raw_iterator;
+    typedef typename solist_t::raw_const_iterator raw_const_iterator;
+    typedef typename solist_t::iterator iterator; // TODO: restore const iterator for unordered_sets
+    typedef typename solist_t::const_iterator const_iterator;
+    typedef iterator local_iterator;
+    typedef const_iterator const_local_iterator;
+    using Traits::my_hash_compare;
+    using Traits::get_key;
+    using Traits::allow_multimapping;
+
+private:
+    typedef std::pair<iterator, iterator> pairii_t;
+    typedef std::pair<const_iterator, const_iterator> paircc_t;
+
+    static size_type const pointers_per_table = sizeof(size_type) * 8;              // One bucket segment per bit
+    static const size_type initial_bucket_number = 8;                               // Initial number of buckets
+    static const size_type initial_bucket_load = 4;                                // Initial maximum number of elements per bucket
+
+protected:
+    // Constructors/Destructors
+    concurrent_unordered_base(size_type n_of_buckets = initial_bucket_number,
+        const hash_compare& hc = hash_compare(), const allocator_type& a = allocator_type())
+        : Traits(hc), my_solist(a),
+          my_allocator(a), my_maximum_bucket_size((float) initial_bucket_load)
+    {
+        if( n_of_buckets == 0) ++n_of_buckets;
+        my_number_of_buckets = 1<<__TBB_Log2((uintptr_t)n_of_buckets*2-1); // round up to power of 2
+        internal_init();
+    }
+
+    concurrent_unordered_base(const concurrent_unordered_base& right, const allocator_type& a)
+        : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
+    {
+        internal_copy(right);
+    }
+
+    concurrent_unordered_base(const concurrent_unordered_base& right)
+        : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator())
+    {
+        internal_init();
+        internal_copy(right);
+    }
+
+    concurrent_unordered_base& operator=(const concurrent_unordered_base& right) {
+        if (this != &right)
+            internal_copy(right);
+        return (*this);
+    }
+
+    ~concurrent_unordered_base() {
+        // Delete all node segments
+        internal_clear();
+    }
+
+public:
+    allocator_type get_allocator() const {
+        return my_solist.get_allocator();
+    }
+
+    // Size and capacity functions
+    bool empty() const {
+        return my_solist.empty();
+    }
+
+    size_type size() const {
+        return my_solist.size();
+    }
+
+    size_type max_size() const {
+        return my_solist.max_size();
+    }
+
+    // Iterators 
+    iterator begin() {
+        return my_solist.begin();
+    }
+
+    const_iterator begin() const {
+        return my_solist.begin();
+    }
+
+    iterator end() {
+        return my_solist.end();
+    }
+
+    const_iterator end() const {
+        return my_solist.end();
+    }
+
+    const_iterator cbegin() const {
+        return my_solist.cbegin();
+    }
+
+    const_iterator cend() const {
+        return my_solist.cend();
+    }
+
+    // Parallel traversal support
+    class const_range_type : tbb::internal::no_assign {
+        const concurrent_unordered_base &my_table;
+        raw_const_iterator my_begin_node;
+        raw_const_iterator my_end_node;
+        mutable raw_const_iterator my_midpoint_node;
+    public:
+        //! Type for size of a range
+        typedef typename concurrent_unordered_base::size_type size_type;
+        typedef typename concurrent_unordered_base::value_type value_type;
+        typedef typename concurrent_unordered_base::reference reference;
+        typedef typename concurrent_unordered_base::difference_type difference_type;
+        typedef typename concurrent_unordered_base::const_iterator iterator;
+
+        //! True if range is empty.
+        bool empty() const {return my_begin_node == my_end_node;}
+
+        //! True if range can be partitioned into two subranges.
+        bool is_divisible() const {
+            return my_midpoint_node != my_end_node;
+        }
+        //! Split range.
+        const_range_type( const_range_type &r, split ) : 
+            my_table(r.my_table), my_end_node(r.my_end_node)
+        {
+            r.my_end_node = my_begin_node = r.my_midpoint_node;
+            __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
+            __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
+            set_midpoint();
+            r.set_midpoint();
+        }
+        //! Init range with container and grainsize specified
+        const_range_type( const concurrent_unordered_base &a_table ) : 
+            my_table(a_table), my_begin_node(a_table.my_solist.begin()),
+            my_end_node(a_table.my_solist.end())
+        {
+            set_midpoint();
+        }
+        iterator begin() const { return my_table.my_solist.get_iterator(my_begin_node); }
+        iterator end() const { return my_table.my_solist.get_iterator(my_end_node); }
+        //! The grain size for this range.
+        size_type grainsize() const { return 1; }
+
+        //! Set my_midpoint_node to point approximately half way between my_begin_node and my_end_node.
+        void set_midpoint() const {
+            if( my_begin_node == my_end_node ) // not divisible
+                my_midpoint_node = my_end_node;
+            else {
+                sokey_t begin_key = solist_t::get_safe_order_key(my_begin_node);
+                sokey_t end_key = solist_t::get_safe_order_key(my_end_node);
+                size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets;
+                while ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket);
+                my_midpoint_node = my_table.my_solist.first_real_iterator(my_table.get_bucket( mid_bucket ));
+                if( my_midpoint_node == my_begin_node )
+                    my_midpoint_node = my_end_node;
+#if TBB_USE_ASSERT
+                else {
+                    sokey_t mid_key = solist_t::get_safe_order_key(my_midpoint_node);
+                    __TBB_ASSERT( begin_key < mid_key, "my_begin_node is after my_midpoint_node" );
+                    __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is after my_end_node" );
+                }
+#endif // TBB_USE_ASSERT
+            }
+        }
+    };
+
+    class range_type : public const_range_type {
+    public:
+        typedef typename concurrent_unordered_base::iterator iterator;
+        //! Split range.
+        range_type( range_type &r, split ) : const_range_type( r, split() ) {}
+        //! Init range with container and grainsize specified
+        range_type( const concurrent_unordered_base &a_table ) : const_range_type(a_table) {}
+
+        iterator begin() const { return solist_t::get_iterator( const_range_type::begin() ); }
+        iterator end() const { return solist_t::get_iterator( const_range_type::end() ); }
+    };
+
+    range_type range() {
+        return range_type( *this );
+    }
+
+    const_range_type range() const {
+        return const_range_type( *this );
+    }
+
+    // Modifiers
+    std::pair<iterator, bool> insert(const value_type& value) {
+        return internal_insert(value);
+    }
+
+    iterator insert(const_iterator, const value_type& value) {
+        // Ignore hint
+        return insert(value).first;
+    }
+
+    template<class Iterator>
+    void insert(Iterator first, Iterator last) {
+        for (Iterator it = first; it != last; ++it)
+            insert(*it);
+    }
+
+    iterator unsafe_erase(const_iterator where) {
+        return internal_erase(where);
+    }
+
+    iterator unsafe_erase(const_iterator first, const_iterator last) {
+        while (first != last)
+            unsafe_erase(first++);
+        return my_solist.get_iterator(first);
+    }
+
+    size_type unsafe_erase(const key_type& key) {
+        pairii_t where = equal_range(key);
+        size_type item_count = internal_distance(where.first, where.second);
+        unsafe_erase(where.first, where.second);
+        return item_count;
+    }
+
+    void swap(concurrent_unordered_base& right) {
+        if (this != &right) {
+            std::swap(my_hash_compare, right.my_hash_compare); // TODO: check what ADL meant here
+            my_solist.swap(right.my_solist);
+            internal_swap_buckets(right);
+            std::swap(my_number_of_buckets, right.my_number_of_buckets);
+            std::swap(my_maximum_bucket_size, right.my_maximum_bucket_size);
+        }
+    }
+
+    // Observers
+    void clear() {
+        // Clear list
+        my_solist.clear();
+
+        // Clear buckets
+        internal_clear();
+    }
+
+    // Lookup
+    iterator find(const key_type& key) {
+        return internal_find(key);
+    }
+
+    const_iterator find(const key_type& key) const {
+        return const_cast<self_type*>(this)->internal_find(key);
+    }
+
+    size_type count(const key_type& key) const {
+        if(allow_multimapping) {
+            paircc_t answer = equal_range(key);
+            size_type item_count = internal_distance(answer.first, answer.second);
+            return item_count;
+        } else {
+            return const_cast<self_type*>(this)->internal_find(key) == end()?0:1;
+        }
+    }
+
+    std::pair<iterator, iterator> equal_range(const key_type& key) {
+        return internal_equal_range(key);
+    }
+
+    std::pair<const_iterator, const_iterator> equal_range(const key_type& key) const {
+        return const_cast<self_type*>(this)->internal_equal_range(key);
+    }
+
+    // Bucket interface - for debugging 
+    size_type unsafe_bucket_count() const {
+        return my_number_of_buckets;
+    }
+
+    size_type unsafe_max_bucket_count() const {
+        return segment_size(pointers_per_table-1);
+    }
+
+    size_type unsafe_bucket_size(size_type bucket) {
+        size_type item_count = 0;
+        if (is_initialized(bucket)) {
+            raw_iterator it = get_bucket(bucket);
+            ++it;
+            for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); ++it)
+                ++item_count;
+        }
+        return item_count;
+    }
+
+    size_type unsafe_bucket(const key_type& key) const {
+        sokey_t order_key = (sokey_t) my_hash_compare(key);
+        size_type bucket = order_key % my_number_of_buckets;
+        return bucket;
+    }
+
+    // If the bucket is initialized, return the first non-dummy element in it
+    local_iterator unsafe_begin(size_type bucket) {
+        if (!is_initialized(bucket))
+            return end();
+
+        raw_iterator it = get_bucket(bucket);
+        return my_solist.first_real_iterator(it);
+    }
+
+    // If the bucket is initialized, return the first non-dummy element in it
+    const_local_iterator unsafe_begin(size_type bucket) const
+    {
+        if (!is_initialized(bucket))
+            return end();
+
+        raw_const_iterator it = get_bucket(bucket);
+        return my_solist.first_real_iterator(it);
+    }
+
+    // @REVIEW: Takes O(n)
+    // Returns the iterator after the last non-dummy element in the bucket
+    local_iterator unsafe_end(size_type bucket)
+    {
+        if (!is_initialized(bucket))
+            return end();
+
+        raw_iterator it = get_bucket(bucket);
+    
+        // Find the end of the bucket, denoted by the dummy element
+        do ++it;
+        while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
+
+        // Return the first real element past the end of the bucket
+        return my_solist.first_real_iterator(it);
+    }
+
+    // @REVIEW: Takes O(n)
+    // Returns the iterator after the last non-dummy element in the bucket
+    const_local_iterator unsafe_end(size_type bucket) const
+    {
+        if (!is_initialized(bucket))
+            return end();
+
+        raw_const_iterator it = get_bucket(bucket);
+    
+        // Find the end of the bucket, denoted by the dummy element
+        do ++it;
+        while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
+
+        // Return the first real element past the end of the bucket
+        return my_solist.first_real_iterator(it);
+    }
+
+    const_local_iterator unsafe_cbegin(size_type bucket) const {
+        return ((const self_type *) this)->begin();
+    }
+
+    const_local_iterator unsafe_cend(size_type bucket) const {
+        return ((const self_type *) this)->end();
+    }
+
+    // Hash policy
+    float load_factor() const {
+        return (float) size() / (float) unsafe_bucket_count();
+    }
+
+    float max_load_factor() const {
+        return my_maximum_bucket_size;
+    }
+
+    void max_load_factor(float newmax) {
+        if (newmax != newmax || newmax < 0)
+            tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor);
+        my_maximum_bucket_size = newmax;
+    }
+
+    // Rehashing never moves elements, because the underlying split-ordered list
+    // is already sorted; this function only records a larger bucket count, and
+    // the new buckets are materialized lazily the next time they are touched.
+    void rehash(size_type buckets) {
+        size_type current_buckets = my_number_of_buckets;
+        if (current_buckets >= buckets)
+            return;
+        my_number_of_buckets = 1<<__TBB_Log2((uintptr_t)buckets*2-1); // round up to power of 2
+    }
+
+private:
+
+    // Initialize the segment table and point bucket 0 at the head dummy node of the list
+    void internal_init() {
+        // Allocate an array of segment pointers
+        memset(my_buckets, 0, pointers_per_table * sizeof(void *));
+
+        // Insert the first element in the split-ordered list
+        raw_iterator dummy_node = my_solist.raw_begin();
+        set_bucket(0, dummy_node);
+    }
+
+    void internal_clear() {
+        for (size_type index = 0; index < pointers_per_table; ++index) {
+            if (my_buckets[index] != NULL) {
+                size_type sz = segment_size(index);
+                for (size_type index2 = 0; index2 < sz; ++index2)
+                    my_allocator.destroy(&my_buckets[index][index2]);
+                my_allocator.deallocate(my_buckets[index], sz);
+                my_buckets[index] = 0;
+            }
+        }
+    }
+
+    void internal_copy(const self_type& right) {
+        clear();
+
+        my_maximum_bucket_size = right.my_maximum_bucket_size;
+        my_number_of_buckets = right.my_number_of_buckets;
+
+        __TBB_TRY {
+            insert(right.begin(), right.end());
+            my_hash_compare = right.my_hash_compare;
+        } __TBB_CATCH(...) {
+            my_solist.clear();
+            __TBB_RETHROW();
+        }
+    }
+
+    void internal_swap_buckets(concurrent_unordered_base& right)
+    {
+        // Swap all node segments
+        for (size_type index = 0; index < pointers_per_table; ++index)
+        {
+            raw_iterator * iterator_pointer = my_buckets[index];
+            my_buckets[index] = right.my_buckets[index];
+            right.my_buckets[index] = iterator_pointer;
+        }
+    }
+
+    // Hash APIs
+    size_type internal_distance(const_iterator first, const_iterator last) const
+    {
+        size_type num = 0;
+
+        for (const_iterator it = first; it != last; ++it)
+            ++num;
+
+        return num;
+    }
+
+    // Insert an element in the hash given its value
+    std::pair<iterator, bool> internal_insert(const value_type& value)
+    {
+        sokey_t order_key = (sokey_t) my_hash_compare(get_key(value));
+        size_type bucket = order_key % my_number_of_buckets;
+
+        // If bucket is empty, initialize it first
+        if (!is_initialized(bucket))
+            init_bucket(bucket);
+
+        size_type new_count;
+        order_key = split_order_key_regular(order_key);
+        raw_iterator it = get_bucket(bucket);
+        raw_iterator last = my_solist.raw_end();
+        raw_iterator where = it;
+
+        __TBB_ASSERT(where != last, "Invalid head node");
+
+        // First node is a dummy node
+        ++where;
+
+        for (;;)
+        {
+            if (where == last || solist_t::get_order_key(where) > order_key)
+            {
+                // Try to insert it in the right place
+                std::pair<iterator, bool> result = my_solist.try_insert(it, where, value, order_key, &new_count);
+                
+                if (result.second)
+                {
+                    // Insertion succeeded, adjust the table size, if needed
+                    adjust_table_size(new_count, my_number_of_buckets);
+                    return result;
+                }
+                else
+                {
+                    // Insertion failed: either the same node was inserted by another thread, or
+                    // another element was inserted at exactly the same place as this node.
+                    // Proceed with the search from the previous location where order key was
+                    // known to be larger (note: this is legal only because there is no safe
+                    // concurrent erase operation supported).
+                    where = it;
+                    ++where;
+                    continue;
+                }
+            }
+            else if (!allow_multimapping && solist_t::get_order_key(where) == order_key && my_hash_compare(get_key(*where), get_key(value)) == 0)
+            {
+                // Element already in the list, return it
+                return std::pair<iterator, bool>(my_solist.get_iterator(where), false);
+            }
+
+            // Move the iterator forward
+            it = where;
+            ++where;
+        }
+    }
+
+    // Find the element in the split-ordered list
+    iterator internal_find(const key_type& key)
+    {
+        sokey_t order_key = (sokey_t) my_hash_compare(key);
+        size_type bucket = order_key % my_number_of_buckets;
+
+        // If bucket is empty, initialize it first
+        if (!is_initialized(bucket))
+            init_bucket(bucket);
+
+        order_key = split_order_key_regular(order_key);
+        raw_iterator last = my_solist.raw_end();
+
+        for (raw_iterator it = get_bucket(bucket); it != last; ++it)
+        {
+            if (solist_t::get_order_key(it) > order_key)
+            {
+                // If the order key is smaller than the current order key, the element
+                // is not in the hash.
+                return end();
+            }
+            else if (solist_t::get_order_key(it) == order_key)
+            {
+                // The fact that order keys match does not mean that the element is found.
+                // Key function comparison has to be performed to check whether this is the
+                // right element. If not, keep searching while order key is the same.
+                if (!my_hash_compare(get_key(*it), key))
+                    return my_solist.get_iterator(it);
+            }
+        }
+
+        return end();
+    }
+
+    // Erase an element from the list. This is not a concurrency safe function.
+    iterator internal_erase(const_iterator it)
+    {
+        key_type key = get_key(*it);
+        sokey_t order_key = (sokey_t) my_hash_compare(key);
+        size_type bucket = order_key % my_number_of_buckets;
+
+        // If bucket is empty, initialize it first
+        if (!is_initialized(bucket))
+            init_bucket(bucket);
+
+        order_key = split_order_key_regular(order_key);
+
+        raw_iterator previous = get_bucket(bucket);
+        raw_iterator last = my_solist.raw_end();
+        raw_iterator where = previous;
+
+        __TBB_ASSERT(where != last, "Invalid head node");
+
+        // First node is a dummy node
+        ++where;
+
+        for (;;) {
+            if (where == last)
+                return end();
+            else if (my_solist.get_iterator(where) == it)
+                return my_solist.erase_node(previous, it);
+
+            // Move the iterator forward
+            previous = where;
+            ++where;
+        }
+    }
+
+    // Return the [begin, end) pair of iterators delimiting the elements that compare equal to the key.
+    // A range longer than one element is possible only when multimapping is allowed.
+    pairii_t internal_equal_range(const key_type& key)
+    {
+        sokey_t order_key = (sokey_t) my_hash_compare(key);
+        size_type bucket = order_key % my_number_of_buckets;
+
+        // If bucket is empty, initialize it first
+        if (!is_initialized(bucket))
+            init_bucket(bucket);
+
+        order_key = split_order_key_regular(order_key);
+        raw_iterator end_it = my_solist.raw_end();
+
+        for (raw_iterator it = get_bucket(bucket); it != end_it; ++it)
+        {
+            if (solist_t::get_order_key(it) > order_key)
+            {
+                // There is no element with the given key
+                return pairii_t(end(), end());
+            }
+            else if (solist_t::get_order_key(it) == order_key && !my_hash_compare(get_key(*it), key))
+            {
+                iterator first = my_solist.get_iterator(it);
+                iterator last = first;
+                do ++last; while( allow_multimapping && last != end() && !my_hash_compare(get_key(*last), key) );
+                return pairii_t(first, last);
+            }
+        }
+
+        return pairii_t(end(), end());
+    }
+
+    // Bucket APIs
+    void init_bucket(size_type bucket)
+    {
+        // Bucket 0 has no parent. Initialize it and return.
+        if (bucket == 0) {
+            internal_init();
+            return;
+        }
+
+        size_type parent_bucket = get_parent(bucket);
+
+        // The parent bucket (and, recursively, its ancestors) must be initialized before this bucket
+        if (!is_initialized(parent_bucket))
+            init_bucket(parent_bucket);
+
+        raw_iterator parent = get_bucket(parent_bucket);
+
+        // Create a dummy first node in this bucket
+        raw_iterator dummy_node = my_solist.insert_dummy(parent, split_order_key_dummy(bucket));
+        set_bucket(bucket, dummy_node);
+    }
+
+    void adjust_table_size(size_type total_elements, size_type current_size)
+    {
+        // Grow the table by a factor of 2 if possible and needed
+        if ( ((float) total_elements / (float) current_size) > my_maximum_bucket_size )
+        {
+            // Double the size of the hash only if the size has not changed in between the loads
+            __TBB_CompareAndSwapW((uintptr_t*)&my_number_of_buckets, uintptr_t(2u*current_size), uintptr_t(current_size) );
+            //Simple "my_number_of_buckets.compare_and_swap( current_size<<1, current_size );" does not work for VC8
+            //due to overzealous compiler warnings in /Wp64 mode
+        }
+    }
+
+    size_type get_parent(size_type bucket) const
+    {
+        // Clears the most significant set bit of the bucket index to obtain its parent
+        size_type msb = __TBB_Log2((uintptr_t)bucket);
+        return bucket & ~(size_type(1) << msb);
+    }
+
+
+    // Dynamic sized array (segments)
+    //! @return segment index of given index in the array
+    static size_type segment_index_of( size_type index ) {
+        return size_type( __TBB_Log2( uintptr_t(index|1) ) );
+    }
+
+    //! @return the first array index of given segment
+    static size_type segment_base( size_type k ) {
+        return (size_type(1)<<k & ~size_type(1));
+    }
+
+    //! @return segment size
+    static size_type segment_size( size_type k ) {
+        return k? size_type(1)<<k : 2;
+    }
+
+    raw_iterator get_bucket(size_type bucket) const {
+        size_type segment = segment_index_of(bucket);
+        bucket -= segment_base(segment);
+        __TBB_ASSERT( my_buckets[segment], "bucket must be in an allocated segment" );
+        return my_buckets[segment][bucket];
+    }
+
+    void set_bucket(size_type bucket, raw_iterator dummy_head) {
+        size_type segment = segment_index_of(bucket);
+        bucket -= segment_base(segment);
+
+        if (my_buckets[segment] == NULL) {
+            size_type sz = segment_size(segment);
+            raw_iterator * new_segment = my_allocator.allocate(sz);
+            std::memset(new_segment, 0, sz*sizeof(raw_iterator));
+
+            if (__TBB_CompareAndSwapW((void *) &my_buckets[segment], (uintptr_t)new_segment, 0) != 0)
+                my_allocator.deallocate(new_segment, sz);
+        }
+
+        my_buckets[segment][bucket] = dummy_head;
+    }
+
+    bool is_initialized(size_type bucket) const {
+        size_type segment = segment_index_of(bucket);
+        bucket -= segment_base(segment);
+
+        if (my_buckets[segment] == NULL)
+            return false;
+
+        raw_iterator it = my_buckets[segment][bucket];
+        return (it.get_node_ptr() != NULL);
+    }
+
+    // Utilities for keys
+
+    // A regular order key has its original hash value reversed and the low-order bit set
+    sokey_t split_order_key_regular(sokey_t order_key) const {
+        return __TBB_ReverseBits(order_key) | 0x1;
+    }
+
+    // A dummy order key has its original hash value reversed and the low-order bit cleared
+    sokey_t split_order_key_dummy(sokey_t order_key) const {
+        return __TBB_ReverseBits(order_key) & ~(0x1);
+    }
+
+    // Shared variables
+    atomic<size_type>                                             my_number_of_buckets;       // Current table size
+    solist_t                                                      my_solist;                  // List where all the elements are kept
+    typename allocator_type::template rebind<raw_iterator>::other my_allocator;               // Allocator object for segments
+    float                                                         my_maximum_bucket_size;     // Maximum size of the bucket
+    atomic<raw_iterator*>                                         my_buckets[pointers_per_table]; // The segment table
+};
+#if _MSC_VER
+#pragma warning(pop) // warning 4127 -- while (true) has a constant expression in it
+#endif
+
+//! Hash multiplier
+static const size_t hash_multiplier = sizeof(size_t)==4? 2654435769U : 11400714819323198485ULL;
+} // namespace internal
+//! @endcond
+//! Hasher functions
+template<typename T>
+inline size_t tbb_hasher( const T& t ) {
+    return static_cast<size_t>( t ) * internal::hash_multiplier;
+}
+template<typename P>
+inline size_t tbb_hasher( P* ptr ) {
+    size_t const h = reinterpret_cast<size_t>( ptr );
+    return (h >> 3) ^ h;
+}
+template<typename E, typename S, typename A>
+inline size_t tbb_hasher( const std::basic_string<E,S,A>& s ) {
+    size_t h = 0;
+    for( const E* c = s.c_str(); *c; ++c )
+        h = static_cast<size_t>(*c) ^ (h * internal::hash_multiplier);
+    return h;
+}
+template<typename F, typename S>
+inline size_t tbb_hasher( const std::pair<F,S>& p ) {
+    return tbb_hasher(p.first) ^ tbb_hasher(p.second);
+}
+} // namespace interface5
+using interface5::tbb_hasher;
+} // namespace tbb
+#endif// __TBB_concurrent_unordered_internal_H
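The split-ordered list in the header above keys every node by a bit-reversed hash value: regular elements carry the reversed hash with the low-order bit set, bucket dummies carry it with the low-order bit cleared, and a bucket's parent is obtained by clearing the most significant set bit of the bucket index. The standalone sketch below (not part of TBB or of this patch) illustrates that arithmetic; reverse_bits() and parent_bucket() are hand-written stand-ins for the internal __TBB_ReverseBits and __TBB_Log2 primitives, not TBB APIs.

#include <cstddef>
#include <climits>
#include <cstdio>

// Portable stand-in for __TBB_ReverseBits: reverse all bits of a size_t.
static std::size_t reverse_bits( std::size_t x ) {
    std::size_t r = 0;
    for( unsigned i = 0; i < sizeof(std::size_t) * CHAR_BIT; ++i, x >>= 1 )
        r = (r << 1) | (x & 1);
    return r;
}

// Regular key: reversed hash with the low-order bit set (always odd).
static std::size_t split_order_key_regular( std::size_t hash ) {
    return reverse_bits(hash) | 0x1;
}

// Dummy (bucket head) key: reversed bucket index with the low-order bit cleared (always even).
static std::size_t split_order_key_dummy( std::size_t bucket ) {
    return reverse_bits(bucket) & ~std::size_t(0x1);
}

// Parent bucket: clear the most significant set bit of the bucket index.
static std::size_t parent_bucket( std::size_t bucket ) {
    unsigned msb = 0;
    for( std::size_t b = bucket; b >>= 1; ) ++msb;
    return bucket & ~(std::size_t(1) << msb);
}

int main() {
    std::printf( "parent of bucket 6 = %lu\n", (unsigned long)parent_bucket(6) );  // prints 2
    std::printf( "parent of bucket 2 = %lu\n", (unsigned long)parent_bucket(2) );  // prints 0
    std::printf( "dummy key of bucket 6 is even: %d\n", int(split_order_key_dummy(6) % 2 == 0) );
    std::printf( "regular key of hash 6 is odd:  %d\n", int(split_order_key_regular(6) % 2 == 1) );
    return 0;
}

Because dummy keys are even and regular keys are odd, a bucket's dummy node always sorts immediately before the elements that hash into that bucket, which is what lets insert_dummy() splice a new bucket head into the already sorted list without moving any existing nodes.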
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/_tbb_windef.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/_tbb_windef.h
new file mode 100644 (file)
index 0000000..7ca1069
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_tbb_windef_H
+#error Do not #include this file directly.  Use "#include tbb/tbb_stddef.h" instead.
+#endif /* __TBB_tbb_windef_H */
+
+// Check that the target Windows version has all API calls required for TBB.
+// Do not increase the version in condition beyond 0x0500 without prior discussion!
+#if defined(_WIN32_WINNT) && _WIN32_WINNT<0x0400
+#error TBB is unable to run on old Windows versions; _WIN32_WINNT must be 0x0400 or greater.
+#endif
+
+#if !defined(_MT)
+#error TBB requires linkage with multithreaded C/C++ runtime library. \
+       Choose multithreaded DLL runtime in project settings, or use /MD[d] compiler switch.
+#endif
+
+// Workaround for the problem with MSVC headers failing to define namespace std
+namespace std {
+  using ::size_t; using ::ptrdiff_t;
+}
+
+#define __TBB_STRING_AUX(x) #x
+#define __TBB_STRING(x) __TBB_STRING_AUX(x)
+
+// Default setting of TBB_USE_DEBUG
+#ifdef TBB_USE_DEBUG
+#    if TBB_USE_DEBUG 
+#        if !defined(_DEBUG)
+#            pragma message(__FILE__ "(" __TBB_STRING(__LINE__) ") : Warning: Recommend using /MDd if compiling with TBB_USE_DEBUG!=0")
+#        endif
+#    else
+#        if defined(_DEBUG)
+#            pragma message(__FILE__ "(" __TBB_STRING(__LINE__) ") : Warning: Recommend using /MD if compiling with TBB_USE_DEBUG==0")
+#        endif
+#    endif
+#else
+#    ifdef _DEBUG
+#        define TBB_USE_DEBUG 1
+#    endif
+#endif 
+
+#if __TBB_BUILD && !defined(__TBB_NO_IMPLICIT_LINKAGE)
+#define __TBB_NO_IMPLICIT_LINKAGE 1
+#endif
+
+#if _MSC_VER
+    #if !__TBB_NO_IMPLICIT_LINKAGE
+        #ifdef _DEBUG
+            #pragma comment(lib, "tbb_debug.lib")
+        #else
+            #pragma comment(lib, "tbb.lib")
+        #endif
+    #endif
+#endif
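The header above stringizes __LINE__ for its #pragma message diagnostics through a two-level macro, __TBB_STRING over __TBB_STRING_AUX. The extra level is what forces __LINE__ to expand before it is stringized; stringizing it directly would produce the literal text "__LINE__". The standalone sketch below (macros renamed for illustration; not part of the patch) shows the difference.

#include <cstdio>

#define STRING_AUX(x) #x          // stringizes its argument without expanding it
#define STRING(x) STRING_AUX(x)   // expands the argument first, then stringizes it

int main() {
    std::printf( "%s\n", STRING_AUX(__LINE__) );  // prints: __LINE__
    std::printf( "%s\n", STRING(__LINE__) );      // prints the current line number
    return 0;
}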
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/aligned_space.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/aligned_space.h
new file mode 100644 (file)
index 0000000..0d76b3f
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_aligned_space_H
+#define __TBB_aligned_space_H
+
+#include "tbb_stddef.h"
+#include "tbb_machine.h"
+
+namespace tbb {
+
+//! Block of space aligned sufficiently to construct an array T with N elements.
+/** The elements are not constructed or destroyed by this class.
+    @ingroup memory_allocation */
+template<typename T,size_t N>
+class aligned_space {
+private:
+    typedef __TBB_TypeWithAlignmentAtLeastAsStrict(T) element_type;
+    element_type array[(sizeof(T)*N+sizeof(element_type)-1)/sizeof(element_type)];
+public:
+    //! Pointer to beginning of array
+    T* begin() {return internal::punned_cast<T*>(this);}
+
+    //! Pointer to one past last element in array.
+    T* end() {return begin()+N;}
+};
+
+} // namespace tbb 
+
+#endif /* __TBB_aligned_space_H */
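aligned_space<T,N> only reserves raw, suitably aligned storage; it never constructs or destroys any T. A typical usage pattern is sketched below (a hypothetical standalone example, assuming the TBB include directory is on the compiler's include path): construct the elements with placement new and destroy them explicitly when done.

#include "tbb/aligned_space.h"
#include <new>   // placement new

struct Sample {
    double value;
    explicit Sample( double v ) : value(v) {}
};

int main() {
    // Raw, aligned storage for four Sample objects; nothing is constructed yet.
    tbb::aligned_space<Sample,4> storage;

    // Construct the elements in place.
    for( int i = 0; i < 4; ++i )
        new( static_cast<void*>(storage.begin() + i) ) Sample( i * 0.5 );

    double sum = 0;
    for( Sample* p = storage.begin(); p != storage.end(); ++p )
        sum += p->value;

    // aligned_space never runs destructors; do it explicitly.
    for( Sample* p = storage.begin(); p != storage.end(); ++p )
        p->~Sample();

    return sum == 3.0 ? 0 : 1;   // 0 + 0.5 + 1.0 + 1.5
}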
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/atomic.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/atomic.h
new file mode 100644 (file)
index 0000000..52591ff
--- /dev/null
@@ -0,0 +1,363 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_atomic_H
+#define __TBB_atomic_H
+
+#include <cstddef>
+#include "tbb_stddef.h"
+
+#if _MSC_VER 
+#define __TBB_LONG_LONG __int64
+#else
+#define __TBB_LONG_LONG long long
+#endif /* _MSC_VER */
+
+#include "tbb_machine.h"
+
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+    // Workaround for overzealous compiler warnings 
+    #pragma warning (push)
+    #pragma warning (disable: 4244 4267)
+#endif
+
+namespace tbb {
+
+//! Specifies memory fencing.
+enum memory_semantics {
+    //! For internal use only.
+    __TBB_full_fence,
+    //! Acquire fence
+    acquire,
+    //! Release fence
+    release
+};
+
+//! @cond INTERNAL
+namespace internal {
+
+#if __GNUC__ || __SUNPRO_CC
+#define __TBB_DECL_ATOMIC_FIELD(t,f,a) t f  __attribute__ ((aligned(a)));
+#elif defined(__INTEL_COMPILER)||_MSC_VER >= 1300
+#define __TBB_DECL_ATOMIC_FIELD(t,f,a) __declspec(align(a)) t f;
+#else 
+#error Do not know syntax for forcing alignment.
+#endif /* __GNUC__ */
+
+template<size_t S>
+struct atomic_rep;           // Primary template declared, but never defined.
+
+template<>
+struct atomic_rep<1> {       // Specialization
+    typedef int8_t word;
+    int8_t value;
+};
+template<>
+struct atomic_rep<2> {       // Specialization
+    typedef int16_t word;
+    __TBB_DECL_ATOMIC_FIELD(int16_t,value,2)
+};
+template<>
+struct atomic_rep<4> {       // Specialization
+#if _MSC_VER && __TBB_WORDSIZE==4
+    // Work-around that avoids spurious /Wp64 warnings
+    typedef intptr_t word;
+#else
+    typedef int32_t word;
+#endif
+    __TBB_DECL_ATOMIC_FIELD(int32_t,value,4)
+};
+template<>
+struct atomic_rep<8> {       // Specialization
+    typedef int64_t word;
+    __TBB_DECL_ATOMIC_FIELD(int64_t,value,8)
+};
+
+template<size_t Size, memory_semantics M>
+struct atomic_traits;        // Primary template declared, but not defined.
+
+#define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S,M)                         \
+    template<> struct atomic_traits<S,M> {                               \
+        typedef atomic_rep<S>::word word;                               \
+        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) {\
+            return __TBB_CompareAndSwap##S##M(location,new_value,comparand);    \
+        }                                                                       \
+        inline static word fetch_and_add( volatile void* location, word addend ) { \
+            return __TBB_FetchAndAdd##S##M(location,addend);                    \
+        }                                                                       \
+        inline static word fetch_and_store( volatile void* location, word value ) {\
+            return __TBB_FetchAndStore##S##M(location,value);                   \
+        }                                                                       \
+    };
+
+#define __TBB_DECL_ATOMIC_PRIMITIVES(S)                                  \
+    template<memory_semantics M>                                         \
+    struct atomic_traits<S,M> {                                          \
+        typedef atomic_rep<S>::word word;                               \
+        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) {\
+            return __TBB_CompareAndSwap##S(location,new_value,comparand);       \
+        }                                                                       \
+        inline static word fetch_and_add( volatile void* location, word addend ) { \
+            return __TBB_FetchAndAdd##S(location,addend);                       \
+        }                                                                       \
+        inline static word fetch_and_store( volatile void* location, word value ) {\
+            return __TBB_FetchAndStore##S(location,value);                      \
+        }                                                                       \
+    };
+
+#if __TBB_DECL_FENCED_ATOMICS
+__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,__TBB_full_fence)
+__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,__TBB_full_fence)
+__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,__TBB_full_fence)
+__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,__TBB_full_fence)
+__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,acquire)
+__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,acquire)
+__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,acquire)
+__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,acquire)
+__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,release)
+__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,release)
+__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,release)
+__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,release)
+#else
+__TBB_DECL_ATOMIC_PRIMITIVES(1)
+__TBB_DECL_ATOMIC_PRIMITIVES(2)
+__TBB_DECL_ATOMIC_PRIMITIVES(4)
+__TBB_DECL_ATOMIC_PRIMITIVES(8)
+#endif
+
+//! Additive inverse of 1 for type T.
+/** Various compilers issue various warnings if -1 is used with various integer types.
+    The baroque expression below avoids all the warnings (we hope). */
+#define __TBB_MINUS_ONE(T) (T(T(0)-T(1)))
+
+//! Base class that provides basic functionality for atomic<T> without fetch_and_add.
+/** Works for any type T that has the same size as an integral type, has a trivial constructor/destructor, 
+    and can be copied/compared by memcpy/memcmp. */
+template<typename T>
+struct atomic_impl {
+protected:
+    atomic_rep<sizeof(T)> rep;
+private:
+    //! Union type used to convert type T to underlying integral type.
+    union converter {
+        T value;
+        typename atomic_rep<sizeof(T)>::word bits;
+    };
+public:
+    typedef T value_type;
+
+    template<memory_semantics M>
+    value_type fetch_and_store( value_type value ) {
+        converter u, w;
+        u.value = value;
+        w.bits = internal::atomic_traits<sizeof(value_type),M>::fetch_and_store(&rep.value,u.bits);
+        return w.value;
+    }
+
+    value_type fetch_and_store( value_type value ) {
+        return fetch_and_store<__TBB_full_fence>(value);
+    }
+
+    template<memory_semantics M>
+    value_type compare_and_swap( value_type value, value_type comparand ) {
+        converter u, v, w;
+        u.value = value;
+        v.value = comparand;
+        w.bits = internal::atomic_traits<sizeof(value_type),M>::compare_and_swap(&rep.value,u.bits,v.bits);
+        return w.value;
+    }
+
+    value_type compare_and_swap( value_type value, value_type comparand ) {
+        return compare_and_swap<__TBB_full_fence>(value,comparand);
+    }
+
+    operator value_type() const volatile {                // volatile qualifier here for backwards compatibility 
+        converter w;
+        w.bits = __TBB_load_with_acquire( rep.value );
+        return w.value;
+    }
+
+protected:
+    value_type store_with_release( value_type rhs ) {
+        converter u;
+        u.value = rhs;
+        __TBB_store_with_release(rep.value,u.bits);
+        return rhs;
+    }
+};
+
+//! Base class that provides basic functionality for atomic<T> with fetch_and_add.
+/** I is the underlying type.
+    D is the difference type.
+    StepType should be char if I is an integral type, and T if I is a T*. */
+template<typename I, typename D, typename StepType>
+struct atomic_impl_with_arithmetic: atomic_impl<I> {
+public:
+    typedef I value_type;
+
+    template<memory_semantics M>
+    value_type fetch_and_add( D addend ) {
+        return value_type(internal::atomic_traits<sizeof(value_type),M>::fetch_and_add( &this->rep.value, addend*sizeof(StepType) ));
+    }
+
+    value_type fetch_and_add( D addend ) {
+        return fetch_and_add<__TBB_full_fence>(addend);
+    }
+
+    template<memory_semantics M>
+    value_type fetch_and_increment() {
+        return fetch_and_add<M>(1);
+    }
+
+    value_type fetch_and_increment() {
+        return fetch_and_add(1);
+    }
+
+    template<memory_semantics M>
+    value_type fetch_and_decrement() {
+        return fetch_and_add<M>(__TBB_MINUS_ONE(D));
+    }
+
+    value_type fetch_and_decrement() {
+        return fetch_and_add(__TBB_MINUS_ONE(D));
+    }
+
+public:
+    value_type operator+=( D addend ) {
+        return fetch_and_add(addend)+addend;
+    }
+
+    value_type operator-=( D addend ) {
+        // Additive inverse of addend computed using binary minus,
+        // instead of unary minus, for sake of avoiding compiler warnings.
+        return operator+=(D(0)-addend);    
+    }
+
+    value_type operator++() {
+        return fetch_and_add(1)+1;
+    }
+
+    value_type operator--() {
+        return fetch_and_add(__TBB_MINUS_ONE(D))-1;
+    }
+
+    value_type operator++(int) {
+        return fetch_and_add(1);
+    }
+
+    value_type operator--(int) {
+        return fetch_and_add(__TBB_MINUS_ONE(D));
+    }
+};
+
+} // namespace internal
+//! @endcond
+
+//! Primary template for atomic.
+/** See the Reference for details.
+    @ingroup synchronization */
+template<typename T>
+struct atomic: internal::atomic_impl<T> {
+    T operator=( T rhs ) {
+        // "this" required here in strict ISO C++ because store_with_release is a dependent name
+        return this->store_with_release(rhs);
+    }
+    atomic<T>& operator=( const atomic<T>& rhs ) {this->store_with_release(rhs); return *this;}
+};
+
+#define __TBB_DECL_ATOMIC(T) \
+    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {  \
+        T operator=( T rhs ) {return store_with_release(rhs);}  \
+        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;}  \
+    };
+
+__TBB_DECL_ATOMIC(__TBB_LONG_LONG)
+__TBB_DECL_ATOMIC(unsigned __TBB_LONG_LONG)
+__TBB_DECL_ATOMIC(long)
+__TBB_DECL_ATOMIC(unsigned long)
+
+#if defined(_MSC_VER) && __TBB_WORDSIZE==4
+/* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from cl /Wp64 option. 
+   It is identical to __TBB_DECL_ATOMIC(unsigned) except that it replaces operator=(T) 
+   with an operator=(U) that explicitly converts the U to a T.  Types T and U should be
+   type synonyms on the platform.  Type U should be the wider variant of T from the
+   perspective of /Wp64. */
+#define __TBB_DECL_ATOMIC_ALT(T,U) \
+    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {  \
+        T operator=( U rhs ) {return store_with_release(T(rhs));}  \
+        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;}  \
+    };
+__TBB_DECL_ATOMIC_ALT(unsigned,size_t)
+__TBB_DECL_ATOMIC_ALT(int,ptrdiff_t)
+#else
+__TBB_DECL_ATOMIC(unsigned)
+__TBB_DECL_ATOMIC(int)
+#endif /* defined(_MSC_VER) && __TBB_WORDSIZE==4 */
+
+__TBB_DECL_ATOMIC(unsigned short)
+__TBB_DECL_ATOMIC(short)
+__TBB_DECL_ATOMIC(char)
+__TBB_DECL_ATOMIC(signed char)
+__TBB_DECL_ATOMIC(unsigned char)
+
+#if !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED) 
+__TBB_DECL_ATOMIC(wchar_t)
+#endif /* !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED) */
+
+//! Specialization for atomic<T*> with arithmetic and operator->.
+template<typename T> struct atomic<T*>: internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T> {
+    T* operator=( T* rhs ) {
+        // "this" required here in strict ISO C++ because store_with_release is a dependent name
+        return this->store_with_release(rhs);
+    }
+    atomic<T*>& operator=( const atomic<T*>& rhs ) {
+        this->store_with_release(rhs); return *this;
+    }
+    T* operator->() const {
+        return (*this);
+    }
+};
+
+//! Specialization for atomic<void*>, for sake of not allowing arithmetic or operator->.
+template<> struct atomic<void*>: internal::atomic_impl<void*> {
+    void* operator=( void* rhs ) {
+        // "this" required here in strict ISO C++ because store_with_release is a dependent name
+        return this->store_with_release(rhs);
+    }
+    atomic<void*>& operator=( const atomic<void*>& rhs ) {
+        this->store_with_release(rhs); return *this;
+    }
+};
+
+} // namespace tbb
+
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+    #pragma warning (pop)
+#endif // warnings 4244, 4267 are back
+
+#endif /* __TBB_atomic_H */
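For orientation, a minimal usage sketch of the tbb::atomic template added above; the variable names and values are illustrative only, not part of the commit.

#include <cstdio>
#include "tbb/atomic.h"

int main() {
    tbb::atomic<int> x;
    x = 5;                                   // operator=, store with release semantics
    int prev = x.fetch_and_add( 3 );         // returns 5, x becomes 8
    int old  = x.compare_and_swap( 42, 8 );  // x was 8 (the comparand), so it becomes 42
    std::printf( "prev=%d old=%d x=%d\n", prev, old, int(x) );
    return 0;
}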
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/blocked_range.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/blocked_range.h
new file mode 100644 (file)
index 0000000..52c12cc
--- /dev/null
@@ -0,0 +1,129 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_blocked_range_H
+#define __TBB_blocked_range_H
+
+#include "tbb_stddef.h"
+
+namespace tbb {
+
+/** \page range_req Requirements on range concept
+    Class \c R implementing the concept of range must define:
+    - \code R::R( const R& ); \endcode               Copy constructor
+    - \code R::~R(); \endcode                        Destructor
+    - \code bool R::is_divisible() const; \endcode   True if range can be partitioned into two subranges
+    - \code bool R::empty() const; \endcode          True if range is empty
+    - \code R::R( R& r, split ); \endcode            Split range \c r into two subranges.
+**/
+
+//! A range over which to iterate.
+/** @ingroup algorithms */
+template<typename Value>
+class blocked_range {
+public:
+    //! Type of a value
+    /** Called a const_iterator for sake of algorithms that need to treat a blocked_range
+        as an STL container. */
+    typedef Value const_iterator;
+
+    //! Type for size of a range
+    typedef std::size_t size_type;
+
+    //! Construct range with default-constructed values for begin and end.
+    /** Requires that Value have a default constructor. */
+    blocked_range() : my_end(), my_begin() {}
+
+    //! Construct range over half-open interval [begin,end), with the given grainsize.
+    blocked_range( Value begin_, Value end_, size_type grainsize_=1 ) : 
+        my_end(end_), my_begin(begin_), my_grainsize(grainsize_) 
+    {
+        __TBB_ASSERT( my_grainsize>0, "grainsize must be positive" );
+    }
+
+    //! Beginning of range.
+    const_iterator begin() const {return my_begin;}
+
+    //! One past last value in range.
+    const_iterator end() const {return my_end;}
+
+    //! Size of the range
+    /** Unspecified if end()<begin(). */
+    size_type size() const {
+        __TBB_ASSERT( !(end()<begin()), "size() unspecified if end()<begin()" );
+        return size_type(my_end-my_begin);
+    }
+
+    //! The grain size for this range.
+    size_type grainsize() const {return my_grainsize;}
+
+    //------------------------------------------------------------------------
+    // Methods that implement Range concept
+    //------------------------------------------------------------------------
+
+    //! True if range is empty.
+    bool empty() const {return !(my_begin<my_end);}
+
+    //! True if range is divisible.
+    /** Unspecified if end()<begin(). */
+    bool is_divisible() const {return my_grainsize<size();}
+
+    //! Split range.  
+    /** The new Range *this has the second half, the old range r has the first half. 
+        Unspecified if end()<begin() or !is_divisible(). */
+    blocked_range( blocked_range& r, split ) : 
+        my_end(r.my_end),
+        my_begin(do_split(r)),
+        my_grainsize(r.my_grainsize)
+    {}
+
+private:
+    /** NOTE: my_end MUST be declared before my_begin, otherwise the forking constructor will break. */
+    Value my_end;
+    Value my_begin;
+    size_type my_grainsize;
+
+    //! Auxiliary function used by forking constructor.
+    /** Using this function lets us not require that Value support assignment or default construction. */
+    static Value do_split( blocked_range& r ) {
+        __TBB_ASSERT( r.is_divisible(), "cannot split blocked_range that is not divisible" );
+        Value middle = r.my_begin + (r.my_end-r.my_begin)/2u;
+        r.my_end = middle;
+        return middle;
+    }
+
+    template<typename RowValue, typename ColValue>
+    friend class blocked_range2d;
+
+    template<typename RowValue, typename ColValue, typename PageValue>
+    friend class blocked_range3d;
+};
+
+} // namespace tbb 
+
+#endif /* __TBB_blocked_range_H */
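A small sketch of how a blocked_range is typically consumed by a parallel loop body. parallel_for itself is declared in tbb/parallel_for.h, which is added elsewhere in this commit; the body and data names here are invented for illustration.

#include <vector>
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"

struct Scale {
    std::vector<double>* v;
    double factor;
    void operator()( const tbb::blocked_range<size_t>& r ) const {
        for( size_t i = r.begin(); i != r.end(); ++i )   // each task sees a half-open subrange
            (*v)[i] *= factor;
    }
};

int main() {
    std::vector<double> data( 10000, 1.0 );
    Scale body = { &data, 2.0 };
    // grainsize 1000: the range is split until is_divisible() is false, i.e. size() <= 1000
    tbb::parallel_for( tbb::blocked_range<size_t>( 0, data.size(), 1000 ), body );
    return 0;
}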
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/blocked_range2d.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/blocked_range2d.h
new file mode 100644 (file)
index 0000000..d541f42
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_blocked_range2d_H
+#define __TBB_blocked_range2d_H
+
+#include "tbb_stddef.h"
+#include "blocked_range.h"
+
+namespace tbb {
+
+//! A 2-dimensional range that models the Range concept.
+/** @ingroup algorithms */
+template<typename RowValue, typename ColValue=RowValue>
+class blocked_range2d {
+public:
+    //! Types of the row and column iteration ranges
+    typedef blocked_range<RowValue> row_range_type;
+    typedef blocked_range<ColValue> col_range_type;
+private:
+    row_range_type my_rows;
+    col_range_type my_cols;
+
+public:
+
+    blocked_range2d( RowValue row_begin, RowValue row_end, typename row_range_type::size_type row_grainsize,
+                     ColValue col_begin, ColValue col_end, typename col_range_type::size_type col_grainsize ) : 
+        my_rows(row_begin,row_end,row_grainsize),
+        my_cols(col_begin,col_end,col_grainsize)
+    {
+    }
+
+    blocked_range2d( RowValue row_begin, RowValue row_end,
+                     ColValue col_begin, ColValue col_end ) : 
+        my_rows(row_begin,row_end),
+        my_cols(col_begin,col_end)
+    {
+    }
+
+    //! True if range is empty
+    bool empty() const {
+        // Yes, it is a logical OR here, not AND.
+        return my_rows.empty() || my_cols.empty();
+    }
+
+    //! True if range is divisible into two pieces.
+    bool is_divisible() const {
+        return my_rows.is_divisible() || my_cols.is_divisible();
+    }
+
+    blocked_range2d( blocked_range2d& r, split ) : 
+        my_rows(r.my_rows),
+        my_cols(r.my_cols)
+    {
+        if( my_rows.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_rows.grainsize()) ) {
+            my_cols.my_begin = col_range_type::do_split(r.my_cols);
+        } else {
+            my_rows.my_begin = row_range_type::do_split(r.my_rows);
+        }
+    }
+
+    //! The rows of the iteration space 
+    const row_range_type& rows() const {return my_rows;}
+
+    //! The columns of the iteration space 
+    const col_range_type& cols() const {return my_cols;}
+};
+
+} // namespace tbb 
+
+#endif /* __TBB_blocked_range2d_H */
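The same pattern for the 2-D range: the body walks the row and column subranges it is handed. Again, parallel_for is added elsewhere in this commit, and the row-major matrix layout is an assumption made for the example.

#include <vector>
#include "tbb/blocked_range2d.h"
#include "tbb/parallel_for.h"

struct ZeroBlock {
    double* a;
    size_t n_cols;
    void operator()( const tbb::blocked_range2d<size_t>& r ) const {
        for( size_t i = r.rows().begin(); i != r.rows().end(); ++i )      // row subrange
            for( size_t j = r.cols().begin(); j != r.cols().end(); ++j )  // column subrange
                a[i*n_cols + j] = 0.0;
    }
};

int main() {
    const size_t n_rows = 256, n_cols = 512;
    std::vector<double> m( n_rows*n_cols, 1.0 );
    ZeroBlock body = { &m[0], n_cols };
    // Row/column grainsizes of 16 yield 2-D blocks of at most 16x16 elements.
    tbb::parallel_for( tbb::blocked_range2d<size_t>( 0, n_rows, 16, 0, n_cols, 16 ), body );
    return 0;
}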
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/blocked_range3d.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/blocked_range3d.h
new file mode 100644 (file)
index 0000000..b0bfbe0
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_blocked_range3d_H
+#define __TBB_blocked_range3d_H
+
+#include "tbb_stddef.h"
+#include "blocked_range.h"
+
+namespace tbb {
+
+//! A 3-dimensional range that models the Range concept.
+/** @ingroup algorithms */
+template<typename PageValue, typename RowValue=PageValue, typename ColValue=RowValue>
+class blocked_range3d {
+public:
+    //! Types of the page, row, and column iteration ranges
+    typedef blocked_range<PageValue> page_range_type;
+    typedef blocked_range<RowValue>  row_range_type;
+    typedef blocked_range<ColValue>  col_range_type;
+private:
+    page_range_type my_pages;
+    row_range_type  my_rows;
+    col_range_type  my_cols;
+
+public:
+
+    blocked_range3d( PageValue page_begin, PageValue page_end,
+                     RowValue  row_begin,  RowValue row_end,
+                     ColValue  col_begin,  ColValue col_end ) : 
+        my_pages(page_begin,page_end),
+        my_rows(row_begin,row_end),
+        my_cols(col_begin,col_end)
+    {
+    }
+
+    blocked_range3d( PageValue page_begin, PageValue page_end, typename page_range_type::size_type page_grainsize, 
+                     RowValue  row_begin,  RowValue row_end,   typename row_range_type::size_type row_grainsize,
+                     ColValue  col_begin,  ColValue col_end,   typename col_range_type::size_type col_grainsize ) :  
+        my_pages(page_begin,page_end,page_grainsize),
+        my_rows(row_begin,row_end,row_grainsize),
+        my_cols(col_begin,col_end,col_grainsize)
+    {
+    }
+
+    //! True if range is empty
+    bool empty() const {
+        // Yes, it is a logical OR here, not AND.
+        return my_pages.empty() || my_rows.empty() || my_cols.empty();
+    }
+
+    //! True if range is divisible into two pieces.
+    bool is_divisible() const {
+        return  my_pages.is_divisible() || my_rows.is_divisible() || my_cols.is_divisible();
+    }
+
+    blocked_range3d( blocked_range3d& r, split ) : 
+        my_pages(r.my_pages),
+        my_rows(r.my_rows),
+        my_cols(r.my_cols)
+    {
+        if( my_pages.size()*double(my_rows.grainsize()) < my_rows.size()*double(my_pages.grainsize()) ) {
+            if ( my_rows.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_rows.grainsize()) ) {
+                my_cols.my_begin = col_range_type::do_split(r.my_cols);
+            } else {
+                my_rows.my_begin = row_range_type::do_split(r.my_rows);
+            }
+        } else {
+            if ( my_pages.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_pages.grainsize()) ) {
+                my_cols.my_begin = col_range_type::do_split(r.my_cols);
+            } else {
+                my_pages.my_begin = page_range_type::do_split(r.my_pages);
+            }
+        }
+    }
+
+    //! The pages of the iteration space 
+    const page_range_type& pages() const {return my_pages;}
+
+    //! The rows of the iteration space 
+    const row_range_type& rows() const {return my_rows;}
+
+    //! The columns of the iteration space 
+    const col_range_type& cols() const {return my_cols;}
+
+};
+
+} // namespace tbb 
+
+#endif /* __TBB_blocked_range3d_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/cache_aligned_allocator.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/cache_aligned_allocator.h
new file mode 100644 (file)
index 0000000..5889682
--- /dev/null
@@ -0,0 +1,133 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_cache_aligned_allocator_H
+#define __TBB_cache_aligned_allocator_H
+
+#include <new>
+#include "tbb_stddef.h"
+
+namespace tbb {
+
+//! @cond INTERNAL
+namespace internal {
+    //! Cache/sector line size.
+    /** @ingroup memory_allocation */
+    size_t __TBB_EXPORTED_FUNC NFS_GetLineSize();
+
+    //! Allocate memory on cache/sector line boundary.
+    /** @ingroup memory_allocation */
+    void* __TBB_EXPORTED_FUNC NFS_Allocate( size_t n_element, size_t element_size, void* hint );
+
+    //! Free memory allocated by NFS_Allocate.
+    /** Freeing a NULL pointer is allowed, but has no effect.
+        @ingroup memory_allocation */
+    void __TBB_EXPORTED_FUNC NFS_Free( void* );
+}
+//! @endcond
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // Workaround for erroneous "unreferenced parameter" warning in method destroy.
+    #pragma warning (push)
+    #pragma warning (disable: 4100)
+#endif
+
+//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5
+/** The members are ordered the same way they are in section 20.4.1
+    of the ISO C++ standard.
+    @ingroup memory_allocation */
+template<typename T>
+class cache_aligned_allocator {
+public:
+    typedef typename internal::allocator_type<T>::value_type value_type;
+    typedef value_type* pointer;
+    typedef const value_type* const_pointer;
+    typedef value_type& reference;
+    typedef const value_type& const_reference;
+    typedef size_t size_type;
+    typedef ptrdiff_t difference_type;
+    template<typename U> struct rebind {
+        typedef cache_aligned_allocator<U> other;
+    };
+
+    cache_aligned_allocator() throw() {}
+    cache_aligned_allocator( const cache_aligned_allocator& ) throw() {}
+    template<typename U> cache_aligned_allocator(const cache_aligned_allocator<U>&) throw() {}
+
+    pointer address(reference x) const {return &x;}
+    const_pointer address(const_reference x) const {return &x;}
+    
+    //! Allocate space for n objects, starting on a cache/sector line.
+    pointer allocate( size_type n, const void* hint=0 ) {
+        // The "hint" argument is always ignored in NFS_Allocate, so the const_cast is harmless
+        return pointer(internal::NFS_Allocate( n, sizeof(value_type), const_cast<void*>(hint) ));
+    }
+
+    //! Free block of memory that starts on a cache line
+    void deallocate( pointer p, size_type ) {
+        internal::NFS_Free(p);
+    }
+
+    //! Largest value for which method allocate might succeed.
+    size_type max_size() const throw() {
+        return (~size_t(0)-internal::NFS_MaxLineSize)/sizeof(value_type);
+    }
+
+    //! Copy-construct value at location pointed to by p.
+    void construct( pointer p, const value_type& value ) {::new((void*)(p)) value_type(value);}
+
+    //! Destroy value at location pointed to by p.
+    void destroy( pointer p ) {p->~value_type();}
+};
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    #pragma warning (pop)
+#endif // warning 4100 is back
+
+//! Analogous to std::allocator<void>, as defined in ISO C++ Standard, Section 20.4.1
+/** @ingroup memory_allocation */
+template<> 
+class cache_aligned_allocator<void> {
+public:
+    typedef void* pointer;
+    typedef const void* const_pointer;
+    typedef void value_type;
+    template<typename U> struct rebind {
+        typedef cache_aligned_allocator<U> other;
+    };
+};
+
+template<typename T, typename U>
+inline bool operator==( const cache_aligned_allocator<T>&, const cache_aligned_allocator<U>& ) {return true;}
+
+template<typename T, typename U>
+inline bool operator!=( const cache_aligned_allocator<T>&, const cache_aligned_allocator<U>& ) {return false;}
+
+} // namespace tbb
+
+#endif /* __TBB_cache_aligned_allocator_H */
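A short sketch of using the allocator with a standard container. The point is that each underlying allocation starts on its own cache/sector line, which helps avoid false sharing between threads that own different containers; the container names are invented for the example.

#include <vector>
#include "tbb/cache_aligned_allocator.h"

int main() {
    // Two independently allocated buffers; each starts on a cache/sector line boundary,
    // so concurrent writers to the two vectors do not contend for the same line.
    std::vector<int, tbb::cache_aligned_allocator<int> > counters_a( 64, 0 );
    std::vector<int, tbb::cache_aligned_allocator<int> > counters_b( 64, 0 );
    counters_a[0] = 1;
    counters_b[0] = 2;
    return 0;
}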
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/combinable.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/combinable.h
new file mode 100644 (file)
index 0000000..5510595
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_combinable_H
+#define __TBB_combinable_H
+
+#include "enumerable_thread_specific.h"
+#include "cache_aligned_allocator.h"
+
+namespace tbb {
+/** \name combinable
+    **/
+//@{
+//! Thread-local storage with optional reduction
+/** @ingroup containers */
+    template <typename T>
+        class combinable {
+    private:
+        typedef typename tbb::cache_aligned_allocator<T> my_alloc;
+
+        typedef typename tbb::enumerable_thread_specific<T, my_alloc, ets_no_key> my_ets_type;
+        my_ets_type my_ets; 
+    public:
+
+        combinable() { }
+
+        template <typename finit>
+        combinable( finit _finit) : my_ets(_finit) { }
+
+        //! destructor
+        ~combinable() { 
+        }
+
+        combinable(const combinable& other) : my_ets(other.my_ets) { }
+
+        combinable & operator=( const combinable & other) { my_ets = other.my_ets; return *this; }
+
+        void clear() { my_ets.clear(); }
+
+        T& local() { return my_ets.local(); }
+
+        T& local(bool & exists) { return my_ets.local(exists); }
+
+        // combine_func_t has signature T(T,T) or T(const T&, const T&)
+        template <typename combine_func_t>
+        T combine(combine_func_t f_combine) { return my_ets.combine(f_combine); }
+
+        // combine_func_t has signature void(T) or void(const T&)
+        template <typename combine_func_t>
+        void combine_each(combine_func_t f_combine) { my_ets.combine_each(f_combine); }
+
+    };
+} // namespace tbb
+#endif /* __TBB_combinable_H */
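A usage sketch for combinable: per-thread partial sums that are reduced at the end. parallel_for and blocked_range come from the headers added in this commit; the initializer and the reduction function are invented for the example.

#include <cstdio>
#include "tbb/combinable.h"
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"

static long zero() { return 0; }
static long add( long a, long b ) { return a + b; }

static tbb::combinable<long> partial_sums( zero );   // each thread-local copy starts at 0

struct Accumulate {
    void operator()( const tbb::blocked_range<long>& r ) const {
        long& local = partial_sums.local();           // lazily created thread-local slot
        for( long i = r.begin(); i != r.end(); ++i )
            local += i;
    }
};

int main() {
    tbb::parallel_for( tbb::blocked_range<long>( 0, 100000 ), Accumulate() );
    long total = partial_sums.combine( add );         // reduce all thread-local copies
    std::printf( "sum = %ld\n", total );
    return 0;
}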
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/compat/condition_variable b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/compat/condition_variable
new file mode 100644 (file)
index 0000000..2a2f600
--- /dev/null
@@ -0,0 +1,459 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_condition_variable_H
+#define __TBB_condition_variable_H
+
+#if _WIN32||_WIN64
+#include "../machine/windows_api.h"
+
+namespace tbb { 
+namespace interface5 {
+namespace internal { 
+struct condition_variable_using_event
+{
+    //! Event for blocking waiting threads.
+    HANDLE event;
+    //! Protects invariants involving n_waiters, release_count, and epoch.
+    CRITICAL_SECTION mutex;
+    //! Number of threads waiting on this condition variable
+    int n_waiters;
+    //! Number of threads remaining that should no longer wait on this condition variable.
+    int release_count;
+    //! To keep threads from waking up prematurely with earlier signals.
+    unsigned epoch;
+};
+}}} // namespace tbb::interface5::internal
+
+#ifndef CONDITION_VARIABLE_INIT
+typedef void* CONDITION_VARIABLE;
+typedef CONDITION_VARIABLE* PCONDITION_VARIABLE;
+#endif
+
+#else /* if not _WIN32||_WIN64 */
+#include <errno.h> // some systems need it for ETIMEDOUT
+#include <pthread.h>
+#endif /* _WIN32||_WIN64 */
+
+#include "../tbb_stddef.h"
+#include "../mutex.h"
+#include "../tbb_thread.h"
+#include "../tbb_exception.h"
+#include "../tbb_profiling.h"
+
+namespace tbb {
+
+namespace interface5 {
+
+// C++0x standard working draft 30.4.3
+// Lock tag types
+struct defer_lock_t { }; //! do not acquire ownership of the mutex
+struct try_to_lock_t { }; //! try to acquire ownership of the mutex without blocking
+struct adopt_lock_t { }; //! assume the calling thread has already obtained ownership of the mutex
+const defer_lock_t defer_lock = {};
+const try_to_lock_t try_to_lock = {};
+const adopt_lock_t adopt_lock = {};
+
+// C++0x standard working draft 30.4.3.1
+//! lock_guard 
+template<typename M>
+class lock_guard : tbb::internal::no_copy {
+public:
+    //! mutex type
+    typedef M mutex_type;
+
+    //! Constructor
+    /** precondition: If mutex_type is not a recursive mutex, the calling thread
+        does not own the mutex m. */
+    explicit lock_guard(mutex_type& m) : pm(m) {m.lock();}
+    
+    //! Adopt_lock constructor
+    /** precondition: the calling thread owns the mutex m. */
+    lock_guard(mutex_type& m, adopt_lock_t) : pm(m) {}
+
+    //! Destructor
+    ~lock_guard() { pm.unlock(); }
+private:
+    mutex_type& pm;
+};
+
+// C++0x standard working draft 30.4.3.2
+//! unique_lock 
+template<typename M>
+class unique_lock : tbb::internal::no_copy {
+    friend class condition_variable;
+public:
+    typedef M mutex_type;
+
+    // 30.4.3.2.1 construct/copy/destroy
+    // NB: Without constructors that take an r-value reference to a unique_lock, the following constructor is of little use.
+    //! Constructor
+    /** postcondition: pm==0 && owns==false */
+    unique_lock() : pm(NULL), owns(false) {}
+
+    //! Constructor
+    /** precondition: if mutex_type is not a recursive mutex, the  calling thread
+        does not own the mutex m.  If the precondition is not met, a deadlock occurs.
+        postcondition: pm==&m and owns==true */
+    explicit unique_lock(mutex_type& m) : pm(&m) {m.lock(); owns=true;}
+
+    //! Defer_lock constructor
+    /** postcondition: pm==&m and owns==false */
+    unique_lock(mutex_type& m, defer_lock_t) : pm(&m), owns(false) {}
+
+    //! Try_to_lock constructor
+    /** precondition: if mutex_type is not a recursive mutex, the  calling thread
+       does not own the mutex m.  If the precondition is not met, a deadlock occurs.
+       postcondition: pm==&m and owns==res where res is the value returned by
+       the call to m.try_lock(). */
+    unique_lock(mutex_type& m, try_to_lock_t) : pm(&m) {owns = m.try_lock();}
+
+    //! Adopt_lock constructor
+    /** precondition: the calling thread owns the mutex. If it does not, mutex->unlock() would fail.
+        postcondition: pm==&m and owns==true */
+    unique_lock(mutex_type& m, adopt_lock_t) : pm(&m), owns(true) {}
+
+    //! Timed unique_lock acquisition.
+    /** To avoid requiring support for namespace chrono, this method deviates from the working draft in that 
+        it uses tbb::tick_count::interval_t to specify the time duration. */
+    unique_lock(mutex_type& m, const tick_count::interval_t &i) : pm(&m) {owns = try_lock_for( i );}
+
+    //! Destructor
+    ~unique_lock() { if( owns ) pm->unlock(); }
+
+    // 30.4.3.2.2 locking
+    //! Lock the mutex and own it.
+    void lock() {
+        if( pm ) {
+            if( !owns ) {
+                pm->lock();
+                owns = true;
+            } else 
+                throw_exception_v4( tbb::internal::eid_possible_deadlock );
+        } else 
+            throw_exception_v4( tbb::internal::eid_operation_not_permitted );
+        __TBB_ASSERT( owns, NULL );
+    }
+
+    //! Try to lock the mutex. 
+    /** If successful, note that this lock owns it. Otherwise, set it false. */
+    bool try_lock() {
+        if( pm ) {
+            if( !owns )
+                owns = pm->try_lock();
+            else
+                throw_exception_v4( tbb::internal::eid_possible_deadlock );
+        } else 
+            throw_exception_v4( tbb::internal::eid_operation_not_permitted );
+        return owns;
+    }
+    //! Try to lock the mutex. 
+    bool try_lock_for( const tick_count::interval_t &i );
+
+    //! Unlock the mutex
+    /** And note that this lock no longer owns it. */
+    void unlock() { 
+        if( owns ) {
+            pm->unlock();
+            owns = false;
+        } else
+            throw_exception_v4( tbb::internal::eid_operation_not_permitted );
+        __TBB_ASSERT( !owns, NULL );
+    }
+
+    // 30.4.3.2.3 modifiers
+    //! Swap the two unique locks
+    void swap(unique_lock& u) {
+        mutex_type* t_pm = u.pm;    u.pm   = pm;    pm   = t_pm;
+        bool t_owns      = u.owns;  u.owns = owns;  owns = t_owns;
+    }
+
+    //! Release control over the mutex.
+    mutex_type* release() {
+        mutex_type* o_pm = pm; 
+        pm = NULL; 
+        owns = false; 
+        return o_pm; 
+    }
+
+    // 30.4.3.2.4 observers
+    //! Does this lock own the mutex?
+    bool owns_lock() const { return owns; }
+
+    //! Does this lock own the mutex?
+    /*explicit*/ operator bool() const { return owns; }
+
+    //! Return the mutex that this lock currently has.
+    mutex_type* mutex() const { return pm; }
+
+private:
+    mutex_type* pm;
+    bool owns;
+};
+
+template<typename M>
+bool unique_lock<M>::try_lock_for( const tick_count::interval_t &i)
+{ 
+    const int unique_lock_tick = 100; /* microseconds; 0.1 milliseconds */
+    // the smallest wait-time is 0.1 milliseconds.
+    bool res = pm->try_lock();
+    int duration_in_micro; 
+    if( !res && (duration_in_micro=int(i.seconds()*1e6))>unique_lock_tick ) {
+        tick_count::interval_t i_100( double(unique_lock_tick)/1e6 /* seconds */); // 100 microseconds = 0.1e-3 seconds
+        do {
+            this_tbb_thread::sleep(i_100); // sleep for 100 micro seconds
+            duration_in_micro -= unique_lock_tick;
+            res = pm->try_lock();
+        } while( !res && duration_in_micro>unique_lock_tick );
+    }
+    return (owns=res);
+}
+
+//! Swap the two unique locks that have the mutexes of same type 
+template<typename M>
+void swap(unique_lock<M>& x, unique_lock<M>& y) { x.swap( y ); }
+
+namespace internal {
+
+#if _WIN32||_WIN64
+union condvar_impl_t {
+    condition_variable_using_event cv_event;
+    CONDITION_VARIABLE             cv_native;
+};
+
+void __TBB_EXPORTED_FUNC internal_initialize_condition_variable( condvar_impl_t& cv );
+void __TBB_EXPORTED_FUNC internal_destroy_condition_variable(    condvar_impl_t& cv );
+void __TBB_EXPORTED_FUNC internal_condition_variable_notify_one( condvar_impl_t& cv );
+void __TBB_EXPORTED_FUNC internal_condition_variable_notify_all( condvar_impl_t& cv );
+bool __TBB_EXPORTED_FUNC internal_condition_variable_wait( condvar_impl_t& cv, mutex* mtx, const tick_count::interval_t* i = NULL );
+#else /* if !(_WIN32||_WIN64), i.e., POSIX threads */
+typedef pthread_cond_t condvar_impl_t;
+#endif
+
+} // namespace internal
+
+//! cv_status
+/** C++0x standard working draft 30.5 */
+enum cv_status { no_timeout, timeout }; 
+
+//! condition variable
+/** C++0x standard working draft 30.5.1 
+    @ingroup synchronization */
+class condition_variable : tbb::internal::no_copy {
+public:
+    //! Constructor
+    condition_variable() { 
+#if _WIN32||_WIN64
+        internal_initialize_condition_variable( my_cv ); 
+#else
+        pthread_cond_init( &my_cv, NULL );
+#endif
+    }
+
+    //! Destructor
+    ~condition_variable() { 
+        //precondition: There shall be no thread blocked on *this.
+#if _WIN32||_WIN64
+        internal_destroy_condition_variable( my_cv );
+#else
+        pthread_cond_destroy( &my_cv );
+#endif
+    }
+
+    //! Notify one thread and wake it up
+    void notify_one() { 
+#if _WIN32||_WIN64
+        internal_condition_variable_notify_one( my_cv ); 
+#else
+        pthread_cond_signal( &my_cv );
+#endif
+    }
+
+    //! Notify all threads 
+    void notify_all() { 
+#if _WIN32||_WIN64
+        internal_condition_variable_notify_all( my_cv ); 
+#else
+        pthread_cond_broadcast( &my_cv );
+#endif
+    }
+
+    //! Release the mutex associated with the lock and wait on this condition variable
+    void wait(unique_lock<mutex>& lock);
+
+    //! Wait on this condition variable while pred is false
+    template <class Predicate>
+    void wait(unique_lock<mutex>& lock, Predicate pred) {
+        while( !pred() )
+            wait( lock );
+    }
+
+    //! Timed version of wait()
+    cv_status wait_for(unique_lock<mutex>& lock, const tick_count::interval_t &i );
+
+    //! Timed version of the predicated wait
+    /** The loop terminates when pred() returns true or when the time duration specified by rel_time (i) has elapsed. */
+    template<typename Predicate>
+    bool wait_for(unique_lock<mutex>& lock, const tick_count::interval_t &i, Predicate pred)
+    {
+        while( !pred() ) {
+            cv_status st = wait_for( lock, i );
+            if( st==timeout )
+                return pred();
+        }
+        return true;
+    }
+
+    // C++0x standard working draft. 30.2.3
+    typedef internal::condvar_impl_t* native_handle_type;
+
+    native_handle_type native_handle() { return (native_handle_type) &my_cv; }
+
+private:
+    internal::condvar_impl_t my_cv;
+};
+
+
+#if _WIN32||_WIN64
+inline void condition_variable::wait( unique_lock<mutex>& lock )
+{
+    __TBB_ASSERT( lock.owns, NULL );
+    lock.owns = false;
+    if( !internal_condition_variable_wait( my_cv, lock.mutex() ) ) {
+        int ec = GetLastError();
+        // on Windows 7, SleepConditionVariableCS() may return ERROR_TIMEOUT while the doc says it returns WAIT_TIMEOUT
+        __TBB_ASSERT_EX( ec!=WAIT_TIMEOUT&&ec!=ERROR_TIMEOUT, NULL );
+        lock.owns = true;
+        throw_exception_v4( tbb::internal::eid_condvar_wait_failed );
+    }
+    lock.owns = true;
+}
+
+inline cv_status condition_variable::wait_for( unique_lock<mutex>& lock, const tick_count::interval_t& i )
+{
+    cv_status rc = no_timeout;
+    __TBB_ASSERT( lock.owns, NULL );
+    lock.owns = false;
+    // condvar_wait could be SleepConditionVariableCS (or SleepConditionVariableSRW) or our own pre-vista cond_var_wait()
+    if( !internal_condition_variable_wait( my_cv, lock.mutex(), &i ) ) {
+        int ec = GetLastError();
+        if( ec==WAIT_TIMEOUT || ec==ERROR_TIMEOUT )
+            rc = timeout;
+        else {
+            lock.owns = true;
+            throw_exception_v4( tbb::internal::eid_condvar_wait_failed );
+        }
+    }
+    lock.owns = true;
+    return rc;
+}
+#else
+#if __linux__
+#include <ctime>
+#else /* generic Unix */
+#include <sys/time.h>
+#endif
+
+inline void condition_variable::wait( unique_lock<mutex>& lock )
+{
+    __TBB_ASSERT( lock.owns, NULL );
+    lock.owns = false;
+    if( pthread_cond_wait( &my_cv, lock.mutex()->native_handle() ) ) {
+        lock.owns = true;
+        throw_exception_v4( tbb::internal::eid_condvar_wait_failed );
+    }
+    // upon successful return, the mutex has been locked and is owned by the calling thread.
+    lock.owns = true;
+}
+
+inline cv_status condition_variable::wait_for( unique_lock<mutex>& lock, const tick_count::interval_t& i )
+{
+#if __linux__
+    struct timespec req;
+    double sec = i.seconds();
+    clock_gettime( CLOCK_REALTIME, &req );
+    req.tv_sec  += static_cast<long>(sec);
+    req.tv_nsec += static_cast<long>( (sec - static_cast<long>(sec))*1e9 );
+#else /* generic Unix */
+    struct timeval tv;
+    struct timespec req;
+    double sec = i.seconds();
+    int status = gettimeofday(&tv, NULL);
+    __TBB_ASSERT_EX( status==0, "gettimeofday failed" );
+    req.tv_sec  = tv.tv_sec + static_cast<long>(sec);
+    req.tv_nsec = tv.tv_usec*1000 + static_cast<long>( (sec - static_cast<long>(sec))*1e9 );
+#endif /*(choice of OS) */
+
+    int ec;
+    cv_status rc = no_timeout;
+    __TBB_ASSERT( lock.owns, NULL );
+    lock.owns = false;
+    if( ( ec=pthread_cond_timedwait( &my_cv, lock.mutex()->native_handle(), &req ) ) ) {
+        if( ec==ETIMEDOUT )
+            rc = timeout;
+        else {
+            __TBB_ASSERT( lock.try_lock()==false, NULL );
+            lock.owns = true;
+            throw_exception_v4( tbb::internal::eid_condvar_wait_failed );
+        }
+    }
+    lock.owns = true;
+    return rc;
+}
+#endif /* !(_WIN32||_WIN64) */
+
+} // namespace interface5
+
+__TBB_DEFINE_PROFILING_SET_NAME(interface5::condition_variable)
+
+} // namespace tbb 
+
+#if TBB_IMPLEMENT_CPP0X
+
+namespace std {
+
+using tbb::interface5::defer_lock_t;
+using tbb::interface5::try_to_lock_t;
+using tbb::interface5::adopt_lock_t;
+using tbb::interface5::defer_lock;
+using tbb::interface5::try_to_lock;
+using tbb::interface5::adopt_lock;
+using tbb::interface5::lock_guard;
+using tbb::interface5::unique_lock;
+using tbb::interface5::swap;   /* this is for void std::swap(unique_lock<M>&,unique_lock<M>&) */
+using tbb::interface5::condition_variable;
+using tbb::interface5::cv_status;
+using tbb::interface5::timeout;
+using tbb::interface5::no_timeout;
+
+} // namespace std 
+
+#endif /* TBB_IMPLEMENT_CPP0X */
+
+#endif /* __TBB_condition_variable_H */
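A condensed sketch of the compat condition_variable together with unique_lock; the flag, the thread bodies, and their names are invented for the example.

#include "tbb/compat/condition_variable"
#include "tbb/mutex.h"
#include "tbb/tbb_thread.h"

static tbb::mutex mtx;
static tbb::interface5::condition_variable cv;
static bool ready = false;

static void producer() {
    {
        tbb::interface5::lock_guard<tbb::mutex> guard( mtx );
        ready = true;
    }
    cv.notify_one();                                  // wake the waiting consumer
}

static void consumer() {
    tbb::interface5::unique_lock<tbb::mutex> lk( mtx );
    while( !ready )                                   // re-check: guards against spurious wakeups
        cv.wait( lk );                                // releases mtx while blocked, reacquires on return
}

int main() {
    tbb::tbb_thread waiter( consumer );
    tbb::tbb_thread signaller( producer );
    waiter.join();
    signaller.join();
    return 0;
}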
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/compat/ppl.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/compat/ppl.h
new file mode 100644 (file)
index 0000000..f524abf
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_compat_ppl_H
+#define __TBB_compat_ppl_H
+
+#include "../task_group.h"
+#include "../parallel_invoke.h"
+#include "../parallel_for_each.h"
+#include "../parallel_for.h"
+#include "../tbb_exception.h"
+#include "../critical_section.h"
+#include "../reader_writer_lock.h"
+#include "../combinable.h"
+
+namespace Concurrency {
+
+    using tbb::task_handle;
+    using tbb::task_group_status;
+    using tbb::task_group;
+    using tbb::structured_task_group;
+    using tbb::invalid_multiple_scheduling;
+    using tbb::missing_wait;
+    using tbb::make_task;
+
+    using tbb::not_complete;
+    using tbb::complete;
+    using tbb::canceled;
+
+    using tbb::is_current_task_group_canceling;
+
+    using tbb::parallel_invoke;
+    using tbb::strict_ppl::parallel_for;
+    using tbb::parallel_for_each;
+    using tbb::critical_section;
+    using tbb::reader_writer_lock;
+    using tbb::combinable;
+
+    using tbb::improper_lock;
+
+} // namespace Concurrency
+
+#endif /* __TBB_compat_ppl_H */
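The compat header only re-exports TBB names under the PPL-style Concurrency namespace; a minimal sketch (the functions are invented) of what that enables:

#include <cstdio>
#include "tbb/compat/ppl.h"

static void hello() { std::printf( "hello\n" ); }
static void world() { std::printf( "world\n" ); }

int main() {
    Concurrency::parallel_invoke( hello, world );   // maps to tbb::parallel_invoke

    Concurrency::task_group g;                      // maps to tbb::task_group
    g.run( hello );                                 // spawn a task
    g.wait();                                       // wait for all tasks in the group
    return 0;
}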
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/compat/thread b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/compat/thread
new file mode 100644 (file)
index 0000000..e4d3b32
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_thread_H
+#define __TBB_thread_H
+
+#include "../tbb_thread.h"
+
+#if TBB_IMPLEMENT_CPP0X
+
+namespace std {
+
+typedef tbb::tbb_thread thread;
+
+namespace this_thread {
+    using tbb::this_tbb_thread::get_id;
+    using tbb::this_tbb_thread::yield;
+
+    inline void sleep_for(const tbb::tick_count::interval_t& rel_time) {
+        tbb::internal::thread_sleep_v3( rel_time );
+    }
+
+}
+
+}
+
+#endif /* TBB_IMPLEMENT_CPP0X */
+
+#endif /* __TBB_thread_H */
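When TBB_IMPLEMENT_CPP0X is enabled, the compat header exposes tbb_thread under the std::thread name; a small sketch (the worker function is invented):

#include <cstdio>
#include "tbb/compat/thread"
#include "tbb/tbb_thread.h"

static void work() { std::printf( "worker running\n" ); }

int main() {
#if TBB_IMPLEMENT_CPP0X
    std::thread t( work );          // typedef for tbb::tbb_thread
    std::this_thread::yield();      // forwarded to tbb::this_tbb_thread::yield
#else
    tbb::tbb_thread t( work );      // fall back to the native TBB name
#endif
    t.join();
    return 0;
}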
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/concurrent_hash_map.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/concurrent_hash_map.h
new file mode 100644 (file)
index 0000000..75b0f85
--- /dev/null
@@ -0,0 +1,1406 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_concurrent_hash_map_H
+#define __TBB_concurrent_hash_map_H
+
+#include "tbb_stddef.h"
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <iterator>
+#include <utility>      // Need std::pair
+#include <cstring>      // Need std::memset
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+#include "cache_aligned_allocator.h"
+#include "tbb_allocator.h"
+#include "spin_rw_mutex.h"
+#include "atomic.h"
+#include "aligned_space.h"
+#include "tbb_exception.h"
+#include "_concurrent_unordered_internal.h" // Need tbb_hasher
+#if TBB_USE_PERFORMANCE_WARNINGS
+#include <typeinfo>
+#endif
+
+namespace tbb {
+
+//! @cond INTERNAL
+namespace internal {
+    //! ITT instrumented routine that loads pointer from location pointed to by src.
+    void* __TBB_EXPORTED_FUNC itt_load_pointer_with_acquire_v3( const void* src );
+    //! ITT instrumented routine that stores src into location pointed to by dst.
+    void __TBB_EXPORTED_FUNC itt_store_pointer_with_release_v3( void* dst, void* src );
+    //! Routine that loads pointer from location pointed to by src without causing ITT to report a race.
+    void* __TBB_EXPORTED_FUNC itt_load_pointer_v3( const void* src );
+}
+//! @endcond
+
+//! hash_compare that is default argument for concurrent_hash_map
+template<typename Key>
+struct tbb_hash_compare {
+    static size_t hash( const Key& a ) { return tbb_hasher(a); }
+    static bool equal( const Key& a, const Key& b ) { return a == b; }
+};
+
+namespace interface4 {
+
+    template<typename Key, typename T, typename HashCompare = tbb_hash_compare<Key>, typename A = tbb_allocator<std::pair<Key, T> > >
+    class concurrent_hash_map;
+
+    //! @cond INTERNAL
+    namespace internal {
+
+
+    //! Type of a hash code.
+    typedef size_t hashcode_t;
+    //! Node base type
+    struct hash_map_node_base : tbb::internal::no_copy {
+        //! Mutex type
+        typedef spin_rw_mutex mutex_t;
+        //! Scoped lock type for mutex
+        typedef mutex_t::scoped_lock scoped_t;
+        //! Next node in chain
+        hash_map_node_base *next;
+        mutex_t mutex;
+    };
+    //! Incompleteness flag value
+    static hash_map_node_base *const rehash_req = reinterpret_cast<hash_map_node_base*>(size_t(3));
+    //! Rehashed empty bucket flag
+    static hash_map_node_base *const empty_rehashed = reinterpret_cast<hash_map_node_base*>(size_t(0));
+    //! base class of concurrent_hash_map
+    class hash_map_base {
+    public:
+        //! Size type
+        typedef size_t size_type;
+        //! Type of a hash code.
+        typedef size_t hashcode_t;
+        //! Segment index type
+        typedef size_t segment_index_t;
+        //! Node base type
+        typedef hash_map_node_base node_base;
+        //! Bucket type
+        struct bucket : tbb::internal::no_copy {
+            //! Mutex type for buckets
+            typedef spin_rw_mutex mutex_t;
+            //! Scoped lock type for mutex
+            typedef mutex_t::scoped_lock scoped_t;
+            mutex_t mutex;
+            node_base *node_list;
+        };
+        //! Count of embedded segments
+        static size_type const embedded_block = 1;
+        //! Count of segments in the first block
+        static size_type const embedded_buckets = 1<<embedded_block;
+        //! Count of segments in the first block
+        static size_type const first_block = 8; //including embedded_block. perfect with bucket size 16, so the allocations are power of 4096
+        //! Size of a pointer / table size
+        static size_type const pointers_per_table = sizeof(segment_index_t) * 8; // one segment per bit
+        //! Segment pointer
+        typedef bucket *segment_ptr_t;
+        //! Segment pointers table type
+        typedef segment_ptr_t segments_table_t[pointers_per_table];
+        //! Hash mask = sum of allocated segment sizes - 1
+        atomic<hashcode_t> my_mask;
+        //! Segment pointers table. Also prevents false sharing between my_mask and my_size
+        segments_table_t my_table;
+        //! Size of container in stored items
+        atomic<size_type> my_size; // It must be in separate cache line from my_mask due to performance effects
+        //! Zero segment
+        bucket my_embedded_segment[embedded_buckets];
+#if __TBB_STATISTICS
+        atomic<unsigned> my_info_resizes; // concurrent ones
+        mutable atomic<unsigned> my_info_restarts; // race collisions
+        atomic<unsigned> my_info_rehashes;  // invocations of rehash_bucket
+        #if !TBB_USE_PERFORMANCE_WARNINGS
+        #error Please enable TBB_USE_PERFORMANCE_WARNINGS as well
+        #endif
+#endif
+        //! Constructor
+        hash_map_base() {
+            std::memset( this, 0, pointers_per_table*sizeof(segment_ptr_t) // 32*4=128   or 64*8=512
+                + sizeof(my_size) + sizeof(my_mask)  // 4+4 or 8+8
+                + embedded_buckets*sizeof(bucket) ); // n*8 or n*16
+            for( size_type i = 0; i < embedded_block; i++ ) // fill the table
+                my_table[i] = my_embedded_segment + segment_base(i);
+            my_mask = embedded_buckets - 1;
+            __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
+#if __TBB_STATISTICS
+            my_info_resizes = 0; // concurrent ones
+            my_info_restarts = 0; // race collisions
+            my_info_rehashes = 0;  // invocations of rehash_bucket
+#endif
+        }
+
+        //! @return segment index of given index in the array
+        static segment_index_t segment_index_of( size_type index ) {
+            return segment_index_t( __TBB_Log2( index|1 ) );
+        }
+
+        //! @return the first array index of given segment
+        static segment_index_t segment_base( segment_index_t k ) {
+            return (segment_index_t(1)<<k & ~segment_index_t(1));
+        }
+
+        //! @return segment size except for @arg k == 0
+        static size_type segment_size( segment_index_t k ) {
+            return size_type(1)<<k; // fake value for k==0
+        }
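+
+        // Illustrative values (derived from the three helpers above): segment 0 is the
+        // embedded segment and covers bucket indices [0,1]; segment k>=1 covers indices
+        // [2^k, 2^(k+1)-1].
+        //   segment_index_of(0)==segment_index_of(1)==0,  segment_index_of(2..3)==1,  segment_index_of(4..7)==2
+        //   segment_base(0)==0,  segment_base(1)==2,  segment_base(2)==4,  segment_base(3)==8
+        //   segment_size(1)==2,  segment_size(2)==4,  segment_size(3)==8  (k==0 yields the fake value 1)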
+        
+        //! @return true if @arg ptr is a valid pointer
+        static bool is_valid( void *ptr ) {
+            return reinterpret_cast<size_t>(ptr) > size_t(63);
+        }
+
+        //! Initialize buckets
+        static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) {
+            if( is_initial ) std::memset(ptr, 0, sz*sizeof(bucket) );
+            else for(size_type i = 0; i < sz; i++, ptr++) {
+                    *reinterpret_cast<intptr_t*>(&ptr->mutex) = 0;
+                    ptr->node_list = rehash_req;
+                }
+        }
+        
+        //! Add node @arg n to bucket @arg b
+        static void add_to_bucket( bucket *b, node_base *n ) {
+            __TBB_ASSERT(b->node_list != rehash_req, NULL);
+            n->next = b->node_list;
+            b->node_list = n; // it's under lock and the flag is set
+        }
+
+        //! Exception safety helper
+        struct enable_segment_failsafe {
+            segment_ptr_t *my_segment_ptr;
+            enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {}
+            ~enable_segment_failsafe() {
+                if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress
+            }
+        };
+
+        //! Enable segment
+        void enable_segment( segment_index_t k, bool is_initial = false ) {
+            __TBB_ASSERT( k, "Zero segment must be embedded" );
+            enable_segment_failsafe watchdog( my_table, k );
+            cache_aligned_allocator<bucket> alloc;
+            size_type sz;
+            __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment");
+            if( k >= first_block ) {
+                sz = segment_size( k );
+                segment_ptr_t ptr = alloc.allocate( sz );
+                init_buckets( ptr, sz, is_initial );
+#if TBB_USE_THREADING_TOOLS
+                // TODO: actually, fence and notification are unnecessary here and below
+                itt_store_pointer_with_release_v3( my_table + k, ptr );
+#else
+                my_table[k] = ptr;// my_mask has release fence
+#endif
+                sz <<= 1;// double it to get entire capacity of the container
+            } else { // the first block
+                __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
+                sz = segment_size( first_block );
+                segment_ptr_t ptr = alloc.allocate( sz - embedded_buckets );
+                init_buckets( ptr, sz - embedded_buckets, is_initial );
+                ptr -= segment_base(embedded_block);
+                for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets
+#if TBB_USE_THREADING_TOOLS
+                    itt_store_pointer_with_release_v3( my_table + i, ptr + segment_base(i) );
+#else
+                    my_table[i] = ptr + segment_base(i);
+#endif
+            }
+#if TBB_USE_THREADING_TOOLS
+            itt_store_pointer_with_release_v3( &my_mask, (void*)(sz-1) );
+#else
+            my_mask = sz - 1;
+#endif
+            watchdog.my_segment_ptr = 0;
+        }
+
+        //! Get bucket by (masked) hashcode
+        bucket *get_bucket( hashcode_t h ) const throw() { // TODO: add throw() everywhere?
+            segment_index_t s = segment_index_of( h );
+            h -= segment_base(s);
+            segment_ptr_t seg = my_table[s];
+            __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
+            return &seg[h];
+        }
+
+        // internal serial rehashing helper
+        void mark_rehashed_levels( hashcode_t h ) throw () {
+            segment_index_t s = segment_index_of( h );
+            while( segment_ptr_t seg = my_table[++s] )
+                if( seg[h].node_list == rehash_req ) {
+                    seg[h].node_list = empty_rehashed;
+                    mark_rehashed_levels( h + segment_base(s) );
+                }
+        }
+
+        //! Check for mask race
+        // Splitting into two functions should help inlining
+        inline bool check_mask_race( const hashcode_t h, hashcode_t &m ) const {
+            hashcode_t m_now, m_old = m;
+#if TBB_USE_THREADING_TOOLS
+            m_now = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
+#else
+            m_now = my_mask;
+#endif
+            if( m_old != m_now )
+                return check_rehashing_collision( h, m_old, m = m_now );
+            return false;
+        }
+
+        //! Process mask race, check for rehashing collision
+        bool check_rehashing_collision( const hashcode_t h, hashcode_t m_old, hashcode_t m ) const {
+            __TBB_ASSERT(m_old != m, NULL); // TODO?: m arg could be optimized out by passing h = h&m
+            if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event
+                // the condition above proves that 'h' has some other bits set besides those in 'm_old'
+                // find next applicable mask after m_old    //TODO: look at bsl instruction
+                for( ++m_old; !(h & m_old); m_old <<= 1 ) // at most a few rounds, depending on the first block size
+                    ;
+                m_old = (m_old<<1) - 1; // get full mask from a bit
+                __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, NULL);
+                // check whether it is rehashing/ed
+#if TBB_USE_THREADING_TOOLS
+                if( itt_load_pointer_with_acquire_v3(&( get_bucket(h & m_old)->node_list )) != rehash_req )
+#else
+                if( __TBB_load_with_acquire(get_bucket( h & m_old )->node_list) != rehash_req )
+#endif
+                {
+#if __TBB_STATISTICS
+                    my_info_restarts++; // race collisions
+#endif
+                    return true;
+                }
+            }
+            return false;
+        }
+
+        //! Insert a node and check for load factor. @return segment index to enable.
+        segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) {
+            size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted
+            add_to_bucket( b, n );
+            // check load factor
+            if( sz >= mask ) { // TODO: add custom load_factor 
+                segment_index_t new_seg = segment_index_of( mask+1 );
+                __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated");
+#if TBB_USE_THREADING_TOOLS
+                if( !itt_load_pointer_v3(my_table+new_seg)
+#else
+                if( !my_table[new_seg]
+#endif
+                  && __TBB_CompareAndSwapW(&my_table[new_seg], 2, 0) == 0 )
+                    return new_seg; // The value must be processed
+            }
+            return 0;
+        }
+
+        //! Prepare enough segments for number of buckets
+        void reserve(size_type buckets) {
+            if( !buckets-- ) return;
+            bool is_initial = !my_size;
+            for( size_type m = my_mask; buckets > m; m = my_mask )
+                enable_segment( segment_index_of( m+1 ), is_initial );
+        }
+        //! Swap hash_map_bases
+        void internal_swap(hash_map_base &table) {
+            std::swap(this->my_mask, table.my_mask);
+            std::swap(this->my_size, table.my_size);
+            for(size_type i = 0; i < embedded_buckets; i++)
+                std::swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list);
+            for(size_type i = embedded_block; i < pointers_per_table; i++)
+                std::swap(this->my_table[i], table.my_table[i]);
+        }
+    };
+
+    template<typename Iterator>
+    class hash_map_range;
+
+    //! Meets requirements of a forward iterator for STL
+    /** Value is either the T or const T type of the container.
+        @ingroup containers */ 
+    template<typename Container, typename Value>
+    class hash_map_iterator
+        : public std::iterator<std::forward_iterator_tag,Value>
+    {
+        typedef Container map_type;
+        typedef typename Container::node node;
+        typedef hash_map_base::node_base node_base;
+        typedef hash_map_base::bucket bucket;
+
+        template<typename C, typename T, typename U>
+        friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
+
+        template<typename C, typename T, typename U>
+        friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
+
+        template<typename C, typename T, typename U>
+        friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
+    
+        template<typename C, typename U>
+        friend class hash_map_iterator;
+
+        template<typename I>
+        friend class hash_map_range;
+
+        void advance_to_next_bucket() { // TODO?: refactor to iterator_base class
+            size_t k = my_index+1;
+            while( my_bucket && k <= my_map->my_mask ) {
+                // Following test uses 2's-complement wizardry
+                if( k& (k-2) ) // not the beginning of a segment
+                    ++my_bucket;
+                else my_bucket = my_map->get_bucket( k );
+                my_node = static_cast<node*>( my_bucket->node_list );
+                if( hash_map_base::is_valid(my_node) ) {
+                    my_index = k; return;
+                }
+                ++k;
+            }
+            my_bucket = 0; my_node = 0; my_index = k; // the end
+        }
+#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
+        template<typename Key, typename T, typename HashCompare, typename A>
+        friend class interface4::concurrent_hash_map;
+#else
+    public: // workaround
+#endif
+        //! concurrent_hash_map over which we are iterating.
+        const Container *my_map;
+
+        //! Index in hash table for current item
+        size_t my_index;
+
+        //! Pointer to bucket
+        const bucket *my_bucket;
+
+        //! Pointer to node that has current item
+        node *my_node;
+
+        hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n );
+
+    public:
+        //! Construct undefined iterator
+        hash_map_iterator() {}
+        hash_map_iterator( const hash_map_iterator<Container,typename Container::value_type> &other ) :
+            my_map(other.my_map),
+            my_index(other.my_index),
+            my_bucket(other.my_bucket),
+            my_node(other.my_node)
+        {}
+        Value& operator*() const {
+            __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
+            return my_node->item;
+        }
+        Value* operator->() const {return &operator*();}
+        hash_map_iterator& operator++();
+        
+        //! Post increment
+        hash_map_iterator operator++(int) {
+            hash_map_iterator old(*this);
+            operator++();
+            return old;
+        }
+    };
+
+    template<typename Container, typename Value>
+    hash_map_iterator<Container,Value>::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) :
+        my_map(&map),
+        my_index(index),
+        my_bucket(b),
+        my_node( static_cast<node*>(n) )
+    {
+        if( b && !hash_map_base::is_valid(n) )
+            advance_to_next_bucket();
+    }
+
+    template<typename Container, typename Value>
+    hash_map_iterator<Container,Value>& hash_map_iterator<Container,Value>::operator++() {
+        my_node = static_cast<node*>( my_node->next );
+        if( !my_node ) advance_to_next_bucket();
+        return *this;
+    }
+
+    template<typename Container, typename T, typename U>
+    bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
+        return i.my_node == j.my_node && i.my_map == j.my_map;
+    }
+
+    template<typename Container, typename T, typename U>
+    bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
+        return i.my_node != j.my_node || i.my_map != j.my_map;
+    }
+
+    //! Range class used with concurrent_hash_map
+    /** @ingroup containers */ 
+    template<typename Iterator>
+    class hash_map_range {
+        typedef typename Iterator::map_type map_type;
+        Iterator my_begin;
+        Iterator my_end;
+        mutable Iterator my_midpoint;
+        size_t my_grainsize;
+        //! Set my_midpoint to point approximately half way between my_begin and my_end.
+        void set_midpoint() const;
+        template<typename U> friend class hash_map_range;
+    public:
+        //! Type for size of a range
+        typedef std::size_t size_type;
+        typedef typename Iterator::value_type value_type;
+        typedef typename Iterator::reference reference;
+        typedef typename Iterator::difference_type difference_type;
+        typedef Iterator iterator;
+
+        //! True if range is empty.
+        bool empty() const {return my_begin==my_end;}
+
+        //! True if range can be partitioned into two subranges.
+        bool is_divisible() const {
+            return my_midpoint!=my_end;
+        }
+        //! Split range.
+        hash_map_range( hash_map_range& r, split ) : 
+            my_end(r.my_end),
+            my_grainsize(r.my_grainsize)
+        {
+            r.my_end = my_begin = r.my_midpoint;
+            __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
+            __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
+            set_midpoint();
+            r.set_midpoint();
+        }
+        //! type conversion
+        template<typename U>
+        hash_map_range( hash_map_range<U>& r) : 
+            my_begin(r.my_begin),
+            my_end(r.my_end),
+            my_midpoint(r.my_midpoint),
+            my_grainsize(r.my_grainsize)
+        {}
+#if TBB_DEPRECATED
+        //! Init range with iterators and grainsize specified
+        hash_map_range( const Iterator& begin_, const Iterator& end_, size_type grainsize_ = 1 ) : 
+            my_begin(begin_), 
+            my_end(end_),
+            my_grainsize(grainsize_)
+        {
+            if(!my_end.my_index && !my_end.my_bucket) // end
+                my_end.my_index = my_end.my_map->my_mask + 1;
+            set_midpoint();
+            __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
+        }
+#endif
+        //! Init range with container and grainsize specified
+        hash_map_range( const map_type &map, size_type grainsize_ = 1 ) : 
+            my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),
+            my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
+            my_grainsize( grainsize_ )
+        {
+            __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
+            set_midpoint();
+        }
+        const Iterator& begin() const {return my_begin;}
+        const Iterator& end() const {return my_end;}
+        //! The grain size for this range.
+        size_type grainsize() const {return my_grainsize;}
+    };
+
+    template<typename Iterator>
+    void hash_map_range<Iterator>::set_midpoint() const {
+        // Split by groups of nodes
+        size_t m = my_end.my_index-my_begin.my_index;
+        if( m > my_grainsize ) {
+            m = my_begin.my_index + m/2u;
+            hash_map_base::bucket *b = my_begin.my_map->get_bucket(m);
+            my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);
+        } else {
+            my_midpoint = my_end;
+        }
+        __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
+            "my_begin is after my_midpoint" );
+        __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
+            "my_midpoint is after my_end" );
+        __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
+            "[my_begin, my_midpoint) range should not be empty" );
+    }
+
+    } // internal
+//! @endcond
+
+//! Unordered map from Key to T.
+/** concurrent_hash_map is an associative container with concurrent access.
+
+@par Compatibility
+    The class meets all Container Requirements from the C++ Standard (see ISO/IEC 14882:2003(E), clause 23.1).
+
+@par Exception Safety
+    - The hash function is not permitted to throw an exception. The destructors of the user-defined types Key and T must not throw exceptions.
+    - If an exception occurs during an insert() operation, the operation has no effect (unless the exception is raised by the HashCompare::hash() function during grow_segment).
+    - If an exception occurs during operator=(), the container may hold only part of the source items, and the methods size() and empty() may return incorrect results.
+
+@par Changes since TBB 2.1
+    - Replaced internal algorithm and data structure. A patent is pending.
+    - Added a bucket count argument to the constructor
+
+@par Changes since TBB 2.0
+    - Fixed exception-safety
+    - Added template argument for allocator
+    - Added allocator argument in constructors
+    - Added constructor from a range of iterators
+    - Added several new overloaded insert() methods
+    - Added get_allocator()
+    - Added swap()
+    - Added count()
+    - Added overloaded erase(accessor &) and erase(const_accessor&)
+    - Added equal_range() [const]
+    - Added [const_]pointer, [const_]reference, and allocator_type types
+    - Added global functions: operator==(), operator!=(), and swap() 
+
+    @ingroup containers */
+template<typename Key, typename T, typename HashCompare, typename Allocator>
+class concurrent_hash_map : protected internal::hash_map_base {
+    template<typename Container, typename Value>
+    friend class internal::hash_map_iterator;
+
+    template<typename I>
+    friend class internal::hash_map_range;
+
+public:
+    typedef Key key_type;
+    typedef T mapped_type;
+    typedef std::pair<const Key,T> value_type;
+    typedef hash_map_base::size_type size_type;
+    typedef ptrdiff_t difference_type;
+    typedef value_type *pointer;
+    typedef const value_type *const_pointer;
+    typedef value_type &reference;
+    typedef const value_type &const_reference;
+    typedef internal::hash_map_iterator<concurrent_hash_map,value_type> iterator;
+    typedef internal::hash_map_iterator<concurrent_hash_map,const value_type> const_iterator;
+    typedef internal::hash_map_range<iterator> range_type;
+    typedef internal::hash_map_range<const_iterator> const_range_type;
+    typedef Allocator allocator_type;
+
+protected:
+    friend class const_accessor;
+    struct node;
+    typedef typename Allocator::template rebind<node>::other node_allocator_type;
+    node_allocator_type my_allocator;
+    HashCompare my_hash_compare;
+
+    struct node : public node_base {
+        value_type item;
+        node( const Key &key ) : item(key, T()) {}
+        node( const Key &key, const T &t ) : item(key, t) {}
+        // exception-safe allocation, see C++ Standard 2003, clause 5.3.4p17
+        void *operator new( size_t /*size*/, node_allocator_type &a ) {
+            void *ptr = a.allocate(1);
+            if(!ptr) 
+                tbb::internal::throw_exception(tbb::internal::eid_bad_alloc);
+            return ptr;
+        }
+        // matches the placement-new form above; called if an exception is thrown in the constructor
+        void operator delete( void *ptr, node_allocator_type &a ) {return a.deallocate(static_cast<node*>(ptr),1); }
+    };
+
+    void delete_node( node_base *n ) {
+        my_allocator.destroy( static_cast<node*>(n) );
+        my_allocator.deallocate( static_cast<node*>(n), 1);
+    }
+
+    node *search_bucket( const key_type &key, bucket *b ) const {
+        node *n = static_cast<node*>( b->node_list );
+        while( is_valid(n) && !my_hash_compare.equal(key, n->item.first) )
+            n = static_cast<node*>( n->next );
+        __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket");
+        return n;
+    }
+
+    //! A bucket accessor finds a bucket, rehashes it if needed, acquires its lock, and provides access to it
+    class bucket_accessor : public bucket::scoped_t {
+        bool my_is_writer; // TODO: use it from base type
+        bucket *my_b;
+    public:
+        bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); }
+        //! find a bucket by masked hashcode, optionally rehash, and acquire the lock
+        inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) {
+            my_b = base->get_bucket( h );
+#if TBB_USE_THREADING_TOOLS
+            // TODO: actually, notification is unnecessary here, just hiding double-check
+            if( itt_load_pointer_with_acquire_v3(&my_b->node_list) == internal::rehash_req
+#else
+            if( __TBB_load_with_acquire(my_b->node_list) == internal::rehash_req
+#endif
+                && try_acquire( my_b->mutex, /*write=*/true ) )
+            {
+                if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); //recursive rehashing
+                my_is_writer = true;
+            }
+            else bucket::scoped_t::acquire( my_b->mutex, /*write=*/my_is_writer = writer );
+            __TBB_ASSERT( my_b->node_list != internal::rehash_req, NULL);
+        }
+        //! check whether bucket is locked for write
+        bool is_writer() { return my_is_writer; }
+        //! get bucket pointer
+        bucket *operator() () { return my_b; }
+        // TODO: optimize out
+        bool upgrade_to_writer() { my_is_writer = true; return bucket::scoped_t::upgrade_to_writer(); }
+    };
+
+    // TODO refactor to hash_base
+    void rehash_bucket( bucket *b_new, const hashcode_t h ) {
+        __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)");
+        __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
+        __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed
+        hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
+#if __TBB_STATISTICS
+        my_info_rehashes++; // invocations of rehash_bucket
+#endif
+
+        bucket_accessor b_old( this, h & mask );
+
+        mask = (mask<<1) | 1; // get full mask for new bucket
+        __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL );
+    restart:
+        for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) {
+            hashcode_t c = my_hash_compare.hash( static_cast<node*>(n)->item.first );
+#if TBB_USE_ASSERT
+            hashcode_t bmask = h & (mask>>1);
+            bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket
+            __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" );
+#endif
+            if( (c & mask) == h ) {
+                if( !b_old.is_writer() )
+                    if( !b_old.upgrade_to_writer() ) {
+                        goto restart; // node ptr can be invalid due to concurrent erase
+                    }
+                *p = n->next; // exclude from b_old
+                add_to_bucket( b_new, n );
+            } else p = &n->next; // iterate to next item
+        }
+    }
+
+public:
+    
+    class accessor;
+    //! Combines data access, locking, and garbage collection.
+    class const_accessor {
+        friend class concurrent_hash_map<Key,T,HashCompare,Allocator>;
+        friend class accessor;
+        void operator=( const accessor & ) const; // Deny access
+        const_accessor( const accessor & );       // Deny access
+    public:
+        //! Type of value
+        typedef const typename concurrent_hash_map::value_type value_type;
+
+        //! True if result is empty.
+        bool empty() const {return !my_node;}
+
+        //! Set to null
+        void release() {
+            if( my_node ) {
+                my_lock.release();
+                my_node = 0;
+            }
+        }
+
+        //! Return reference to associated value in hash table.
+        const_reference operator*() const {
+            __TBB_ASSERT( my_node, "attempt to dereference empty accessor" );
+            return my_node->item;
+        }
+
+        //! Return pointer to associated value in hash table.
+        const_pointer operator->() const {
+            return &operator*();
+        }
+
+        //! Create empty result
+        const_accessor() : my_node(NULL) {}
+
+        //! Destroy result after releasing the underlying reference.
+        ~const_accessor() {
+            my_node = NULL; // my_lock.release() is called in scoped_lock destructor
+        }
+    private:
+        node *my_node;
+        typename node::scoped_t my_lock;
+        hashcode_t my_hash;
+    };
+
+    //! Allows write access to elements and combines data access, locking, and garbage collection.
+    class accessor: public const_accessor {
+    public:
+        //! Type of value
+        typedef typename concurrent_hash_map::value_type value_type;
+
+        //! Return reference to associated value in hash table.
+        reference operator*() const {
+            __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" );
+            return this->my_node->item;
+        }
+
+        //! Return pointer to associated value in hash table.
+        pointer operator->() const {
+            return &operator*();
+        }
+    };
+
+    //! Construct empty table.
+    concurrent_hash_map(const allocator_type &a = allocator_type())
+        : internal::hash_map_base(), my_allocator(a)
+    {}
+
+    //! Construct empty table with n preallocated buckets. This number also serves as the initial concurrency level.
+    concurrent_hash_map(size_type n, const allocator_type &a = allocator_type())
+        : my_allocator(a)
+    {
+        reserve( n );
+    }
+
+    //! Copy constructor
+    concurrent_hash_map( const concurrent_hash_map& table, const allocator_type &a = allocator_type())
+        : internal::hash_map_base(), my_allocator(a)
+    {
+        internal_copy(table);
+    }
+
+    //! Construction with copying iteration range and given allocator instance
+    template<typename I>
+    concurrent_hash_map(I first, I last, const allocator_type &a = allocator_type())
+        : my_allocator(a)
+    {
+        reserve( std::distance(first, last) ); // TODO: load_factor?
+        internal_copy(first, last);
+    }
+
+    //! Assignment
+    concurrent_hash_map& operator=( const concurrent_hash_map& table ) {
+        if( this!=&table ) {
+            clear();
+            internal_copy(table);
+        } 
+        return *this;
+    }
+
+
+    //! Rehashes and optionally resizes the whole table.
+    /** Useful to optimize performance before or after concurrent operations.
+        Also enables the use of the concurrent find() and count() methods in a serial context. */
+    void rehash(size_type n = 0);
+    
+    //! Clear table
+    void clear();
+
+    //! Clear table and destroy it.  
+    ~concurrent_hash_map() { clear(); }
+
+    //------------------------------------------------------------------------
+    // Parallel algorithm support
+    //------------------------------------------------------------------------
+    range_type range( size_type grainsize=1 ) {
+        return range_type( *this, grainsize );
+    }
+    const_range_type range( size_type grainsize=1 ) const {
+        return const_range_type( *this, grainsize );
+    }
+
+    //------------------------------------------------------------------------
+    // STL support - not thread-safe methods
+    //------------------------------------------------------------------------
+    iterator begin() {return iterator(*this,0,my_embedded_segment,my_embedded_segment->node_list);}
+    iterator end() {return iterator(*this,0,0,0);}
+    const_iterator begin() const {return const_iterator(*this,0,my_embedded_segment,my_embedded_segment->node_list);}
+    const_iterator end() const {return const_iterator(*this,0,0,0);}
+    std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range(key, end()); }
+    std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range(key, end()); }
+    
+    //! Number of items in table.
+    size_type size() const { return my_size; }
+
+    //! True if size()==0.
+    bool empty() const { return my_size == 0; }
+
+    //! Upper bound on size.
+    size_type max_size() const {return (~size_type(0))/sizeof(node);}
+
+    //! Returns the current number of buckets
+    size_type bucket_count() const { return my_mask+1; }
+
+    //! return allocator object
+    allocator_type get_allocator() const { return this->my_allocator; }
+
+    //! swap two instances. Iterators are invalidated
+    void swap(concurrent_hash_map &table);
+
+    //------------------------------------------------------------------------
+    // concurrent map operations
+    //------------------------------------------------------------------------
+
+    //! Return count of items (0 or 1)
+    size_type count( const Key &key ) const {
+        return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false );
+    }
+
+    //! Find item and acquire a read lock on the item.
+    /** Return true if item is found, false otherwise. */
+    bool find( const_accessor &result, const Key &key ) const {
+        result.release();
+        return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false );
+    }
+
+    //! Find item and acquire a write lock on the item.
+    /** Return true if item is found, false otherwise. */
+    bool find( accessor &result, const Key &key ) {
+        result.release();
+        return lookup(/*insert*/false, key, NULL, &result, /*write=*/true );
+    }
+        
+    //! Insert item (if not already present) and acquire a read lock on the item.
+    /** Returns true if item is new. */
+    bool insert( const_accessor &result, const Key &key ) {
+        result.release();
+        return lookup(/*insert*/true, key, NULL, &result, /*write=*/false );
+    }
+
+    //! Insert item (if not already present) and acquire a write lock on the item.
+    /** Returns true if item is new. */
+    bool insert( accessor &result, const Key &key ) {
+        result.release();
+        return lookup(/*insert*/true, key, NULL, &result, /*write=*/true );
+    }
+
+    //! Insert item by copying if there is no such key present already and acquire a read lock on the item.
+    /** Returns true if item is new. */
+    bool insert( const_accessor &result, const value_type &value ) {
+        result.release();
+        return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false );
+    }
+
+    //! Insert item by copying if there is no such key present already and acquire a write lock on the item.
+    /** Returns true if item is new. */
+    bool insert( accessor &result, const value_type &value ) {
+        result.release();
+        return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true );
+    }
+
+    //! Insert item by copying if there is no such key present already
+    /** Returns true if item is inserted. */
+    bool insert( const value_type &value ) {
+        return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false );
+    }
+
+    //! Insert range [first, last)
+    template<typename I>
+    void insert(I first, I last) {
+        for(; first != last; ++first)
+            insert( *first );
+    }
+
+    //! Erase item.
+    /** Return true if the item was erased by this particular call. */
+    bool erase( const Key& key );
+
+    //! Erase item by const_accessor.
+    /** Return true if the item was erased by this particular call. */
+    bool erase( const_accessor& item_accessor ) {
+        return exclude( item_accessor, /*readonly=*/ true );
+    }
+
+    //! Erase item by accessor.
+    /** Return true if the item was erased by this particular call. */
+    bool erase( accessor& item_accessor ) {
+        return exclude( item_accessor, /*readonly=*/ false );
+    }
+
+protected:
+    //! Insert or find item and optionally acquire a lock on the item.
+    bool lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write );
+
+    //! delete item by accessor
+    bool exclude( const_accessor &item_accessor, bool readonly );
+
+    //! Returns an iterator for an item defined by the key, or for the next item after it (if upper==true)
+    template<typename I>
+    std::pair<I, I> internal_equal_range( const Key& key, I end ) const;
+
+    //! Copy "source" to *this, where *this must start out empty.
+    void internal_copy( const concurrent_hash_map& source );
+
+    template<typename I>
+    void internal_copy(I first, I last);
+
+    //! Fast find when no concurrent erasure is used. For internal use inside TBB only!
+    /** Return pointer to item with given key, or NULL if no such item exists.
+        Must not be called concurrently with erasure operations. */
+    const_pointer internal_fast_find( const Key& key ) const {
+        hashcode_t h = my_hash_compare.hash( key );
+#if TBB_USE_THREADING_TOOLS
+        hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
+#else
+        hashcode_t m = my_mask;
+#endif
+        node *n;
+    restart:
+        __TBB_ASSERT((m&(m+1))==0, NULL);
+        bucket *b = get_bucket( h & m );
+#if TBB_USE_THREADING_TOOLS
+        // TODO: actually, notification is unnecessary here, just hiding double-check
+        if( itt_load_pointer_with_acquire_v3(&b->node_list) == internal::rehash_req )
+#else
+        if( __TBB_load_with_acquire(b->node_list) == internal::rehash_req )
+#endif
+        {
+            bucket::scoped_t lock;
+            if( lock.try_acquire( b->mutex, /*write=*/true ) ) {
+                if( b->node_list == internal::rehash_req)
+                    const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); //recursive rehashing
+            }
+            else lock.acquire( b->mutex, /*write=*/false );
+            __TBB_ASSERT(b->node_list!=internal::rehash_req,NULL);
+        }
+        n = search_bucket( key, b );
+        if( n )
+            return &n->item;
+        else if( check_mask_race( h, m ) )
+            goto restart;
+        return 0;
+    }
+};
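+
+/* A usage sketch for illustration only; WordCount, count_word, lookup_word and the
+   parallel traversal notes below are hypothetical, not part of TBB.  It shows the
+   accessor protocol documented above: accessor takes a write lock on the element,
+   const_accessor a read lock, and the lock is held until the accessor is released
+   or destroyed.
+
+    #include "tbb/concurrent_hash_map.h"
+    #include <string>
+
+    typedef tbb::concurrent_hash_map<std::string, int> WordCount;
+
+    void count_word( WordCount& table, const std::string& word ) {
+        WordCount::accessor a;           // write access to the element while 'a' is alive
+        table.insert( a, word );         // inserts <word, int()> if the key is not yet present
+        a->second += 1;                  // safe: the element is locked for writing
+    }                                    // lock released when 'a' is destroyed
+
+    int lookup_word( const WordCount& table, const std::string& word ) {
+        WordCount::const_accessor a;     // read access while 'a' is alive
+        return table.find( a, word ) ? a->second : 0;
+    }
+
+   For parallel traversal, range() returns a splittable range usable with
+   tbb::parallel_for (declared in "tbb/parallel_for.h"); the serial begin()/end()
+   iteration must not run concurrently with insert or erase.
+*/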
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // Suppress "conditional expression is constant" warning.
+    #pragma warning( push )
+    #pragma warning( disable: 4127 )
+#endif
+
+template<typename Key, typename T, typename HashCompare, typename A>
+bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write ) {
+    __TBB_ASSERT( !result || !result->my_node, NULL );
+    bool return_value;
+    hashcode_t const h = my_hash_compare.hash( key );
+#if TBB_USE_THREADING_TOOLS
+    hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
+#else
+    hashcode_t m = my_mask;
+#endif
+    segment_index_t grow_segment = 0;
+    node *n, *tmp_n = 0;
+    restart:
+    {//lock scope
+        __TBB_ASSERT((m&(m+1))==0, NULL);
+        return_value = false;
+        // get bucket
+        bucket_accessor b( this, h & m );
+
+        // find a node
+        n = search_bucket( key, b() );
+        if( op_insert ) {
+            // [opt] insert a key
+            if( !n ) {
+                if( !tmp_n ) {
+                    if(t) tmp_n = new( my_allocator ) node(key, *t);
+                    else  tmp_n = new( my_allocator ) node(key);
+                }
+                if( !b.is_writer() && !b.upgrade_to_writer() ) { // TODO: improved insertion
+                    // Rerun the bucket search, in case another thread inserted the item during the upgrade.
+                    n = search_bucket( key, b() );
+                    if( is_valid(n) ) { // unfortunately, it did
+                        b.downgrade_to_reader();
+                        goto exists;
+                    }
+                }
+                if( check_mask_race(h, m) )
+                    goto restart; // b.release() is done in ~b().
+                // insert and set flag to grow the container
+                grow_segment = insert_new_node( b(), n = tmp_n, m );
+                tmp_n = 0;
+                return_value = true;
+            }
+        } else { // find or count
+            if( !n ) {
+                if( check_mask_race( h, m ) )
+                    goto restart; // b.release() is done in ~b(). TODO: replace by continue
+                return false;
+            }
+            return_value = true;
+        }
+    exists:
+        if( !result ) goto check_growth;
+        // TODO: the following seems as generic/regular operation
+        // acquire the item
+        if( !result->my_lock.try_acquire( n->mutex, write ) ) {
+            // we are unlucky, prepare for longer wait
+            tbb::internal::atomic_backoff trials;
+            do {
+                if( !trials.bounded_pause() ) {
+                    // the wait takes really long, restart the operation
+                    b.release();
+                    __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" );
+                    __TBB_Yield();
+#if TBB_USE_THREADING_TOOLS
+                    m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
+#else
+                    m = my_mask;
+#endif
+                    goto restart;
+                }
+            } while( !result->my_lock.try_acquire( n->mutex, write ) );
+        }
+    }//lock scope
+    result->my_node = n;
+    result->my_hash = h;
+check_growth:
+    // [opt] grow the container
+    if( grow_segment ) {
+#if __TBB_STATISTICS
+        my_info_resizes++; // concurrent ones
+#endif
+        enable_segment( grow_segment );
+    }
+    if( tmp_n ) // if op_insert only
+        delete_node( tmp_n );
+    return return_value;
+}
+
+template<typename Key, typename T, typename HashCompare, typename A>
+template<typename I>
+std::pair<I, I> concurrent_hash_map<Key,T,HashCompare,A>::internal_equal_range( const Key& key, I end_ ) const {
+    hashcode_t h = my_hash_compare.hash( key );
+    hashcode_t m = my_mask;
+    __TBB_ASSERT((m&(m+1))==0, NULL);
+    h &= m;
+    bucket *b = get_bucket( h );
+    while( b->node_list == internal::rehash_req ) {
+        m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
+        b = get_bucket( h &= m );
+    }
+    node *n = search_bucket( key, b );
+    if( !n )
+        return std::make_pair(end_, end_);
+    iterator lower(*this, h, b, n), upper(lower);
+    return std::make_pair(lower, ++upper);
+}
+
+template<typename Key, typename T, typename HashCompare, typename A>
+bool concurrent_hash_map<Key,T,HashCompare,A>::exclude( const_accessor &item_accessor, bool readonly ) {
+    __TBB_ASSERT( item_accessor.my_node, NULL );
+    node_base *const n = item_accessor.my_node;
+    item_accessor.my_node = NULL; // we ought to release the accessor anyway
+    hashcode_t const h = item_accessor.my_hash;
+#if TBB_USE_THREADING_TOOLS
+    hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
+#else
+    hashcode_t m = my_mask;
+#endif
+    do {
+        // get bucket
+        bucket_accessor b( this, h & m, /*writer=*/true );
+        node_base **p = &b()->node_list;
+        while( *p && *p != n )
+            p = &(*p)->next;
+        if( !*p ) { // someone else was the first
+            if( check_mask_race( h, m ) )
+                continue;
+            item_accessor.my_lock.release();
+            return false;
+        }
+        __TBB_ASSERT( *p == n, NULL );
+        *p = n->next; // remove from container
+        my_size--;
+        break;
+    } while(true);
+    if( readonly ) // need to get exclusive lock
+        item_accessor.my_lock.upgrade_to_writer(); // return value means nothing here
+    item_accessor.my_lock.release();
+    delete_node( n ); // Only one thread can delete it due to write lock on the chain_mutex
+    return true;
+}
+
+template<typename Key, typename T, typename HashCompare, typename A>
+bool concurrent_hash_map<Key,T,HashCompare,A>::erase( const Key &key ) {
+    node_base *n;
+    hashcode_t const h = my_hash_compare.hash( key );
+#if TBB_USE_THREADING_TOOLS
+    hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
+#else
+    hashcode_t m = my_mask;
+#endif
+restart:
+    {//lock scope
+        // get bucket
+        bucket_accessor b( this, h & m );
+    search:
+        node_base **p = &b()->node_list;
+        n = *p;
+        while( is_valid(n) && !my_hash_compare.equal(key, static_cast<node*>(n)->item.first ) ) {
+            p = &n->next;
+            n = *p;
+        }
+        if( !n ) { // not found, but mask could be changed
+            if( check_mask_race( h, m ) )
+                goto restart;
+            return false;
+        }
+        else if( !b.is_writer() && !b.upgrade_to_writer() ) {
+            if( check_mask_race( h, m ) ) // contended upgrade, check mask
+                goto restart;
+            goto search;
+        }
+        *p = n->next;
+        my_size--;
+    }
+    {
+        typename node::scoped_t item_locker( n->mutex, /*write=*/true );
+    }
+    // note: no thread should be trying to acquire this mutex again; do not try to upgrade const_accessor!
+    delete_node( n ); // Only one thread can delete it due to write lock on the bucket
+    return true;
+}
+
+template<typename Key, typename T, typename HashCompare, typename A>
+void concurrent_hash_map<Key,T,HashCompare,A>::swap(concurrent_hash_map<Key,T,HashCompare,A> &table) {
+    std::swap(this->my_allocator, table.my_allocator);
+    std::swap(this->my_hash_compare, table.my_hash_compare);
+    internal_swap(table);
+}
+
+template<typename Key, typename T, typename HashCompare, typename A>
+void concurrent_hash_map<Key,T,HashCompare,A>::rehash(size_type sz) {
+    reserve( sz ); // TODO: add reduction of number of buckets as well
+    hashcode_t mask = my_mask;
+    hashcode_t b = (mask+1)>>1; // size or first index of the last segment
+    __TBB_ASSERT((b&(b-1))==0, NULL);
+    bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing
+    for(; b <= mask; b++, bp++ ) {
+        node_base *n = bp->node_list;
+        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
+        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
+        if( n == internal::rehash_req ) { // rehash bucket, conditional because rehashing of a previous bucket may affect this one
+            hashcode_t h = b; bucket *b_old = bp;
+            do {
+                __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
+                hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
+                b_old = get_bucket( h &= m );
+            } while( b_old->node_list == internal::rehash_req );
+            // now h is the index of the root rehashed bucket b_old
+            mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
+            for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {
+                hashcode_t c = my_hash_compare.hash( static_cast<node*>(q)->item.first );
+                if( (c & mask) != h ) { // should be rehashed
+                    *p = q->next; // exclude from b_old
+                    bucket *b_new = get_bucket( c & mask );
+                    __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" );
+                    add_to_bucket( b_new, q );
+                } else p = &q->next; // iterate to next item
+            }
+        }
+    }
+#if TBB_USE_PERFORMANCE_WARNINGS
+    int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
+    static bool reported = false;
+#endif
+#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
+    for( b = 0; b <= mask; b++ ) {// only last segment should be scanned for rehashing
+        if( b & (b-2) ) ++bp; // not the beginning of a segment
+        else bp = get_bucket( b );
+        node_base *n = bp->node_list;
+        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
+        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" );
+#if TBB_USE_PERFORMANCE_WARNINGS
+        if( n == internal::empty_rehashed ) empty_buckets++;
+        else if( n->next ) overpopulated_buckets++;
+#endif
+#if TBB_USE_ASSERT
+        for( ; is_valid(n); n = n->next ) {
+            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->item.first ) & mask;
+            __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" );
+        }
+#endif
+    }
+#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
+#if TBB_USE_PERFORMANCE_WARNINGS
+    if( buckets > current_size) empty_buckets -= buckets - current_size;
+    else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?
+    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
+        tbb::internal::runtime_warning(
+            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d  Empties: %d  Overlaps: %d",
+            typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets );
+        reported = true;
+    }
+#endif
+}
+
+template<typename Key, typename T, typename HashCompare, typename A>
+void concurrent_hash_map<Key,T,HashCompare,A>::clear() {
+    hashcode_t m = my_mask;
+    __TBB_ASSERT((m&(m+1))==0, NULL);
+#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
+#if TBB_USE_PERFORMANCE_WARNINGS
+    int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
+    static bool reported = false;
+#endif
+    bucket *bp = 0;
+    // check consistency
+    for( segment_index_t b = 0; b <= m; b++ ) {
+        if( b & (b-2) ) ++bp; // not the beginning of a segment
+        else bp = get_bucket( b );
+        node_base *n = bp->node_list;
+        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
+        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" );
+#if TBB_USE_PERFORMANCE_WARNINGS
+        if( n == internal::empty_rehashed ) empty_buckets++;
+        else if( n == internal::rehash_req ) buckets--;
+        else if( n->next ) overpopulated_buckets++;
+#endif
+#if __TBB_EXTRA_DEBUG
+        for(; is_valid(n); n = n->next ) {
+            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->item.first );
+            h &= m;
+            __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" );
+        }
+#endif
+    }
+#if TBB_USE_PERFORMANCE_WARNINGS
+#if __TBB_STATISTICS
+    printf( "items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d"
+        " concurrent: resizes=%u rehashes=%u restarts=%u\n",
+        current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets,
+        unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) );
+    my_info_resizes = 0; // concurrent ones
+    my_info_restarts = 0; // race collisions
+    my_info_rehashes = 0;  // invocations of rehash_bucket
+#endif
+    if( buckets > current_size) empty_buckets -= buckets - current_size;
+    else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?
+    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
+        tbb::internal::runtime_warning(
+            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d  Empties: %d  Overlaps: %d",
+            typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets );
+        reported = true;
+    }
+#endif
+#endif//TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
+    my_size = 0;
+    segment_index_t s = segment_index_of( m );
+    __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" );
+    cache_aligned_allocator<bucket> alloc;
+    do {
+        __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" );
+        segment_ptr_t buckets_ptr = my_table[s];
+        size_type sz = segment_size( s ? s : 1 );
+        for( segment_index_t i = 0; i < sz; i++ )
+            for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {
+                buckets_ptr[i].node_list = n->next;
+                delete_node( n );
+            }
+        if( s >= first_block) // the first segment or the next
+            alloc.deallocate( buckets_ptr, sz );
+        else if( s == embedded_block && embedded_block != first_block )
+            alloc.deallocate( buckets_ptr, segment_size(first_block)-embedded_buckets );
+        if( s >= embedded_block ) my_table[s] = 0;
+    } while(s-- > 0);
+    my_mask = embedded_buckets - 1;
+}
+
+template<typename Key, typename T, typename HashCompare, typename A>
+void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy( const concurrent_hash_map& source ) {
+    reserve( source.my_size ); // TODO: load_factor?
+    hashcode_t mask = source.my_mask;
+    if( my_mask == mask ) { // optimized version
+        bucket *dst = 0, *src = 0;
+        bool rehash_required = false;
+        for( hashcode_t k = 0; k <= mask; k++ ) {
+            if( k & (k-2) ) ++dst,src++; // not the beginning of a segment
+            else { dst = get_bucket( k ); src = source.get_bucket( k ); }
+            __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table");
+            node *n = static_cast<node*>( src->node_list );
+            if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets
+                rehash_required = true;
+                dst->node_list = internal::rehash_req;
+            } else for(; n; n = static_cast<node*>( n->next ) ) {
+                add_to_bucket( dst, new( my_allocator ) node(n->item.first, n->item.second) );
+                ++my_size; // TODO: replace by non-atomic op
+            }
+        }
+        if( rehash_required ) rehash();
+    } else internal_copy( source.begin(), source.end() );
+}
+
+template<typename Key, typename T, typename HashCompare, typename A>
+template<typename I>
+void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy(I first, I last) {
+    hashcode_t m = my_mask;
+    for(; first != last; ++first) {
+        hashcode_t h = my_hash_compare.hash( first->first );
+        bucket *b = get_bucket( h & m );
+        __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table");
+        node *n = new( my_allocator ) node(first->first, first->second);
+        add_to_bucket( b, n );
+        ++my_size; // TODO: replace by non-atomic op
+    }
+}
+
+} // namespace interface4
+
+using interface4::concurrent_hash_map;
+
+
+template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
+inline bool operator==(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b) {
+    if(a.size() != b.size()) return false;
+    typename concurrent_hash_map<Key, T, HashCompare, A1>::const_iterator i(a.begin()), i_end(a.end());
+    typename concurrent_hash_map<Key, T, HashCompare, A2>::const_iterator j, j_end(b.end());
+    for(; i != i_end; ++i) {
+        j = b.equal_range(i->first).first;
+        if( j == j_end || !(i->second == j->second) ) return false;
+    }
+    return true;
+}
+
+template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
+inline bool operator!=(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b)
+{    return !(a == b); }
+
+template<typename Key, typename T, typename HashCompare, typename A>
+inline void swap(concurrent_hash_map<Key, T, HashCompare, A> &a, concurrent_hash_map<Key, T, HashCompare, A> &b)
+{    a.swap( b ); }
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    #pragma warning( pop )
+#endif // warning 4127 is back
+
+} // namespace tbb
+
+#endif /* __TBB_concurrent_hash_map_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/concurrent_queue.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/concurrent_queue.h
new file mode 100644 (file)
index 0000000..6f50217
--- /dev/null
+++ b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/concurrent_queue.h
@@ -0,0 +1,413 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_concurrent_queue_H
+#define __TBB_concurrent_queue_H
+
+#include "_concurrent_queue_internal.h"
+
+namespace tbb {
+
+namespace strict_ppl {
+
+//! A high-performance thread-safe non-blocking concurrent queue.
+/** Multiple threads may each push and pop concurrently.
+    Assignment construction is not allowed.
+    @ingroup containers */
+template<typename T, typename A = cache_aligned_allocator<T> > 
+class concurrent_queue: public internal::concurrent_queue_base_v3<T> {
+    template<typename Container, typename Value> friend class internal::concurrent_queue_iterator;
+
+    //! Allocator type
+    typedef typename A::template rebind<char>::other page_allocator_type;
+    page_allocator_type my_allocator;
+
+    //! Allocates a block of size n (bytes)
+    /*override*/ virtual void *allocate_block( size_t n ) {
+        void *b = reinterpret_cast<void*>(my_allocator.allocate( n ));
+        if( !b )
+            internal::throw_exception(internal::eid_bad_alloc); 
+        return b;
+    }
+
+    //! Deallocates block created by allocate_block.
+    /*override*/ virtual void deallocate_block( void *b, size_t n ) {
+        my_allocator.deallocate( reinterpret_cast<char*>(b), n );
+    }
+
+public:
+    //! Element type in the queue.
+    typedef T value_type;
+
+    //! Reference type
+    typedef T& reference;
+
+    //! Const reference type
+    typedef const T& const_reference;
+
+    //! Integral type for representing size of the queue.
+    typedef size_t size_type;
+
+    //! Difference type for iterator
+    typedef ptrdiff_t difference_type;
+
+    //! Allocator type
+    typedef A allocator_type;
+
+    //! Construct empty queue
+    explicit concurrent_queue(const allocator_type& a = allocator_type()) : 
+        my_allocator( a )
+    {
+    }
+
+    //! [begin,end) constructor
+    template<typename InputIterator>
+    concurrent_queue( InputIterator begin, InputIterator end, const allocator_type& a = allocator_type()) :
+        my_allocator( a )
+    {
+        for( ; begin != end; ++begin )
+            this->internal_push(&*begin);
+    }
+    
+    //! Copy constructor
+    concurrent_queue( const concurrent_queue& src, const allocator_type& a = allocator_type()) : 
+        internal::concurrent_queue_base_v3<T>(), my_allocator( a )
+    {
+        this->assign( src );
+    }
+    
+    //! Destroy queue
+    ~concurrent_queue();
+
+    //! Enqueue an item at tail of queue.
+    void push( const T& source ) {
+        this->internal_push( &source );
+    }
+
+    //! Attempt to dequeue an item from head of queue.
+    /** Does not wait for item to become available.
+        Returns true if successful; false otherwise. */
+    bool try_pop( T& result ) {
+        return this->internal_try_pop( &result );
+    }
+
+    //! Return the number of items in the queue; thread unsafe
+    size_type unsafe_size() const {return this->internal_size();}
+
+    //! Equivalent to size()==0.
+    bool empty() const {return this->internal_empty();}
+
+    //! Clear the queue. Not thread-safe.
+    void clear();
+
+    //! Return allocator object
+    allocator_type get_allocator() const { return this->my_allocator; }
+
+    typedef internal::concurrent_queue_iterator<concurrent_queue,T> iterator;
+    typedef internal::concurrent_queue_iterator<concurrent_queue,const T> const_iterator;
+
+    //------------------------------------------------------------------------
+    // The iterators are intended only for debugging.  They are slow and not thread safe.
+    //------------------------------------------------------------------------
+    iterator unsafe_begin() {return iterator(*this);}
+    iterator unsafe_end() {return iterator();}
+    const_iterator unsafe_begin() const {return const_iterator(*this);}
+    const_iterator unsafe_end() const {return const_iterator();}
+} ;
+
+template<typename T, class A>
+concurrent_queue<T,A>::~concurrent_queue() {
+    clear();
+    this->internal_finish_clear();
+}
+
+template<typename T, class A>
+void concurrent_queue<T,A>::clear() {
+    while( !empty() ) {
+        T value;
+        this->internal_try_pop(&value);
+    }
+}
+
+} // namespace strict_ppl
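
A minimal sketch of the unbounded queue above, using only the members declared in this hunk (push, try_pop, unsafe_size, empty); the name tbb::concurrent_queue resolves to this class unless TBB_DEPRECATED is set (see the end of the header):

    #include <cassert>
    #include "tbb/concurrent_queue.h"

    int main() {
        tbb::concurrent_queue<int> q;    // unbounded: push() never blocks
        for( int i = 0; i < 4; ++i )
            q.push( i );
        assert( q.unsafe_size() == 4 );  // unsafe_size() is for debugging only, not thread-safe
        int v;
        while( q.try_pop( v ) )
            ;                            // try_pop() returns false instead of waiting when empty
        assert( q.empty() );
        return 0;
    }
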
+    
+//! A high-performance thread-safe blocking concurrent bounded queue.
+/** This is the pre-PPL TBB concurrent queue which supports boundedness and blocking semantics.
+    Note that method names agree with the PPL-style concurrent queue.
+    Multiple threads may each push and pop concurrently.
+    Assignment construction is not allowed.
+    @ingroup containers */
+template<typename T, class A = cache_aligned_allocator<T> >
+class concurrent_bounded_queue: public internal::concurrent_queue_base_v3 {
+    template<typename Container, typename Value> friend class internal::concurrent_queue_iterator;
+
+    //! Allocator type
+    typedef typename A::template rebind<char>::other page_allocator_type;
+    page_allocator_type my_allocator;
+
+    typedef typename concurrent_queue_base_v3::padded_page<T> padded_page;
+    //! Class used to ensure exception-safety of method "pop" 
+    class destroyer: internal::no_copy {
+        T& my_value;
+    public:
+        destroyer( T& value ) : my_value(value) {}
+        ~destroyer() {my_value.~T();}          
+    };
+
+    T& get_ref( page& p, size_t index ) {
+        __TBB_ASSERT( index<items_per_page, NULL );
+        return (&static_cast<padded_page*>(static_cast<void*>(&p))->last)[index];
+    }
+
+    /*override*/ virtual void copy_item( page& dst, size_t index, const void* src ) {
+        new( &get_ref(dst,index) ) T(*static_cast<const T*>(src)); 
+    }
+
+    /*override*/ virtual void copy_page_item( page& dst, size_t dindex, const page& src, size_t sindex ) {
+        new( &get_ref(dst,dindex) ) T( get_ref( const_cast<page&>(src), sindex ) );
+    }
+
+    /*override*/ virtual void assign_and_destroy_item( void* dst, page& src, size_t index ) {
+        T& from = get_ref(src,index);
+        destroyer d(from);
+        *static_cast<T*>(dst) = from;
+    }
+
+    /*override*/ virtual page *allocate_page() {
+        size_t n = sizeof(padded_page) + (items_per_page-1)*sizeof(T);
+        page *p = reinterpret_cast<page*>(my_allocator.allocate( n ));
+        if( !p )
+            internal::throw_exception(internal::eid_bad_alloc); 
+        return p;
+    }
+
+    /*override*/ virtual void deallocate_page( page *p ) {
+        size_t n = sizeof(padded_page) + items_per_page*sizeof(T);
+        my_allocator.deallocate( reinterpret_cast<char*>(p), n );
+    }
+
+public:
+    //! Element type in the queue.
+    typedef T value_type;
+
+    //! Allocator type
+    typedef A allocator_type;
+
+    //! Reference type
+    typedef T& reference;
+
+    //! Const reference type
+    typedef const T& const_reference;
+
+    //! Integral type for representing size of the queue.
+    /** Notice that the size_type is a signed integral type.
+        This is because the size can be negative if there are pending pops without corresponding pushes. */
+    typedef std::ptrdiff_t size_type;
+
+    //! Difference type for iterator
+    typedef std::ptrdiff_t difference_type;
+
+    //! Construct empty queue
+    explicit concurrent_bounded_queue(const allocator_type& a = allocator_type()) : 
+        concurrent_queue_base_v3( sizeof(T) ), my_allocator( a )
+    {
+    }
+
+    //! Copy constructor
+    concurrent_bounded_queue( const concurrent_bounded_queue& src, const allocator_type& a = allocator_type()) : 
+        concurrent_queue_base_v3( sizeof(T) ), my_allocator( a )
+    {
+        assign( src );
+    }
+
+    //! [begin,end) constructor
+    template<typename InputIterator>
+    concurrent_bounded_queue( InputIterator begin, InputIterator end, const allocator_type& a = allocator_type()) :
+        concurrent_queue_base_v3( sizeof(T) ), my_allocator( a )
+    {
+        for( ; begin != end; ++begin )
+            internal_push_if_not_full(&*begin);
+    }
+
+    //! Destroy queue
+    ~concurrent_bounded_queue();
+
+    //! Enqueue an item at tail of queue.
+    void push( const T& source ) {
+        internal_push( &source );
+    }
+
+    //! Dequeue item from head of queue.
+    /** Block until an item becomes available, and then dequeue it. */
+    void pop( T& destination ) {
+        internal_pop( &destination );
+    }
+
+    //! Enqueue an item at tail of queue if queue is not already full.
+    /** Does not wait for queue to become not full.
+        Returns true if item is pushed; false if queue was already full. */
+    bool try_push( const T& source ) {
+        return internal_push_if_not_full( &source );
+    }
+
+    //! Attempt to dequeue an item from head of queue.
+    /** Does not wait for item to become available.
+        Returns true if successful; false otherwise. */
+    bool try_pop( T& destination ) {
+        return internal_pop_if_present( &destination );
+    }
+
+    //! Return number of pushes minus number of pops.
+    /** Note that the result can be negative if there are pops waiting for the 
+        corresponding pushes.  The result can also exceed capacity() if there 
+        are push operations in flight. */
+    size_type size() const {return internal_size();}
+
+    //! Equivalent to size()<=0.
+    bool empty() const {return internal_empty();}
+
+    //! Maximum number of allowed elements
+    size_type capacity() const {
+        return my_capacity;
+    }
+
+    //! Set the capacity
+    /** Setting the capacity to 0 causes subsequent try_push operations to always fail,
+        and subsequent push operations to block forever. */
+    void set_capacity( size_type new_capacity ) {
+        internal_set_capacity( new_capacity, sizeof(T) );
+    }
+
+    //! Return allocator object
+    allocator_type get_allocator() const { return this->my_allocator; }
+
+    //! Clear the queue. Not thread-safe.
+    void clear();
+
+    typedef internal::concurrent_queue_iterator<concurrent_bounded_queue,T> iterator;
+    typedef internal::concurrent_queue_iterator<concurrent_bounded_queue,const T> const_iterator;
+
+    //------------------------------------------------------------------------
+    // The iterators are intended only for debugging.  They are slow and not thread safe.
+    //------------------------------------------------------------------------
+    iterator unsafe_begin() {return iterator(*this);}
+    iterator unsafe_end() {return iterator();}
+    const_iterator unsafe_begin() const {return const_iterator(*this);}
+    const_iterator unsafe_end() const {return const_iterator();}
+
+}; 
+
+template<typename T, class A>
+concurrent_bounded_queue<T,A>::~concurrent_bounded_queue() {
+    clear();
+    internal_finish_clear();
+}
+
+template<typename T, class A>
+void concurrent_bounded_queue<T,A>::clear() {
+    while( !empty() ) {
+        T value;
+        internal_pop_if_present(&value);
+    }
+}
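
A minimal sketch of concurrent_bounded_queue as defined above: set_capacity() bounds the queue, try_push() fails once capacity() items are held, and push()/pop() block until space or an item becomes available:

    #include <cassert>
    #include "tbb/concurrent_queue.h"

    int main() {
        tbb::concurrent_bounded_queue<int> q;
        q.set_capacity( 2 );
        assert( q.try_push( 1 ) );
        assert( q.try_push( 2 ) );
        assert( !q.try_push( 3 ) );      // queue already holds capacity() items
        int v;
        q.pop( v );                      // blocking pop; succeeds immediately here
        assert( v == 1 );                // FIFO order
        assert( q.size() == 1 );         // pushes minus pops; can be negative with pending pops
        return 0;
    }
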
+
+namespace deprecated {
+
+//! A high-performance thread-safe blocking concurrent bounded queue.
+/** This is the pre-PPL TBB concurrent queue which supports boundedness and blocking semantics.
+    Note that method names agree with the PPL-style concurrent queue.
+    Multiple threads may each push and pop concurrently.
+    Assignment construction is not allowed.
+    @ingroup containers */
+template<typename T, class A = cache_aligned_allocator<T> > 
+class concurrent_queue: public concurrent_bounded_queue<T,A> {
+#if !__TBB_TEMPLATE_FRIENDS_BROKEN
+    template<typename Container, typename Value> friend class internal::concurrent_queue_iterator;
+#endif 
+
+public:
+    //! Construct empty queue
+    explicit concurrent_queue(const A& a = A()) : 
+        concurrent_bounded_queue<T,A>( a )
+    {
+    }
+
+    //! Copy constructor
+    concurrent_queue( const concurrent_queue& src, const A& a = A()) : 
+        concurrent_bounded_queue<T,A>( src, a )
+    {
+    }
+
+    //! [begin,end) constructor
+    template<typename InputIterator>
+    concurrent_queue( InputIterator b /*begin*/, InputIterator e /*end*/, const A& a = A()) :
+        concurrent_bounded_queue<T,A>( b, e, a )
+    {
+    }
+
+    //! Enqueue an item at tail of queue if queue is not already full.
+    /** Does not wait for queue to become not full.
+        Returns true if item is pushed; false if queue was already full. */
+    bool push_if_not_full( const T& source ) {
+        return this->try_push( source );
+    }
+
+    //! Attempt to dequeue an item from head of queue.
+    /** Does not wait for item to become available.
+        Returns true if successful; false otherwise. 
+        @deprecated Use try_pop()
+        */
+    bool pop_if_present( T& destination ) {
+        return this->try_pop( destination );
+    }
+
+    typedef typename concurrent_bounded_queue<T,A>::iterator iterator;
+    typedef typename concurrent_bounded_queue<T,A>::const_iterator const_iterator;
+    //
+    //------------------------------------------------------------------------
+    // The iterators are intended only for debugging.  They are slow and not thread safe.
+    //------------------------------------------------------------------------
+    iterator begin() {return this->unsafe_begin();}
+    iterator end() {return this->unsafe_end();}
+    const_iterator begin() const {return this->unsafe_begin();}
+    const_iterator end() const {return this->unsafe_end();}
+}; 
+
+}
+    
+
+#if TBB_DEPRECATED
+using deprecated::concurrent_queue;
+#else
+using strict_ppl::concurrent_queue;    
+#endif
+
+} // namespace tbb
+
+#endif /* __TBB_concurrent_queue_H */
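
A sketch of the alias selection above, assuming TBB_DEPRECATED is defined to a non-zero value before this header is included (for example on the compiler command line); tbb::concurrent_queue then names the deprecated bounded queue and keeps the old method names shown above:

    #define TBB_DEPRECATED 1             // assumed to be set before any TBB header is included
    #include <cassert>
    #include "tbb/concurrent_queue.h"

    int main() {
        tbb::concurrent_queue<int> q;            // deprecated::concurrent_queue, derived from concurrent_bounded_queue
        assert( q.push_if_not_full( 42 ) );      // forwards to try_push()
        int v;
        assert( q.pop_if_present( v ) );         // forwards to try_pop()
        assert( v == 42 );
        return 0;
    }
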
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/concurrent_unordered_map.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/concurrent_unordered_map.h
new file mode 100644 (file)
index 0000000..2521961
--- /dev/null
@@ -0,0 +1,241 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+/* Container implementations in this header are based on PPL implementations
+   provided by Microsoft. */
+
+#ifndef __TBB_concurrent_unordered_map_H
+#define __TBB_concurrent_unordered_map_H
+
+#include "_concurrent_unordered_internal.h"
+
+namespace tbb
+{
+
+// Template class for hash compare
+template<typename Key>
+class tbb_hash
+{
+public:
+    tbb_hash() {}
+
+    size_t operator()(const Key& key) const
+    {
+        return tbb_hasher(key);
+    }
+};
+
+namespace interface5 {
+
+// Template class for hash map traits
+template<typename Key, typename T, typename Hash_compare, typename Allocator, bool Allow_multimapping>
+class concurrent_unordered_map_traits
+{
+protected:
+    typedef std::pair<const Key, T> value_type;
+    typedef Key key_type;
+    typedef Hash_compare hash_compare;
+    typedef typename Allocator::template rebind<value_type>::other allocator_type;
+    enum { allow_multimapping = Allow_multimapping };
+
+    concurrent_unordered_map_traits() : my_hash_compare() {}
+    concurrent_unordered_map_traits(const hash_compare& hc) : my_hash_compare(hc) {}
+
+    class value_compare : public std::binary_function<value_type, value_type, bool>
+    {
+        friend class concurrent_unordered_map_traits<Key, T, Hash_compare, Allocator, Allow_multimapping>;
+
+    public:
+        bool operator()(const value_type& left, const value_type& right) const
+        {
+            return (my_hash_compare(left.first, right.first));
+        }
+
+        value_compare(const hash_compare& comparator) : my_hash_compare(comparator) {}
+
+    protected:
+        hash_compare my_hash_compare;    // the comparator predicate for keys
+    };
+
+    template<class Type1, class Type2>
+    static const Key& get_key(const std::pair<Type1, Type2>& value) {
+        return (value.first);
+    }
+
+    hash_compare my_hash_compare; // the comparator predicate for keys
+};
+
+template <typename Key, typename T, typename Hasher = tbb_hash<Key>, typename Key_equality = std::equal_to<Key>, typename Allocator = tbb::tbb_allocator<std::pair<const Key, T> > >
+class concurrent_unordered_map : public internal::concurrent_unordered_base< concurrent_unordered_map_traits<Key, T, internal::hash_compare<Key, Hasher, Key_equality>, Allocator, false> >
+{
+    // Base type definitions
+    typedef internal::hash_compare<Key, Hasher, Key_equality> hash_compare;
+    typedef internal::concurrent_unordered_base< concurrent_unordered_map_traits<Key, T, hash_compare, Allocator, false> > base_type;
+    typedef concurrent_unordered_map_traits<Key, T, internal::hash_compare<Key, Hasher, Key_equality>, Allocator, false> traits_type;
+    using traits_type::my_hash_compare;
+#if __TBB_EXTRA_DEBUG
+public:
+#endif
+    using traits_type::allow_multimapping;
+public:
+    using base_type::end;
+    using base_type::find;
+    using base_type::insert;
+
+    // Type definitions
+    typedef Key key_type;
+    typedef typename base_type::value_type value_type;
+    typedef T mapped_type;
+    typedef Hasher hasher;
+    typedef Key_equality key_equal;
+    typedef hash_compare key_compare;
+
+    typedef typename base_type::allocator_type allocator_type;
+    typedef typename base_type::pointer pointer;
+    typedef typename base_type::const_pointer const_pointer;
+    typedef typename base_type::reference reference;
+    typedef typename base_type::const_reference const_reference;
+
+    typedef typename base_type::size_type size_type;
+    typedef typename base_type::difference_type difference_type;
+
+    typedef typename base_type::iterator iterator;
+    typedef typename base_type::const_iterator const_iterator;
+    typedef typename base_type::iterator local_iterator;
+    typedef typename base_type::const_iterator const_local_iterator;
+
+    // Construction/destruction/copying
+    explicit concurrent_unordered_map(size_type n_of_buckets = 8, const hasher& a_hasher = hasher(),
+        const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type())
+        : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a)
+    {
+    }
+
+    concurrent_unordered_map(const Allocator& a) : base_type(8, key_compare(), a)
+    {
+    }
+
+    template <typename Iterator>
+    concurrent_unordered_map(Iterator first, Iterator last, size_type n_of_buckets = 8, const hasher& a_hasher = hasher(),
+        const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type())
+        : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a)
+    {
+        for (; first != last; ++first)
+            base_type::insert(*first);
+    }
+
+    concurrent_unordered_map(const concurrent_unordered_map& table) : base_type(table)
+    {
+    }
+
+    concurrent_unordered_map(const concurrent_unordered_map& table, const Allocator& a)
+        : base_type(table, a)
+    {
+    }
+
+    concurrent_unordered_map& operator=(const concurrent_unordered_map& table)
+    {
+        base_type::operator=(table);
+        return (*this);
+    }
+
+    iterator unsafe_erase(const_iterator where)
+    {
+        return base_type::unsafe_erase(where);
+    }
+
+    size_type unsafe_erase(const key_type& key)
+    {
+        return base_type::unsafe_erase(key);
+    }
+
+    iterator unsafe_erase(const_iterator first, const_iterator last)
+    {
+        return base_type::unsafe_erase(first, last);
+    }
+
+    void swap(concurrent_unordered_map& table)
+    {
+        base_type::swap(table);
+    }
+
+    // Observers
+    hasher hash_function() const
+    {
+        return my_hash_compare.my_hash_object;
+    }
+
+    key_equal key_eq() const
+    {
+        return my_hash_compare.my_key_compare_object;
+    }
+
+    mapped_type& operator[](const key_type& key)
+    {
+        iterator where = find(key);
+
+        if (where == end())
+        {
+            where = insert(std::pair<key_type, mapped_type>(key, mapped_type())).first;
+        }
+
+        return ((*where).second);
+    }
+
+    mapped_type& at(const key_type& key)
+    {
+        iterator where = find(key);
+
+        if (where == end())
+        {
+            tbb::internal::throw_exception(tbb::internal::eid_invalid_key);
+        }
+
+        return ((*where).second);
+    }
+
+    const mapped_type& at(const key_type& key) const
+    {
+        const_iterator where = find(key);
+
+        if (where == end())
+        {
+            tbb::internal::throw_exception(tbb::internal::eid_invalid_key);
+        }
+
+        return ((*where).second);
+    }
+};
+
+} // namespace interface5
+
+using interface5::concurrent_unordered_map;
+
+} // namespace tbb
+
+#endif // __TBB_concurrent_unordered_map_H
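
A minimal usage sketch for the map defined above, with the default tbb_hash/std::equal_to parameters; insert() and find() are pulled in from the base class by the using-declarations shown in this hunk, operator[], at() and unsafe_erase() are defined here, and size() is assumed to come from the base class:

    #include <cassert>
    #include <string>
    #include "tbb/concurrent_unordered_map.h"

    int main() {
        tbb::concurrent_unordered_map<std::string, int> m;
        m["alpha"] = 1;                          // default-inserts the key, then assigns
        m.insert( std::make_pair( std::string("beta"), 2 ) );
        assert( m.at("alpha") == 1 );            // at() throws for a missing key
        assert( m.find("beta") != m.end() );
        m.unsafe_erase( "alpha" );               // erase is not concurrency-safe, hence the prefix
        assert( m.size() == 1 );
        return 0;
    }
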
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/concurrent_vector.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/concurrent_vector.h
new file mode 100644 (file)
index 0000000..8106eb4
--- /dev/null
@@ -0,0 +1,1060 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_concurrent_vector_H
+#define __TBB_concurrent_vector_H
+
+#include "tbb_stddef.h"
+#include "tbb_exception.h"
+#include "atomic.h"
+#include "cache_aligned_allocator.h"
+#include "blocked_range.h"
+#include "tbb_machine.h"
+#include <new>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <algorithm>
+#include <iterator>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+#if _MSC_VER==1500 && !__INTEL_COMPILER
+    // VS2008/VC9 seems to have an issue; limits pull in math.h
+    #pragma warning( push )
+    #pragma warning( disable: 4985 )
+#endif
+#include <limits> /* std::numeric_limits */
+#if _MSC_VER==1500 && !__INTEL_COMPILER
+    #pragma warning( pop )
+#endif
+
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) && defined(_Wp64)
+    // Workaround for overzealous compiler warnings in /Wp64 mode
+    #pragma warning (push)
+    #pragma warning (disable: 4267)
+#endif
+
+namespace tbb {
+
+template<typename T, class A = cache_aligned_allocator<T> >
+class concurrent_vector;
+
+//! @cond INTERNAL
+namespace internal {
+
+    //! Bad allocation marker
+    static void *const vector_allocation_error_flag = reinterpret_cast<void*>(size_t(63));
+
+    //! Routine that loads pointer from location pointed to by src without any fence, without causing ITT to report a race.
+    void* __TBB_EXPORTED_FUNC itt_load_pointer_v3( const void* src );
+
+    //! Base class of concurrent vector implementation.
+    /** @ingroup containers */
+    class concurrent_vector_base_v3 {
+    protected:
+
+        // Basic types declarations
+        typedef size_t segment_index_t;
+        typedef size_t size_type;
+
+        // Using enumerations due to Mac linking problems of static const variables
+        enum {
+            // Size constants
+            default_initial_segments = 1, // 2 initial items
+            //! Number of slots for segment's pointers inside the class
+            pointers_per_short_table = 3, // to fit into 8 words of entire structure
+            pointers_per_long_table = sizeof(segment_index_t) * 8 // one segment per bit
+        };
+
+        // Segment pointer. Can be zero-initialized
+        struct segment_t {
+            void* array;
+#if TBB_USE_ASSERT
+            ~segment_t() {
+                __TBB_ASSERT( array <= internal::vector_allocation_error_flag, "should have been freed by clear" );
+            }
+#endif /* TBB_USE_ASSERT */
+        };
+        // Data fields
+
+        //! allocator function pointer
+        void* (*vector_allocator_ptr)(concurrent_vector_base_v3 &, size_t);
+
+        //! count of segments in the first block
+        atomic<size_type> my_first_block;
+
+        //! Requested size of vector
+        atomic<size_type> my_early_size;
+
+        //! Pointer to the segments table
+        atomic<segment_t*> my_segment;
+
+        //! embedded storage of segment pointers
+        segment_t my_storage[pointers_per_short_table];
+
+        // Methods
+
+        concurrent_vector_base_v3() {
+            my_early_size = 0;
+            my_first_block = 0; // deliberately not default_initial_segments; set on first allocation
+            for( segment_index_t i = 0; i < pointers_per_short_table; i++)
+                my_storage[i].array = NULL;
+            my_segment = my_storage;
+        }
+        __TBB_EXPORTED_METHOD ~concurrent_vector_base_v3();
+
+        static segment_index_t segment_index_of( size_type index ) {
+            return segment_index_t( __TBB_Log2( index|1 ) );
+        }
+
+        static segment_index_t segment_base( segment_index_t k ) {
+            return (segment_index_t(1)<<k & ~segment_index_t(1));
+        }
+
+        static inline segment_index_t segment_base_index_of( segment_index_t &index ) {
+            segment_index_t k = segment_index_of( index );
+            index -= segment_base(k);
+            return k;
+        }
+
+        static size_type segment_size( segment_index_t k ) {
+            return segment_index_t(1)<<k; // fake value for k==0
+        }
+
+        //! An operation on an n-element array starting at begin.
+        typedef void (__TBB_EXPORTED_FUNC *internal_array_op1)(void* begin, size_type n );
+
+        //! An operation on n-element destination array and n-element source array.
+        typedef void (__TBB_EXPORTED_FUNC *internal_array_op2)(void* dst, const void* src, size_type n );
+
+        //! Internal structure for compact()
+        struct internal_segments_table {
+            segment_index_t first_block;
+            void* table[pointers_per_long_table];
+        };
+
+        void __TBB_EXPORTED_METHOD internal_reserve( size_type n, size_type element_size, size_type max_size );
+        size_type __TBB_EXPORTED_METHOD internal_capacity() const;
+        void internal_grow( size_type start, size_type finish, size_type element_size, internal_array_op2 init, const void *src );
+        size_type __TBB_EXPORTED_METHOD internal_grow_by( size_type delta, size_type element_size, internal_array_op2 init, const void *src );
+        void* __TBB_EXPORTED_METHOD internal_push_back( size_type element_size, size_type& index );
+        segment_index_t __TBB_EXPORTED_METHOD internal_clear( internal_array_op1 destroy );
+        void* __TBB_EXPORTED_METHOD internal_compact( size_type element_size, void *table, internal_array_op1 destroy, internal_array_op2 copy );
+        void __TBB_EXPORTED_METHOD internal_copy( const concurrent_vector_base_v3& src, size_type element_size, internal_array_op2 copy );
+        void __TBB_EXPORTED_METHOD internal_assign( const concurrent_vector_base_v3& src, size_type element_size,
+                              internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy );
+        //! Obsolete
+        void __TBB_EXPORTED_METHOD internal_throw_exception(size_type) const;
+        void __TBB_EXPORTED_METHOD internal_swap(concurrent_vector_base_v3& v);
+
+        void __TBB_EXPORTED_METHOD internal_resize( size_type n, size_type element_size, size_type max_size, const void *src,
+                                                    internal_array_op1 destroy, internal_array_op2 init );
+        size_type __TBB_EXPORTED_METHOD internal_grow_to_at_least_with_result( size_type new_size, size_type element_size, internal_array_op2 init, const void *src );
+
+        //! Deprecated entry point for backwards compatibility to TBB 2.1.
+        void __TBB_EXPORTED_METHOD internal_grow_to_at_least( size_type new_size, size_type element_size, internal_array_op2 init, const void *src );
+private:
+        //! Private functionality
+        class helper;
+        friend class helper;
+    };
+    
+    typedef concurrent_vector_base_v3 concurrent_vector_base;
+
+    //! Meets requirements of a forward iterator for STL and a Value for a blocked_range.
+    /** Value is either the T or const T type of the container.
+        @ingroup containers */
+    template<typename Container, typename Value>
+    class vector_iterator 
+    {
+        //! concurrent_vector over which we are iterating.
+        Container* my_vector;
+
+        //! Index into the vector 
+        size_t my_index;
+
+        //! Caches my_vector-&gt;internal_subscript(my_index)
+        /** NULL if cached value is not available */
+        mutable Value* my_item;
+
+        template<typename C, typename T>
+        friend vector_iterator<C,T> operator+( ptrdiff_t offset, const vector_iterator<C,T>& v );
+
+        template<typename C, typename T, typename U>
+        friend bool operator==( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
+
+        template<typename C, typename T, typename U>
+        friend bool operator<( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
+
+        template<typename C, typename T, typename U>
+        friend ptrdiff_t operator-( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
+    
+        template<typename C, typename U>
+        friend class internal::vector_iterator;
+
+#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
+        template<typename T, class A>
+        friend class tbb::concurrent_vector;
+#else
+public: // workaround for MSVC
+#endif 
+
+        vector_iterator( const Container& vector, size_t index, void *ptr = 0 ) : 
+            my_vector(const_cast<Container*>(&vector)), 
+            my_index(index), 
+            my_item(static_cast<Value*>(ptr))
+        {}
+
+    public:
+        //! Default constructor
+        vector_iterator() : my_vector(NULL), my_index(~size_t(0)), my_item(NULL) {}
+
+        vector_iterator( const vector_iterator<Container,typename Container::value_type>& other ) :
+            my_vector(other.my_vector),
+            my_index(other.my_index),
+            my_item(other.my_item)
+        {}
+
+        vector_iterator operator+( ptrdiff_t offset ) const {
+            return vector_iterator( *my_vector, my_index+offset );
+        }
+        vector_iterator &operator+=( ptrdiff_t offset ) {
+            my_index+=offset;
+            my_item = NULL;
+            return *this;
+        }
+        vector_iterator operator-( ptrdiff_t offset ) const {
+            return vector_iterator( *my_vector, my_index-offset );
+        }
+        vector_iterator &operator-=( ptrdiff_t offset ) {
+            my_index-=offset;
+            my_item = NULL;
+            return *this;
+        }
+        Value& operator*() const {
+            Value* item = my_item;
+            if( !item ) {
+                item = my_item = &my_vector->internal_subscript(my_index);
+            }
+            __TBB_ASSERT( item==&my_vector->internal_subscript(my_index), "corrupt cache" );
+            return *item;
+        }
+        Value& operator[]( ptrdiff_t k ) const {
+            return my_vector->internal_subscript(my_index+k);
+        }
+        Value* operator->() const {return &operator*();}
+
+        //! Pre increment
+        vector_iterator& operator++() {
+            size_t k = ++my_index;
+            if( my_item ) {
+                // The following test uses 2's-complement arithmetic: it is true when
+                // k is a power of two >= 2, i.e. the index just crossed a segment boundary.
+                if( (k& (k-2))==0 ) {
+                    // Crossed into a new segment, so the cached element pointer is stale.
+                    my_item= NULL;
+                } else {
+                    ++my_item;
+                }
+            }
+            return *this;
+        }
+
+        //! Pre decrement
+        vector_iterator& operator--() {
+            __TBB_ASSERT( my_index>0, "operator--() applied to iterator already at beginning of concurrent_vector" ); 
+            size_t k = my_index--;
+            if( my_item ) {
+                // The following test uses 2's-complement arithmetic: it is true when the
+                // old index k is a power of two >= 2, i.e. a segment boundary was crossed.
+                if( (k& (k-2))==0 ) {
+                    // Moved back into the previous segment, so the cached element pointer is stale.
+                    my_item= NULL;
+                } else {
+                    --my_item;
+                }
+            }
+            return *this;
+        }
+
+        //! Post increment
+        vector_iterator operator++(int) {
+            vector_iterator result = *this;
+            operator++();
+            return result;
+        }
+
+        //! Post decrement
+        vector_iterator operator--(int) {
+            vector_iterator result = *this;
+            operator--();
+            return result;
+        }
+
+        // STL support
+
+        typedef ptrdiff_t difference_type;
+        typedef Value value_type;
+        typedef Value* pointer;
+        typedef Value& reference;
+        typedef std::random_access_iterator_tag iterator_category;
+    };
+
+    template<typename Container, typename T>
+    vector_iterator<Container,T> operator+( ptrdiff_t offset, const vector_iterator<Container,T>& v ) {
+        return vector_iterator<Container,T>( *v.my_vector, v.my_index+offset );
+    }
+
+    template<typename Container, typename T, typename U>
+    bool operator==( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
+        return i.my_index==j.my_index && i.my_vector == j.my_vector;
+    }
+
+    template<typename Container, typename T, typename U>
+    bool operator!=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
+        return !(i==j);
+    }
+
+    template<typename Container, typename T, typename U>
+    bool operator<( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
+        return i.my_index<j.my_index;
+    }
+
+    template<typename Container, typename T, typename U>
+    bool operator>( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
+        return j<i;
+    }
+
+    template<typename Container, typename T, typename U>
+    bool operator>=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
+        return !(i<j);
+    }
+
+    template<typename Container, typename T, typename U>
+    bool operator<=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
+        return !(j<i);
+    }
+
+    template<typename Container, typename T, typename U>
+    ptrdiff_t operator-( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
+        return ptrdiff_t(i.my_index)-ptrdiff_t(j.my_index);
+    }
+
+    template<typename T, class A>
+    class allocator_base {
+    public:
+        typedef typename A::template
+            rebind<T>::other allocator_type;
+        allocator_type my_allocator;
+
+        allocator_base(const allocator_type &a = allocator_type() ) : my_allocator(a) {}
+    };
+
+} // namespace internal
+//! @endcond
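
A small standalone sketch of the segment arithmetic used by concurrent_vector_base_v3 above (segment_index_of, segment_base, segment_size): element index i lives in segment k = floor(log2(i|1)), which starts at (1<<k) & ~1; segments 1, 2, 3, ... hold 2, 4, 8, ... elements, while segment 0 covers indices 0..1. The helper log2_floor below is a hypothetical stand-in for __TBB_Log2:

    #include <cassert>
    #include <cstddef>

    static std::size_t log2_floor( std::size_t x ) {   // stand-in for __TBB_Log2
        std::size_t k = 0;
        while( x >>= 1 ) ++k;
        return k;
    }

    int main() {
        for( std::size_t i = 0; i < 16; ++i ) {
            std::size_t k    = log2_floor( i | 1 );                       // segment_index_of(i)
            std::size_t base = (std::size_t(1) << k) & ~std::size_t(1);   // segment_base(k)
            assert( base <= i );                                          // the segment starts at or before i
        }
        // Indices 0..1 fall in segment 0, 2..3 in segment 1, 4..7 in segment 2, 8..15 in segment 3.
        assert( log2_floor( 5 | 1 ) == 2 );
        assert( ((std::size_t(1) << 2) & ~std::size_t(1)) == 4 );
        return 0;
    }
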
+
+//! Concurrent vector container
+/** concurrent_vector is a container having the following main properties:
+    - It provides random indexed access to its elements. The index of the first element is 0.
+    - It ensures safe concurrent growth of its size (different threads can safely append new elements).
+    - Adding new elements does not invalidate existing iterators and does not change indices of existing items.
+
+@par Compatibility
+    The class meets all Container Requirements and Reversible Container Requirements from
+    C++ Standard (See ISO/IEC 14882:2003(E), clause 23.1). But it doesn't meet
+    Sequence Requirements due to absence of insert() and erase() methods.
+
+@par Exception Safety
+    Methods that allocate memory and/or construct new elements can throw an
+    exception if the allocator fails to allocate memory or the element's default constructor throws.
+    Concurrent vector's element type T must conform to the following requirements:
+    - The destructor of T must not throw an exception.
+    - The default constructor of T must not throw an exception OR its non-virtual destructor must work safely when the object's memory is zero-initialized.
+    .
+    Otherwise, the program's behavior is undefined.
+@par
+    If an exception happens inside a growth or assignment operation, the vector instance becomes invalid unless stated otherwise in the method documentation.
+    Invalid state means:
+    - There is no guarantee that all items were initialized by a constructor. The remaining items are zero-filled, including the item where the exception happened.
+    - An invalid vector instance cannot be repaired; it is unable to grow anymore.
+    - Size and capacity reported by the vector are incorrect and are calculated as if the failed operation had succeeded.
+    - Attempting to access unallocated elements via operator[] or iterators results in an access violation or segmentation fault, while at() throws a C++ exception.
+    .
+    If a concurrent grow operation completes successfully, all the elements it added to the vector remain valid and accessible even if a subsequent grow operation fails.
+
+@par Fragmentation
+    Unlike an STL vector, a concurrent_vector does not move existing elements if it needs
+    to allocate more memory. The container is divided into a series of contiguous arrays of
+    elements. The first reservation, growth, or assignment operation determines the size of
+    the first array. Using a small number of elements as the initial size incurs fragmentation that
+    may increase element access time. The internal layout can be optimized by the method compact(), which
+    merges several smaller arrays into one contiguous array.
+
+@par Changes since TBB 2.1
+    - Fixed guarantees of the concurrent_vector::size() and grow_to_at_least() methods to ensure elements are allocated.
+    - Methods end()/rbegin()/back() are partly thread-safe since they use size() to get the end of the vector
+    - Added resize() methods (not thread-safe)
+    - Added cbegin/cend/crbegin/crend methods
+    - Changed return type of methods grow* and push_back to iterator
+
+@par Changes since TBB 2.0
+    - Implemented exception-safety guarantees
+    - Added template argument for allocator
+    - Added allocator argument in constructors
+    - Faster index calculation
+    - First growth call specifies a number of segments to be merged in the first allocation.
+    - Fixed memory blow-up for swarms of small vector instances
+    - Added grow_by(size_type n, const_reference t), which uses the copy constructor to initialize new items.
+    - Added STL-like constructors.
+    - Added operators ==, < and derivatives
+    - Added at() method, approved for use after an exception was thrown inside the vector
+    - Added get_allocator() method.
+    - Added assign() methods
+    - Added compact() method to defragment first segments
+    - Added swap() method
+    - range() defaults to grainsize = 1, supporting automatic grainsize algorithms.
+
+    @ingroup containers */
+template<typename T, class A>
+class concurrent_vector: protected internal::allocator_base<T, A>,
+                         private internal::concurrent_vector_base {
+private:
+    template<typename I>
+    class generic_range_type: public blocked_range<I> {
+    public:
+        typedef T value_type;
+        typedef T& reference;
+        typedef const T& const_reference;
+        typedef I iterator;
+        typedef ptrdiff_t difference_type;
+        generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {} 
+        template<typename U>
+        generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {} 
+        generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
+    };
+
+    template<typename C, typename U>
+    friend class internal::vector_iterator;
+public:
+    //------------------------------------------------------------------------
+    // STL compatible types
+    //------------------------------------------------------------------------
+    typedef internal::concurrent_vector_base_v3::size_type size_type;
+    typedef typename internal::allocator_base<T, A>::allocator_type allocator_type;
+
+    typedef T value_type;
+    typedef ptrdiff_t difference_type;
+    typedef T& reference;
+    typedef const T& const_reference;
+    typedef T *pointer;
+    typedef const T *const_pointer;
+
+    typedef internal::vector_iterator<concurrent_vector,T> iterator;
+    typedef internal::vector_iterator<concurrent_vector,const T> const_iterator;
+
+#if !defined(_MSC_VER) || _CPPLIB_VER>=300 
+    // Assume ISO standard definition of std::reverse_iterator
+    typedef std::reverse_iterator<iterator> reverse_iterator;
+    typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+#else
+    // Use non-standard std::reverse_iterator
+    typedef std::reverse_iterator<iterator,T,T&,T*> reverse_iterator;
+    typedef std::reverse_iterator<const_iterator,T,const T&,const T*> const_reverse_iterator;
+#endif /* defined(_MSC_VER) && (_MSC_VER<1300) */
+
+    //------------------------------------------------------------------------
+    // Parallel algorithm support
+    //------------------------------------------------------------------------
+    typedef generic_range_type<iterator> range_type;
+    typedef generic_range_type<const_iterator> const_range_type;
+
+    //------------------------------------------------------------------------
+    // STL compatible constructors & destructors
+    //------------------------------------------------------------------------
+
+    //! Construct empty vector.
+    explicit concurrent_vector(const allocator_type &a = allocator_type())
+        : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
+    {
+        vector_allocator_ptr = &internal_allocator;
+    }
+
+    //! Copying constructor
+    concurrent_vector( const concurrent_vector& vector, const allocator_type& a = allocator_type() )
+        : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
+    {
+        vector_allocator_ptr = &internal_allocator;
+        __TBB_TRY {
+            internal_copy(vector, sizeof(T), &copy_array);
+        } __TBB_CATCH(...) {
+            segment_t *table = my_segment;
+            internal_free_segments( reinterpret_cast<void**>(table), internal_clear(&destroy_array), my_first_block );
+            __TBB_RETHROW();
+        }
+    }
+
+    //! Copying constructor for vector with different allocator type
+    template<class M>
+    concurrent_vector( const concurrent_vector<T, M>& vector, const allocator_type& a = allocator_type() )
+        : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
+    {
+        vector_allocator_ptr = &internal_allocator;
+        __TBB_TRY {
+            internal_copy(vector.internal_vector_base(), sizeof(T), &copy_array);
+        } __TBB_CATCH(...) {
+            segment_t *table = my_segment;
+            internal_free_segments( reinterpret_cast<void**>(table), internal_clear(&destroy_array), my_first_block );
+            __TBB_RETHROW();
+        }
+    }
+
+    //! Construction with initial size specified by argument n
+    explicit concurrent_vector(size_type n)
+    {
+        vector_allocator_ptr = &internal_allocator;
+        __TBB_TRY {
+            internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array );
+        } __TBB_CATCH(...) {
+            segment_t *table = my_segment;
+            internal_free_segments( reinterpret_cast<void**>(table), internal_clear(&destroy_array), my_first_block );
+            __TBB_RETHROW();
+        }
+    }
+
+    //! Construction with initial size specified by argument n, initialization by copying of t, and given allocator instance
+    concurrent_vector(size_type n, const_reference t, const allocator_type& a = allocator_type())
+        : internal::allocator_base<T, A>(a)
+    {
+        vector_allocator_ptr = &internal_allocator;
+        __TBB_TRY {
+            internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
+        } __TBB_CATCH(...) {
+            segment_t *table = my_segment;
+            internal_free_segments( reinterpret_cast<void**>(table), internal_clear(&destroy_array), my_first_block );
+            __TBB_RETHROW();
+        }
+    }
+
+    //! Construction with copying iteration range and given allocator instance
+    template<class I>
+    concurrent_vector(I first, I last, const allocator_type &a = allocator_type())
+        : internal::allocator_base<T, A>(a)
+    {
+        vector_allocator_ptr = &internal_allocator;
+        __TBB_TRY {
+            internal_assign_range(first, last, static_cast<is_integer_tag<std::numeric_limits<I>::is_integer> *>(0) );
+        } __TBB_CATCH(...) {
+            segment_t *table = my_segment;
+            internal_free_segments( reinterpret_cast<void**>(table), internal_clear(&destroy_array), my_first_block );
+            __TBB_RETHROW();
+        }
+    }
+
+    //! Assignment
+    concurrent_vector& operator=( const concurrent_vector& vector ) {
+        if( this != &vector )
+            internal_assign(vector, sizeof(T), &destroy_array, &assign_array, &copy_array);
+        return *this;
+    }
+
+    //! Assignment for vector with different allocator type
+    template<class M>
+    concurrent_vector& operator=( const concurrent_vector<T, M>& vector ) {
+        if( static_cast<void*>( this ) != static_cast<const void*>( &vector ) )
+            internal_assign(vector.internal_vector_base(),
+                sizeof(T), &destroy_array, &assign_array, &copy_array);
+        return *this;
+    }
+
+    //------------------------------------------------------------------------
+    // Concurrent operations
+    //------------------------------------------------------------------------
+    //! Grow by "delta" elements.
+#if TBB_DEPRECATED
+    /** Returns old size. */
+    size_type grow_by( size_type delta ) {
+        return delta ? internal_grow_by( delta, sizeof(T), &initialize_array, NULL ) : my_early_size;
+    }
+#else
+    /** Returns iterator pointing to the first new element. */
+    iterator grow_by( size_type delta ) {
+        return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array, NULL ) : my_early_size);
+    }
+#endif
+
+    //! Grow by "delta" elements using copying constuctor.
+#if TBB_DEPRECATED
+    /** Returns old size. */
+    size_type grow_by( size_type delta, const_reference t ) {
+        return delta ? internal_grow_by( delta, sizeof(T), &initialize_array_by, static_cast<const void*>(&t) ) : my_early_size;
+    }
+#else
+    /** Returns iterator pointing to the first new element. */
+    iterator grow_by( size_type delta, const_reference t ) {
+        return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array_by, static_cast<const void*>(&t) ) : my_early_size);
+    }
+#endif
+
+    //! Append minimal sequence of elements such that size()>=n.  
+#if TBB_DEPRECATED
+    /** The new elements are default constructed.  Blocks until all elements in range [0..n) are allocated.
+        May return while other elements are being constructed by other threads. */
+    void grow_to_at_least( size_type n ) {
+        if( n ) internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array, NULL );
+    };
+#else
+    /** The new elements are default constructed.  Blocks until all elements in range [0..n) are allocated.
+        May return while other elements are being constructed by other threads.
+        Returns iterator that points to beginning of appended sequence.
+        If no elements were appended, returns iterator pointing to nth element. */
+    iterator grow_to_at_least( size_type n ) {
+        size_type m=0;
+        if( n ) {
+            m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array, NULL );
+            if( m>n ) m=n;
+        }
+        return iterator(*this, m);
+    };
+#endif
+
+    //! Push item 
+#if TBB_DEPRECATED
+    size_type push_back( const_reference item )
+#else
+    /** Returns iterator pointing to the new element. */
+    iterator push_back( const_reference item )
+#endif
+    {
+        size_type k;
+        void *ptr = internal_push_back(sizeof(T),k);
+        internal_loop_guide loop(1, ptr);
+        loop.init(&item);
+#if TBB_DEPRECATED
+        return k;
+#else
+        return iterator(*this, k, ptr);
+#endif
+    }
+
+    //! Get reference to element at given index.
+    /** This method is thread-safe for concurrent reads, and also while growing the vector,
+        as long as the calling thread has checked that index&lt;size(). */
+    reference operator[]( size_type index ) {
+        return internal_subscript(index);
+    }
+
+    //! Get const reference to element at given index.
+    const_reference operator[]( size_type index ) const {
+        return internal_subscript(index);
+    }
+
+    //! Get reference to element at given index. Throws exceptions on errors.
+    reference at( size_type index ) {
+        return internal_subscript_with_exceptions(index);
+    }
+
+    //! Get const reference to element at given index. Throws exceptions on errors.
+    const_reference at( size_type index ) const {
+        return internal_subscript_with_exceptions(index);
+    }
+
+    //! Get range for iterating with parallel algorithms
+    range_type range( size_t grainsize = 1) {
+        return range_type( begin(), end(), grainsize );
+    }
+
+    //! Get const range for iterating with parallel algorithms
+    const_range_type range( size_t grainsize = 1 ) const {
+        return const_range_type( begin(), end(), grainsize );
+    }
+    //------------------------------------------------------------------------
+    // Capacity
+    //------------------------------------------------------------------------
+    //! Return size of vector. It may include elements under construction
+    size_type size() const {
+        size_type sz = my_early_size, cp = internal_capacity();
+        return cp < sz ? cp : sz;
+    }
+
+    //! Return true if the vector contains no elements, not even elements under construction.
+    bool empty() const {return !my_early_size;}
+
+    //! Maximum size to which array can grow without allocating more memory. Concurrent allocations are not included in the value.
+    size_type capacity() const {return internal_capacity();}
+
+    //! Allocate enough space to grow to size n without having to allocate more memory later.
+    /** Like most of the methods provided for STL compatibility, this method is *not* thread safe. 
+        The capacity afterwards may be bigger than the requested reservation. */
+    void reserve( size_type n ) {
+        if( n )
+            internal_reserve(n, sizeof(T), max_size());
+    }
+
+    //! Resize the vector. Not thread-safe.
+    void resize( size_type n ) {
+        internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array );
+    }
+    
+    //! Resize the vector, copy t for new elements. Not thread-safe.
+    void resize( size_type n, const_reference t ) {
+        internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
+    }
+   
+#if TBB_DEPRECATED 
+    //! An alias for shrink_to_fit()
+    void compact() {shrink_to_fit();}
+#endif /* TBB_DEPRECATED */
+
+    //! Optimize memory usage and fragmentation.
+    void shrink_to_fit();
+
+    //! Upper bound on argument to reserve.
+    size_type max_size() const {return (~size_type(0))/sizeof(T);}
+
+    //------------------------------------------------------------------------
+    // STL support
+    //------------------------------------------------------------------------
+
+    //! start iterator
+    iterator begin() {return iterator(*this,0);}
+    //! end iterator
+    iterator end() {return iterator(*this,size());}
+    //! start const iterator
+    const_iterator begin() const {return const_iterator(*this,0);}
+    //! end const iterator
+    const_iterator end() const {return const_iterator(*this,size());}
+    //! start const iterator
+    const_iterator cbegin() const {return const_iterator(*this,0);}
+    //! end const iterator
+    const_iterator cend() const {return const_iterator(*this,size());}
+    //! reverse start iterator
+    reverse_iterator rbegin() {return reverse_iterator(end());}
+    //! reverse end iterator
+    reverse_iterator rend() {return reverse_iterator(begin());}
+    //! reverse start const iterator
+    const_reverse_iterator rbegin() const {return const_reverse_iterator(end());}
+    //! reverse end const iterator
+    const_reverse_iterator rend() const {return const_reverse_iterator(begin());}
+    //! reverse start const iterator
+    const_reverse_iterator crbegin() const {return const_reverse_iterator(end());}
+    //! reverse end const iterator
+    const_reverse_iterator crend() const {return const_reverse_iterator(begin());}
+    //! the first item
+    reference front() {
+        __TBB_ASSERT( size()>0, NULL);
+        return static_cast<T*>(my_segment[0].array)[0];
+    }
+    //! the first item const
+    const_reference front() const {
+        __TBB_ASSERT( size()>0, NULL);
+        return static_cast<const T*>(my_segment[0].array)[0];
+    }
+    //! the last item
+    reference back() {
+        __TBB_ASSERT( size()>0, NULL);
+        return internal_subscript( size()-1 );
+    }
+    //! the last item const
+    const_reference back() const {
+        __TBB_ASSERT( size()>0, NULL);
+        return internal_subscript( size()-1 );
+    }
+    //! return allocator object
+    allocator_type get_allocator() const { return this->my_allocator; }
+
+    //! assign n items by copying t item
+    void assign(size_type n, const_reference t) {
+        clear();
+        internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
+    }
+
+    //! assign range [first, last)
+    template<class I>
+    void assign(I first, I last) {
+        clear(); internal_assign_range( first, last, static_cast<is_integer_tag<std::numeric_limits<I>::is_integer> *>(0) );
+    }
+
+    //! swap two instances
+    void swap(concurrent_vector &vector) {
+        if( this != &vector ) {
+            concurrent_vector_base_v3::internal_swap(static_cast<concurrent_vector_base_v3&>(vector));
+            std::swap(this->my_allocator, vector.my_allocator);
+        }
+    }
+
+    //! Clear container while keeping memory allocated.
+    /** To free the memory afterwards, call shrink_to_fit() (or its deprecated alias compact()). Not thread-safe. **/
+    void clear() {
+        internal_clear(&destroy_array);
+    }
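+
+    // Illustrative sketch of the STL-style interface above (not part of the original header;
+    // the names v and w are arbitrary). These calls are intended for sequential code:
+    //
+    //     tbb::concurrent_vector<int> v, w;
+    //     v.assign(3, 7);                // v = {7, 7, 7}
+    //     for( tbb::concurrent_vector<int>::iterator i = v.begin(); i != v.end(); ++i )
+    //         *i += 1;                   // v = {8, 8, 8}
+    //     int first = v.front(), last = v.back();
+    //     v.swap(w);                     // exchange contents with w
+    //     w.clear();                     // destroy elements but keep the memory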
+
+    //! Clear and destroy vector.
+    ~concurrent_vector() {
+        segment_t *table = my_segment;
+        internal_free_segments( reinterpret_cast<void**>(table), internal_clear(&destroy_array), my_first_block );
+        // the base class destructor runs after this
+    }
+
+    const internal::concurrent_vector_base_v3 &internal_vector_base() const { return *this; }
+private:
+    //! Allocate k items
+    static void *internal_allocator(internal::concurrent_vector_base_v3 &vb, size_t k) {
+        return static_cast<concurrent_vector<T, A>&>(vb).my_allocator.allocate(k);
+    }
+    //! Free k segments from table
+    void internal_free_segments(void *table[], segment_index_t k, segment_index_t first_block);
+
+    //! Get reference to element at given index.
+    T& internal_subscript( size_type index ) const;
+
+    //! Get reference to element at given index, with error checking
+    T& internal_subscript_with_exceptions( size_type index ) const;
+
+    //! assign n items, copying *p when p is non-null, otherwise default-constructing them
+    void internal_assign_n(size_type n, const_pointer p) {
+        internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(p), &destroy_array, p? &initialize_array_by : &initialize_array );
+    }
+
+    //! helper class
+    template<bool B> class is_integer_tag;
+
+    //! assign (count, value) when assign(first, last) is called with integral arguments. See C++ Standard 2003, 23.1.1/9
+    template<class I>
+    void internal_assign_range(I first, I last, is_integer_tag<true> *) {
+        internal_assign_n(static_cast<size_type>(first), &static_cast<T&>(last));
+    }
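+    // Illustration (an assumed example, not from the original header): for concurrent_vector<int> v,
+    // the call v.assign(10, 42) selects the template assign(I, I) with I = int as the best match;
+    // the tag dispatch above then interprets it as "10 copies of the value 42" rather than as an
+    // iterator range, as required by C++ Standard 2003, 23.1.1/9.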
+    //! inline proxy assign by iterators
+    template<class I>
+    void internal_assign_range(I first, I last, is_integer_tag<false> *) {
+        internal_assign_iterators(first, last);
+    }
+    //! assign by iterators
+    template<class I>
+    void internal_assign_iterators(I first, I last);
+
+    //! Construct n instances of T, starting at "begin".
+    static void __TBB_EXPORTED_FUNC initialize_array( void* begin, const void*, size_type n );
+
+    //! Copy-construct n instances of T, starting at "begin", each from the exemplar *src.
+    static void __TBB_EXPORTED_FUNC initialize_array_by( void* begin, const void* src, size_type n );
+
+    //! Copy-construct n instances of T at "dst" from the corresponding elements at "src".
+    static void __TBB_EXPORTED_FUNC copy_array( void* dst, const void* src, size_type n );
+
+    //! Assign n instances of T at "dst" from the corresponding elements at "src".
+    static void __TBB_EXPORTED_FUNC assign_array( void* dst, const void* src, size_type n );
+
+    //! Destroy n instances of T, starting at "begin".
+    static void __TBB_EXPORTED_FUNC destroy_array( void* begin, size_type n );
+
+    //! Exception-aware helper class for filling a segment with operations of the user's type T that may throw
+    class internal_loop_guide : internal::no_copy {
+    public:
+        const pointer array;
+        const size_type n;
+        size_type i;
+        internal_loop_guide(size_type ntrials, void *ptr)
+            : array(static_cast<pointer>(ptr)), n(ntrials), i(0) {}
+        void init() {   for(; i < n; ++i) new( &array[i] ) T(); }
+        void init(const void *src) { for(; i < n; ++i) new( &array[i] ) T(*static_cast<const T*>(src)); }
+        void copy(const void *src) { for(; i < n; ++i) new( &array[i] ) T(static_cast<const T*>(src)[i]); }
+        void assign(const void *src) { for(; i < n; ++i) array[i] = static_cast<const T*>(src)[i]; }
+        template<class I> void iterate(I &src) { for(; i < n; ++i, ++src) new( &array[i] ) T( *src ); }
+        ~internal_loop_guide() {
+            if(i < n) // if an exception was raised, zero the remaining items
+                std::memset(array+i, 0, (n-i)*sizeof(value_type));
+        }
+    };
+};
+
+template<typename T, class A>
+void concurrent_vector<T, A>::shrink_to_fit() {
+    internal_segments_table old;
+    __TBB_TRY {
+        if( internal_compact( sizeof(T), &old, &destroy_array, &copy_array ) )
+            internal_free_segments( old.table, pointers_per_long_table, old.first_block ); // free joined and unnecessary segments
+    } __TBB_CATCH(...) {
+        if( old.first_block ) // free the segment allocated for compacting; only needed when T's constructor may throw
+            internal_free_segments( old.table, 1, old.first_block );
+        __TBB_RETHROW();
+    }
+}
+
+template<typename T, class A>
+void concurrent_vector<T, A>::internal_free_segments(void *table[], segment_index_t k, segment_index_t first_block) {
+    // Free the arrays
+    while( k > first_block ) {
+        --k;
+        T* array = static_cast<T*>(table[k]);
+        table[k] = NULL;
+        if( array > internal::vector_allocation_error_flag ) // check for correct segment pointer
+            this->my_allocator.deallocate( array, segment_size(k) );
+    }
+    T* array = static_cast<T*>(table[0]);
+    if( array > internal::vector_allocation_error_flag ) {
+        __TBB_ASSERT( first_block > 0, NULL );
+        while(k > 0) table[--k] = NULL;
+        this->my_allocator.deallocate( array, segment_size(first_block) );
+    }
+}
+
+template<typename T, class A>
+T& concurrent_vector<T, A>::internal_subscript( size_type index ) const {
+    __TBB_ASSERT( index < my_early_size, "index out of bounds" );
+    size_type j = index;
+    segment_index_t k = segment_base_index_of( j );
+    __TBB_ASSERT( (segment_t*)my_segment != my_storage || k < pointers_per_short_table, "index is being allocated" );
+    // no need in __TBB_load_with_acquire since thread works in own space or gets 
+#if TBB_USE_THREADING_TOOLS
+    T* array = static_cast<T*>( tbb::internal::itt_load_pointer_v3(&my_segment[k].array));
+#else
+    T* array = static_cast<T*>(my_segment[k].array);
+#endif /* TBB_USE_THREADING_TOOLS */
+    __TBB_ASSERT( array != internal::vector_allocation_error_flag, "the instance is broken by bad allocation. Use at() instead" );
+    __TBB_ASSERT( array, "index is being allocated" );
+    return array[j];
+}
+
+template<typename T, class A>
+T& concurrent_vector<T, A>::internal_subscript_with_exceptions( size_type index ) const {
+    if( index >= my_early_size )
+        internal::throw_exception(internal::eid_out_of_range); // throw std::out_of_range
+    size_type j = index;
+    segment_index_t k = segment_base_index_of( j );
+    if( (segment_t*)my_segment == my_storage && k >= pointers_per_short_table )
+        internal::throw_exception(internal::eid_segment_range_error); // throw std::range_error
+    void *array = my_segment[k].array; // no need in __TBB_load_with_acquire
+    if( array <= internal::vector_allocation_error_flag ) // check for correct segment pointer
+        internal::throw_exception(internal::eid_index_range_error); // throw std::range_error
+    return static_cast<T*>(array)[j];
+}
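+
+// Illustrative contrast between the two subscript paths above (the names v, a and b are arbitrary,
+// and operator[] / at() are assumed to be the accessors declared earlier in this header):
+// operator[] goes through internal_subscript() and only asserts in debug builds, while at() goes
+// through internal_subscript_with_exceptions() and reports errors by throwing.
+//
+//     tbb::concurrent_vector<int> v;
+//     v.resize(5);
+//     int a = v[2];          // no bounds checking in release builds
+//     try {
+//         int b = v.at(99);  // throws std::out_of_range
+//     } catch( std::out_of_range& ) { /* handle the bad index */ }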
+
+template<typename T, class A> template<class I>
+void concurrent_vector<T, A>::internal_assign_iterators(I first, I last) {
+    __TBB_ASSERT(my_early_size == 0, NULL);
+    size_type n = std::distance(first, last);
+    if( !n ) return;
+    internal_reserve(n, sizeof(T), max_size());
+    my_early_size = n;
+    segment_index_t k = 0;
+    size_type sz = segment_size( my_first_block );
+    while( sz < n ) {
+        internal_loop_guide loop(sz, my_segment[k].array);
+        loop.iterate(first);
+        n -= sz;
+        if( !k ) k = my_first_block;
+        else { ++k; sz <<= 1; }
+    }
+    internal_loop_guide loop(n, my_segment[k].array);
+    loop.iterate(first);
+}
+
+template<typename T, class A>
+void concurrent_vector<T, A>::initialize_array( void* begin, const void *, size_type n ) {
+    internal_loop_guide loop(n, begin); loop.init();
+}
+
+template<typename T, class A>
+void concurrent_vector<T, A>::initialize_array_by( void* begin, const void *src, size_type n ) {
+    internal_loop_guide loop(n, begin); loop.init(src);
+}
+
+template<typename T, class A>
+void concurrent_vector<T, A>::copy_array( void* dst, const void* src, size_type n ) {
+    internal_loop_guide loop(n, dst); loop.copy(src);
+}
+
+template<typename T, class A>
+void concurrent_vector<T, A>::assign_array( void* dst, const void* src, size_type n ) {
+    internal_loop_guide loop(n, dst); loop.assign(src);
+}
+
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) 
+    // Workaround for overzealous compiler warning
+    #pragma warning (push)
+    #pragma warning (disable: 4189)
+#endif
+template<typename T, class A>
+void concurrent_vector<T, A>::destroy_array( void* begin, size_type n ) {
+    T* array = static_cast<T*>(begin);
+    for( size_type j=n; j>0; --j )
+        array[j-1].~T(); // destructors are not supposed to throw exceptions
+}
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) 
+    #pragma warning (pop)
+#endif // warning 4189 is back 
+
+// concurrent_vector's template functions
+template<typename T, class A1, class A2>
+inline bool operator==(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b) {
+    // Simply:    return a.size() == b.size() && std::equal(a.begin(), a.end(), b.begin());
+    if(a.size() != b.size()) return false;
+    typename concurrent_vector<T, A1>::const_iterator i(a.begin());
+    typename concurrent_vector<T, A2>::const_iterator j(b.begin());
+    for(; i != a.end(); ++i, ++j)
+        if( !(*i == *j) ) return false;
+    return true;
+}
+
+template<typename T, class A1, class A2>
+inline bool operator!=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
+{    return !(a == b); }
+
+template<typename T, class A1, class A2>
+inline bool operator<(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
+{    return (std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end())); }
+
+template<typename T, class A1, class A2>
+inline bool operator>(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
+{    return b < a; }
+
+template<typename T, class A1, class A2>
+inline bool operator<=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
+{    return !(b < a); }
+
+template<typename T, class A1, class A2>
+inline bool operator>=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
+{    return !(a < b); }
+
+template<typename T, class A>
+inline void swap(concurrent_vector<T, A> &a, concurrent_vector<T, A> &b)
+{    a.swap( b ); }
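+
+// Illustrative sketch of the comparisons above (not part of the original header; the names a and b
+// are arbitrary, and push_back is assumed to be declared earlier in this header). Equality is
+// element-wise and ordering is lexicographic, mirroring std::vector:
+//
+//     tbb::concurrent_vector<int> a, b;
+//     a.assign(3, 1);          // a = {1, 1, 1}
+//     b.assign(3, 1);          // b = {1, 1, 1}
+//     bool same = (a == b);    // true
+//     b.push_back(2);          // b = {1, 1, 1, 2}
+//     bool less = (a < b);     // true: a is a proper prefix of b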
+
+} // namespace tbb
+
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) && defined(_Wp64)
+    #pragma warning (pop)
+#endif // warning 4267 is back
+
+#endif /* __TBB_concurrent_vector_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/critical_section.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/critical_section.h
new file mode 100644 (file)
index 0000000..a3e435a
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _TBB_CRITICAL_SECTION_H_
+#define _TBB_CRITICAL_SECTION_H_
+
+#if _WIN32||_WIN64
+#include "machine/windows_api.h"
+#else
+#include <pthread.h>
+#include <errno.h>
+#endif  // _WIN32||_WIN64
+
+#include "tbb_stddef.h"
+#include "tbb_thread.h"
+#include "tbb_exception.h"
+
+#include "tbb_profiling.h"
+
+namespace tbb {
+
+    namespace internal {
+class critical_section_v4 : internal::no_copy {
+#if _WIN32||_WIN64
+    CRITICAL_SECTION my_impl;
+#else
+    pthread_mutex_t my_impl;
+#endif
+    tbb_thread::id my_tid;
+public:
+
+    void __TBB_EXPORTED_METHOD internal_construct();
+
+    critical_section_v4() { 
+#if _WIN32||_WIN64
+        InitializeCriticalSection(&my_impl);
+#else
+        pthread_mutex_init(&my_impl, NULL);
+#endif
+        internal_construct();
+    }
+
+    ~critical_section_v4() {
+        __TBB_ASSERT(my_tid == tbb_thread::id(), "Destroying a still-held critical section");
+#if _WIN32||_WIN64
+        DeleteCriticalSection(&my_impl); 
+#else
+        pthread_mutex_destroy(&my_impl);
+#endif
+    }
+
+    class scoped_lock : internal::no_copy {
+    private:
+        critical_section_v4 &my_crit;
+    public:
+        scoped_lock( critical_section_v4& lock_me) :my_crit(lock_me) {
+            my_crit.lock();
+        }
+
+        ~scoped_lock() {
+            my_crit.unlock();
+        }
+    };
+
+    void lock() { 
+        tbb_thread::id local_tid = this_tbb_thread::get_id();
+        if(local_tid == my_tid) throw_exception( eid_improper_lock );
+#if _WIN32||_WIN64
+        EnterCriticalSection( &my_impl );
+#else
+        int rval = pthread_mutex_lock(&my_impl);
+        __TBB_ASSERT_EX(!rval, "critical_section::lock: pthread_mutex_lock failed");
+#endif
+        __TBB_ASSERT(my_tid == tbb_thread::id(), NULL);
+        my_tid = local_tid;
+    }
+
+    bool try_lock() {
+        bool gotlock;
+        tbb_thread::id local_tid = this_tbb_thread::get_id();
+        if(local_tid == my_tid) return false;
+#if _WIN32||_WIN64
+        gotlock = TryEnterCriticalSection( &my_impl ) != 0;
+#else
+        int rval = pthread_mutex_trylock(&my_impl);
+        // valid returns are 0 (locked) and [EBUSY]
+        __TBB_ASSERT(rval == 0 || rval == EBUSY, "critical_section::trylock: pthread_mutex_trylock failed");
+        gotlock = rval == 0;
+#endif
+        if(gotlock)  {
+            my_tid = local_tid;
+        }
+        return gotlock;
+    }
+
+    void unlock() {
+        __TBB_ASSERT(this_tbb_thread::get_id() == my_tid, "the thread unlocking critical_section is not the thread that locked it");
+        my_tid = tbb_thread::id();
+#if _WIN32||_WIN64
+        LeaveCriticalSection( &my_impl );
+#else
+        int rval = pthread_mutex_unlock(&my_impl);
+        __TBB_ASSERT_EX(!rval, "critical_section::unlock: pthread_mutex_unlock failed");
+#endif
+    }
+
+    static const bool is_rw_mutex = false;
+    static const bool is_recursive_mutex = false;
+    static const bool is_fair_mutex = true;
+}; // critical_section_v4
+} // namespace internal
+typedef internal::critical_section_v4 critical_section;
+
+__TBB_DEFINE_PROFILING_SET_NAME(critical_section)
+} // namespace tbb
+#endif  // _TBB_CRITICAL_SECTION_H_
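+
+// Illustrative usage sketch (not part of the original header; the names mtx, shared_counter and
+// increment are arbitrary). critical_section is not recursive: locking it again on the same thread
+// throws, so the scoped_lock idiom is preferred over explicit lock()/unlock():
+//
+//     #include "tbb/critical_section.h"
+//
+//     tbb::critical_section mtx;
+//     int shared_counter = 0;
+//
+//     void increment() {
+//         tbb::critical_section::scoped_lock guard(mtx);  // released automatically on scope exit
+//         ++shared_counter;
+//     }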
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/enumerable_thread_specific.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/enumerable_thread_specific.h
new file mode 100644 (file)
index 0000000..c475e17
--- /dev/null
@@ -0,0 +1,999 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_enumerable_thread_specific_H
+#define __TBB_enumerable_thread_specific_H
+
+#include "concurrent_vector.h"
+#include "tbb_thread.h"
+#include "cache_aligned_allocator.h"
+#include "aligned_space.h"
+#include <string.h>  // for memcpy
+
+#if _WIN32||_WIN64
+#include "machine/windows_api.h"
+#else
+#include <pthread.h>
+#endif
+
+namespace tbb {
+
+//! enum for selecting between single key and key-per-instance versions
+enum ets_key_usage_type { ets_key_per_instance, ets_no_key };
+
+namespace interface6 {
+    //! @cond
+    namespace internal { 
+
+        template<ets_key_usage_type ETS_key_type>
+        class ets_base: tbb::internal::no_copy {
+        protected:
+#if _WIN32||_WIN64
+            typedef DWORD key_type;
+#else
+            typedef pthread_t key_type;
+#endif
+#if __TBB_GCC_3_3_PROTECTED_BROKEN
+        public:
+#endif
+            struct slot;
+
+            struct array {
+                array* next;
+                size_t lg_size;
+                slot& at( size_t k ) {
+                    return ((slot*)(void*)(this+1))[k];
+                }
+                size_t size() const {return (size_t)1<<lg_size;}
+                size_t mask() const {return size()-1;}
+                size_t start( size_t h ) const {
+                    return h>>(8*sizeof(size_t)-lg_size);
+                }
+            };
+            struct slot {
+                key_type key;
+                void* ptr;
+                bool empty() const {return !key;}
+                bool match( key_type k ) const {return key==k;}
+                bool claim( key_type k ) {
+                    __TBB_ASSERT(sizeof(tbb::atomic<key_type>)==sizeof(key_type), NULL);
+                    return tbb::internal::punned_cast<tbb::atomic<key_type>*>(&key)->compare_and_swap(k,0)==0;
+                }
+            };
+#if __TBB_GCC_3_3_PROTECTED_BROKEN
+        protected:
+#endif
+        
+            static key_type key_of_current_thread() {
+               tbb::tbb_thread::id id = tbb::this_tbb_thread::get_id();
+               key_type k;
+               memcpy( &k, &id, sizeof(k) );
+               return k;
+            }
+
+            //! Root of linked list of arrays of decreasing size.
+            /** NULL if and only if my_count==0.  
+                Each array in the list is half the size of its predecessor. */
+            atomic<array*> my_root;
+            atomic<size_t> my_count;
+            virtual void* create_local() = 0;
+            virtual void* create_array(size_t _size) = 0;  // _size in bytes
+            virtual void free_array(void* ptr, size_t _size) = 0; // _size in bytes
+            array* allocate( size_t lg_size ) {
+                size_t n = 1<<lg_size;  
+                array* a = static_cast<array*>(create_array( sizeof(array)+n*sizeof(slot) ));
+                a->lg_size = lg_size;
+                std::memset( a+1, 0, n*sizeof(slot) );
+                return a;
+            }
+            void free(array* a) {
+                size_t n = 1<<(a->lg_size);  
+                free_array( (void *)a, size_t(sizeof(array)+n*sizeof(slot)) );
+            }
+            static size_t hash( key_type k ) {
+                // Multiplicative hashing.  Client should use *upper* bits.
+                // casts required for Mac gcc4.* compiler
+#if __TBB_WORDSIZE == 4
+                return uintptr_t(k)*0x9E3779B9;
+#else
+                return uintptr_t(k)*0x9E3779B97F4A7C15;
+#endif 
+            } 
+        
+            ets_base() {my_root=NULL; my_count=0;}
+            virtual ~ets_base();  // g++ complains if this is not virtual...
+            void* table_lookup( bool& exists );
+            void table_clear();
+            slot& table_find( key_type k ) {
+                size_t h = hash(k);
+                array* r = my_root;
+                size_t mask = r->mask();
+                for(size_t i = r->start(h);;i=(i+1)&mask) {
+                    slot& s = r->at(i);
+                    if( s.empty() || s.match(k) )
+                        return s;
+                }
+            }
+            void table_reserve_for_copy( const ets_base& other ) {
+                __TBB_ASSERT(!my_root,NULL);
+                __TBB_ASSERT(!my_count,NULL);
+                if( other.my_root ) {
+                    array* a = allocate(other.my_root->lg_size);
+                    a->next = NULL;
+                    my_root = a;
+                    my_count = other.my_count;
+                }
+            }
+        };
+
+        template<ets_key_usage_type ETS_key_type>
+        ets_base<ETS_key_type>::~ets_base() {
+            __TBB_ASSERT(!my_root, NULL);
+        }
+
+        template<ets_key_usage_type ETS_key_type>
+        void ets_base<ETS_key_type>::table_clear() {
+            while( array* r = my_root ) {
+                my_root = r->next;
+                free(r);
+            }
+            my_count = 0;
+        }
+                
+        template<ets_key_usage_type ETS_key_type>
+        void* ets_base<ETS_key_type>::table_lookup( bool& exists ) {
+            const key_type k = key_of_current_thread(); 
+
+            __TBB_ASSERT(k!=0,NULL);
+            void* found;
+            size_t h = hash(k);
+            for( array* r=my_root; r; r=r->next ) {
+                size_t mask=r->mask();
+                for(size_t i = r->start(h); ;i=(i+1)&mask) {
+                    slot& s = r->at(i);
+                    if( s.empty() ) break;
+                    if( s.match(k) ) {
+                        if( r==my_root ) {
+                            // Success at top level
+                            exists = true;
+                            return s.ptr;
+                        } else {
+                            // Success at some other level.  Need to insert at top level.
+                            exists = true;
+                            found = s.ptr;
+                            goto insert;
+                        }
+                    }
+                }
+            }
+            // Key does not yet exist
+            exists = false;
+            found = create_local();
+            {
+                size_t c = ++my_count;
+                array* r = my_root;
+                if( !r || c>r->size()/2 ) {
+                    size_t s = r ? r->lg_size : 2;
+                    while( c>size_t(1)<<(s-1) ) ++s;
+                    array* a = allocate(s);
+                    for(;;) {
+                        a->next = my_root;
+                        array* new_r = my_root.compare_and_swap(a,r);
+                        if( new_r==r ) break;
+                        if( new_r->lg_size>=s ) {
+                            // Another thread inserted an equal or  bigger array, so our array is superfluous.
+                            free(a);
+                            break;
+                        }
+                        r = new_r;
+                    }
+                }
+            }
+        insert:
+            // There is guaranteed to be room for it, and it is not present, so search for an empty slot and grab it.
+            array* ir = my_root;
+            size_t mask = ir->mask();
+            for(size_t i = ir->start(h);;i=(i+1)&mask) {
+                slot& s = ir->at(i);
+                if( s.empty() ) {
+                    if( s.claim(k) ) {
+                        s.ptr = found;
+                        return found;
+                    }
+                }
+            }
+        }
+
+        //! Specialization that exploits native TLS 
+        template <>
+        class ets_base<ets_key_per_instance>: protected ets_base<ets_no_key> {
+            typedef ets_base<ets_no_key> super;
+#if _WIN32||_WIN64
+            typedef DWORD tls_key_t;
+            void create_key() { my_key = TlsAlloc(); }
+            void destroy_key() { TlsFree(my_key); }
+            void set_tls(void * value) { TlsSetValue(my_key, (LPVOID)value); }
+            void* get_tls() { return (void *)TlsGetValue(my_key); }
+#else
+            typedef pthread_key_t tls_key_t;
+            void create_key() { pthread_key_create(&my_key, NULL); }
+            void destroy_key() { pthread_key_delete(my_key); }
+            void set_tls( void * value ) const { pthread_setspecific(my_key, value); }
+            void* get_tls() const { return pthread_getspecific(my_key); }
+#endif
+            tls_key_t my_key;
+            virtual void* create_local() = 0;
+            virtual void* create_array(size_t _size) = 0;  // _size in bytes
+            virtual void free_array(void* ptr, size_t _size) = 0; // size in bytes
+        public:
+            ets_base() {create_key();}
+            ~ets_base() {destroy_key();}
+            void* table_lookup( bool& exists ) {
+                void* found = get_tls();
+                if( found ) {
+                    exists=true;
+                } else {
+                    found = super::table_lookup(exists);
+                    set_tls(found);
+                }
+                return found; 
+            }
+            void table_clear() {
+                destroy_key();
+                create_key(); 
+                super::table_clear();
+            }
+        };
+
+        //! Random access iterator for traversing the thread local copies.
+        template< typename Container, typename Value >
+        class enumerable_thread_specific_iterator 
+#if defined(_WIN64) && defined(_MSC_VER) 
+            // Ensure that Microsoft's internal template function _Val_type works correctly.
+            : public std::iterator<std::random_access_iterator_tag,Value>
+#endif /* defined(_WIN64) && defined(_MSC_VER) */
+        {
+            //! current position in the concurrent_vector 
+        
+            Container *my_container;
+            typename Container::size_type my_index;
+            mutable Value *my_value;
+        
+            template<typename C, typename T>
+            friend enumerable_thread_specific_iterator<C,T> operator+( ptrdiff_t offset, 
+                                                                       const enumerable_thread_specific_iterator<C,T>& v );
+        
+            template<typename C, typename T, typename U>
+            friend bool operator==( const enumerable_thread_specific_iterator<C,T>& i, 
+                                    const enumerable_thread_specific_iterator<C,U>& j );
+        
+            template<typename C, typename T, typename U>
+            friend bool operator<( const enumerable_thread_specific_iterator<C,T>& i, 
+                                   const enumerable_thread_specific_iterator<C,U>& j );
+        
+            template<typename C, typename T, typename U>
+            friend ptrdiff_t operator-( const enumerable_thread_specific_iterator<C,T>& i, const enumerable_thread_specific_iterator<C,U>& j );
+            
+            template<typename C, typename U> 
+            friend class enumerable_thread_specific_iterator;
+        
+            public:
+        
+            enumerable_thread_specific_iterator( const Container &container, typename Container::size_type index ) : 
+                my_container(&const_cast<Container &>(container)), my_index(index), my_value(NULL) {}
+        
+            //! Default constructor
+            enumerable_thread_specific_iterator() : my_container(NULL), my_index(0), my_value(NULL) {}
+        
+            template<typename U>
+            enumerable_thread_specific_iterator( const enumerable_thread_specific_iterator<Container, U>& other ) :
+                    my_container( other.my_container ), my_index( other.my_index), my_value( const_cast<Value *>(other.my_value) ) {}
+        
+            enumerable_thread_specific_iterator operator+( ptrdiff_t offset ) const {
+                return enumerable_thread_specific_iterator(*my_container, my_index + offset);
+            }
+        
+            enumerable_thread_specific_iterator &operator+=( ptrdiff_t offset ) {
+                my_index += offset;
+                my_value = NULL;
+                return *this;
+            }
+        
+            enumerable_thread_specific_iterator operator-( ptrdiff_t offset ) const {
+                return enumerable_thread_specific_iterator( *my_container, my_index-offset );
+            }
+        
+            enumerable_thread_specific_iterator &operator-=( ptrdiff_t offset ) {
+                my_index -= offset;
+                my_value = NULL;
+                return *this;
+            }
+        
+            Value& operator*() const {
+                Value* value = my_value;
+                if( !value ) {
+                    value = my_value = reinterpret_cast<Value *>(&(*my_container)[my_index].value);
+                }
+                __TBB_ASSERT( value==reinterpret_cast<Value *>(&(*my_container)[my_index].value), "corrupt cache" );
+                return *value;
+            }
+        
+            Value& operator[]( ptrdiff_t k ) const {
+               return (*my_container)[my_index + k].value;
+            }
+        
+            Value* operator->() const {return &operator*();}
+        
+            enumerable_thread_specific_iterator& operator++() {
+                ++my_index;
+                my_value = NULL;
+                return *this;
+            }
+        
+            enumerable_thread_specific_iterator& operator--() {
+                --my_index;
+                my_value = NULL;
+                return *this;
+            }
+        
+            //! Post increment
+            enumerable_thread_specific_iterator operator++(int) {
+                enumerable_thread_specific_iterator result = *this;
+                ++my_index;
+                my_value = NULL;
+                return result;
+            }
+        
+            //! Post decrement
+            enumerable_thread_specific_iterator operator--(int) {
+                enumerable_thread_specific_iterator result = *this;
+                --my_index;
+                my_value = NULL;
+                return result;
+            }
+        
+            // STL support
+            typedef ptrdiff_t difference_type;
+            typedef Value value_type;
+            typedef Value* pointer;
+            typedef Value& reference;
+            typedef std::random_access_iterator_tag iterator_category;
+        };
+        
+        template<typename Container, typename T>
+        enumerable_thread_specific_iterator<Container,T> operator+( ptrdiff_t offset, 
+                                                                    const enumerable_thread_specific_iterator<Container,T>& v ) {
+            return enumerable_thread_specific_iterator<Container,T>( v.my_container, v.my_index + offset );
+        }
+        
+        template<typename Container, typename T, typename U>
+        bool operator==( const enumerable_thread_specific_iterator<Container,T>& i, 
+                         const enumerable_thread_specific_iterator<Container,U>& j ) {
+            return i.my_index==j.my_index && i.my_container == j.my_container;
+        }
+        
+        template<typename Container, typename T, typename U>
+        bool operator!=( const enumerable_thread_specific_iterator<Container,T>& i, 
+                         const enumerable_thread_specific_iterator<Container,U>& j ) {
+            return !(i==j);
+        }
+        
+        template<typename Container, typename T, typename U>
+        bool operator<( const enumerable_thread_specific_iterator<Container,T>& i, 
+                        const enumerable_thread_specific_iterator<Container,U>& j ) {
+            return i.my_index<j.my_index;
+        }
+        
+        template<typename Container, typename T, typename U>
+        bool operator>( const enumerable_thread_specific_iterator<Container,T>& i, 
+                        const enumerable_thread_specific_iterator<Container,U>& j ) {
+            return j<i;
+        }
+        
+        template<typename Container, typename T, typename U>
+        bool operator>=( const enumerable_thread_specific_iterator<Container,T>& i, 
+                         const enumerable_thread_specific_iterator<Container,U>& j ) {
+            return !(i<j);
+        }
+        
+        template<typename Container, typename T, typename U>
+        bool operator<=( const enumerable_thread_specific_iterator<Container,T>& i, 
+                         const enumerable_thread_specific_iterator<Container,U>& j ) {
+            return !(j<i);
+        }
+        
+        template<typename Container, typename T, typename U>
+        ptrdiff_t operator-( const enumerable_thread_specific_iterator<Container,T>& i, 
+                             const enumerable_thread_specific_iterator<Container,U>& j ) {
+            return i.my_index-j.my_index;
+        }
+
+    template<typename SegmentedContainer, typename Value >
+        class segmented_iterator
+#if defined(_WIN64) && defined(_MSC_VER)
+        : public std::iterator<std::input_iterator_tag, Value>
+#endif
+        {
+            template<typename C, typename T, typename U>
+            friend bool operator==(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);
+
+            template<typename C, typename T, typename U>
+            friend bool operator!=(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);
+            
+            template<typename C, typename U> 
+            friend class segmented_iterator;
+
+            public:
+
+                segmented_iterator() {my_segcont = NULL;}
+
+                segmented_iterator( const SegmentedContainer& _segmented_container ) : 
+                    my_segcont(const_cast<SegmentedContainer*>(&_segmented_container)),
+                    outer_iter(my_segcont->end()) { }
+
+                ~segmented_iterator() {}
+
+                typedef typename SegmentedContainer::iterator outer_iterator;
+                typedef typename SegmentedContainer::value_type InnerContainer;
+                typedef typename InnerContainer::iterator inner_iterator;
+
+                // STL support
+                typedef ptrdiff_t difference_type;
+                typedef Value value_type;
+                typedef typename SegmentedContainer::size_type size_type;
+                typedef Value* pointer;
+                typedef Value& reference;
+                typedef std::input_iterator_tag iterator_category;
+
+                // Copy Constructor
+                template<typename U>
+                segmented_iterator(const segmented_iterator<SegmentedContainer, U>& other) :
+                    my_segcont(other.my_segcont),
+                    outer_iter(other.outer_iter),
+                    // can we assign a default-constructed iterator to inner if we're at the end?
+                    inner_iter(other.inner_iter)
+                {}
+
+                // assignment
+                template<typename U>
+                segmented_iterator& operator=( const segmented_iterator<SegmentedContainer, U>& other) {
+                    if(this != &other) {
+                        my_segcont = other.my_segcont;
+                        outer_iter = other.outer_iter;
+                        if(outer_iter != my_segcont->end()) inner_iter = other.inner_iter;
+                    }
+                    return *this;
+                }
+
+                // allow assignment of outer iterator to segmented iterator.  Once it is
+                // assigned, move forward until a non-empty inner container is found or
+                // the end of the outer container is reached.
+                segmented_iterator& operator=(const outer_iterator& new_outer_iter) {
+                    __TBB_ASSERT(my_segcont != NULL, NULL);
+                    // check that this iterator points to something inside the segmented container
+                    for(outer_iter = new_outer_iter ;outer_iter!=my_segcont->end(); ++outer_iter) {
+                        if( !outer_iter->empty() ) {
+                            inner_iter = outer_iter->begin();
+                            break;
+                        }
+                    }
+                    return *this;
+                }
+
+                // pre-increment
+                segmented_iterator& operator++() {
+                    advance_me();
+                    return *this;
+                }
+
+                // post-increment
+                segmented_iterator operator++(int) {
+                    segmented_iterator tmp = *this;
+                    operator++();
+                    return tmp;
+                }
+
+                bool operator==(const outer_iterator& other_outer) const {
+                    __TBB_ASSERT(my_segcont != NULL, NULL);
+                    return (outer_iter == other_outer &&
+                            (outer_iter == my_segcont->end() || inner_iter == outer_iter->begin()));
+                }
+
+                bool operator!=(const outer_iterator& other_outer) const {
+                    return !operator==(other_outer);
+                }
+
+                // (i)* RHS
+                reference operator*() const {
+                    __TBB_ASSERT(my_segcont != NULL, NULL);
+                    __TBB_ASSERT(outer_iter != my_segcont->end(), "Dereferencing a pointer at end of container");
+                    __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // should never happen
+                    return *inner_iter;
+                }
+
+                // i->
+                pointer operator->() const { return &operator*();}
+
+            private:
+                SegmentedContainer*             my_segcont;
+                outer_iterator outer_iter;
+                inner_iterator inner_iter;
+
+                void advance_me() {
+                    __TBB_ASSERT(my_segcont != NULL, NULL);
+                    __TBB_ASSERT(outer_iter != my_segcont->end(), NULL); // not true if there are no inner containers
+                    __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // not true if the inner containers are all empty.
+                    ++inner_iter;
+                    while(inner_iter == outer_iter->end() && ++outer_iter != my_segcont->end()) {
+                        inner_iter = outer_iter->begin();
+                    }
+                }
+        };    // segmented_iterator
+
+        template<typename SegmentedContainer, typename T, typename U>
+        bool operator==( const segmented_iterator<SegmentedContainer,T>& i, 
+                         const segmented_iterator<SegmentedContainer,U>& j ) {
+            if(i.my_segcont != j.my_segcont) return false;
+            if(i.my_segcont == NULL) return true;
+            if(i.outer_iter != j.outer_iter) return false;
+            if(i.outer_iter == i.my_segcont->end()) return true;
+            return i.inner_iter == j.inner_iter;
+        }
+
+        // !=
+        template<typename SegmentedContainer, typename T, typename U>
+        bool operator!=( const segmented_iterator<SegmentedContainer,T>& i, 
+                         const segmented_iterator<SegmentedContainer,U>& j ) {
+            return !(i==j);
+        }
+
+        template<typename T>
+        struct destruct_only: tbb::internal::no_copy {
+            tbb::aligned_space<T,1> value;
+            ~destruct_only() {value.begin()[0].~T();}
+        };
+
+        template<typename T>
+        struct construct_by_default: tbb::internal::no_assign {
+            void construct(void*where) {new(where) T();} // C++ note: the () in T() ensures zero initialization.
+            construct_by_default( int ) {}
+        };
+
+        template<typename T>
+        struct construct_by_exemplar: tbb::internal::no_assign {
+            const T exemplar;
+            void construct(void*where) {new(where) T(exemplar);}
+            construct_by_exemplar( const T& t ) : exemplar(t) {}
+        };
+
+        template<typename T, typename Finit>
+        struct construct_by_finit: tbb::internal::no_assign {
+            Finit f;
+            void construct(void* where) {new(where) T(f());}
+            construct_by_finit( const Finit& f_ ) : f(f_) {}
+        };
+
+        // storage for initialization function pointer
+        template<typename T>
+        class callback_base {
+        public:
+            // Clone *this
+            virtual callback_base* clone() = 0;
+            // Destruct and free *this
+            virtual void destroy() = 0;
+            // Need virtual destructor to satisfy GCC compiler warning
+            virtual ~callback_base() { }
+            // Construct T at where
+            virtual void construct(void* where) = 0;
+        };
+
+        template <typename T, typename Constructor>
+        class callback_leaf: public callback_base<T>, Constructor {
+            template<typename X> callback_leaf( const X& x ) : Constructor(x) {}
+
+            typedef typename tbb::tbb_allocator<callback_leaf> my_allocator_type;
+
+            /*override*/ callback_base<T>* clone() {
+                void* where = my_allocator_type().allocate(1);
+                return new(where) callback_leaf(*this);
+            }
+
+            /*override*/ void destroy() {
+                my_allocator_type().destroy(this);
+                my_allocator_type().deallocate(this,1);
+            }
+
+            /*override*/ void construct(void* where) {
+                Constructor::construct(where);
+            }  
+        public:
+            template<typename X>
+            static callback_base<T>* make( const X& x ) {
+                void* where = my_allocator_type().allocate(1);
+                return new(where) callback_leaf(x);
+            }
+        };
+
+        //! Template for adding padding in order to avoid false sharing
+        /** ModularSize should be sizeof(U) modulo the cache line size.
+            All maintenance of the space will be done explicitly on push_back,
+            and all thread local copies must be destroyed before the concurrent
+            vector is deleted.
+        */
+        template<typename U, size_t ModularSize>
+        struct ets_element {
+            char value[ModularSize==0 ? sizeof(U) : sizeof(U)+(tbb::internal::NFS_MaxLineSize-ModularSize)];
+            void unconstruct() {
+                tbb::internal::punned_cast<U*>(&value)->~U();
+            }
+        };
+
+    } // namespace internal
+    //! @endcond
+
+    //! The enumerable_thread_specific container
+    /** enumerable_thread_specific has the following properties:
+        - thread-local copies are lazily created, with default, exemplar or function initialization.
+        - thread-local copies do not move (during lifetime, and excepting clear()) so the address of a copy is invariant.
+        - the contained objects need not have operator=() defined if combine is not used.
+        - enumerable_thread_specific containers may be copy-constructed or assigned.
+        - thread-local copies can be managed by hash-table, or can be accessed via TLS storage for speed.
+        - outside of parallel contexts, the contents of all thread-local copies are accessible by iterator or using combine or combine_each methods
+        
+    @par Segmented iterator
+        When the thread-local objects are containers with input_iterators defined, a segmented iterator may
+        be used to iterate over all the elements of all thread-local copies.
+
+    @par combine and combine_each
+        - Both methods are defined for enumerable_thread_specific. 
+        - combine() requires that the type T have operator=() defined.
+        - neither method modifies the contents of the object (though there is no guarantee that the applied functions do not modify the object).
+        - Both are evaluated in a serial context (the applied functions are not assumed to be benign).
+        
+    @ingroup containers */
+    template <typename T, 
+              typename Allocator=cache_aligned_allocator<T>, 
+              ets_key_usage_type ETS_key_type=ets_no_key > 
+    class enumerable_thread_specific: internal::ets_base<ETS_key_type> { 
+
+        template<typename U, typename A, ets_key_usage_type C> friend class enumerable_thread_specific;
+    
+        typedef internal::ets_element<T,sizeof(T)%tbb::internal::NFS_MaxLineSize> padded_element;
+
+        //! A generic range, used to create range objects from the iterators
+        template<typename I>
+        class generic_range_type: public blocked_range<I> {
+        public:
+            typedef T value_type;
+            typedef T& reference;
+            typedef const T& const_reference;
+            typedef I iterator;
+            typedef ptrdiff_t difference_type;
+            generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {} 
+            template<typename U>
+            generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {} 
+            generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
+        };
+    
+        typedef typename Allocator::template rebind< padded_element >::other padded_allocator_type;
+        typedef tbb::concurrent_vector< padded_element, padded_allocator_type > internal_collection_type;
+        
+        internal::callback_base<T> *my_construct_callback;
+
+        internal_collection_type my_locals;
+   
+        /*override*/ void* create_local() {
+#if TBB_DEPRECATED
+            void* lref = &my_locals[my_locals.push_back(padded_element())];
+#else
+            void* lref = &*my_locals.push_back(padded_element());
+#endif
+            my_construct_callback->construct(lref);
+            return lref;
+        } 
+
+        void unconstruct_locals() {
+            for(typename internal_collection_type::iterator cvi = my_locals.begin(); cvi != my_locals.end(); ++cvi) {
+                cvi->unconstruct();
+            }
+        }
+
+        typedef typename Allocator::template rebind< uintptr_t >::other array_allocator_type;
+
+        // _size is in bytes
+        /*override*/ void* create_array(size_t _size) {
+            size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
+            return array_allocator_type().allocate(nelements);
+        }
+
+        /*override*/ void free_array( void* _ptr, size_t _size) {
+            size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
+            array_allocator_type().deallocate( reinterpret_cast<uintptr_t *>(_ptr),nelements);
+        }
+   
+    public:
+    
+        //! Basic types
+        typedef Allocator allocator_type;
+        typedef T value_type;
+        typedef T& reference;
+        typedef const T& const_reference;
+        typedef T* pointer;
+        typedef const T* const_pointer;
+        typedef typename internal_collection_type::size_type size_type;
+        typedef typename internal_collection_type::difference_type difference_type;
+    
+        // Iterator types
+        typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator;
+        typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator;
+
+        // Parallel range types
+        typedef generic_range_type< iterator > range_type;
+        typedef generic_range_type< const_iterator > const_range_type;
+    
+        //! Default constructor.  Each local instance of T is default constructed.
+        enumerable_thread_specific() : 
+            my_construct_callback( internal::callback_leaf<T,internal::construct_by_default<T> >::make(/*dummy argument*/0) ) 
+        {}
+
+        //! Constructor with initializer functor.  Each local instance of T is constructed by T(finit()).
+        template <typename Finit>
+        enumerable_thread_specific( Finit finit ) : 
+            my_construct_callback( internal::callback_leaf<T,internal::construct_by_finit<T,Finit> >::make( finit ) ) 
+        {}
+    
+        //! Constructor with exemplar.  Each local instance of T is copy-constructed from the exemplar.
+        enumerable_thread_specific(const T& exemplar) : 
+            my_construct_callback( internal::callback_leaf<T,internal::construct_by_exemplar<T> >::make( exemplar ) )
+        {}
+    
+        //! Destructor
+        ~enumerable_thread_specific() { 
+            my_construct_callback->destroy();
+            this->clear();  // deallocate before the derived class has finished destructing,
+            // so that free(array *) is still accessible
+        }
+      
+        //! Returns reference to the calling thread's local copy, discarding the "exists" flag
+        reference local() {
+            bool exists;
+            return local(exists);
+        }
+
+        //! Returns reference to calling thread's local copy, creating one if necessary
+        reference local(bool& exists)  {
+            __TBB_ASSERT(ETS_key_type==ets_no_key,"ets_key_per_instance not yet implemented"); 
+            void* ptr = this->table_lookup(exists);
+            return *(T*)ptr;
+        }
+
+        //! Get the number of local copies
+        size_type size() const { return my_locals.size(); }
+    
+        //! true if there have been no local copies created
+        bool empty() const { return my_locals.empty(); }
+    
+        //! begin iterator
+        iterator begin() { return iterator( my_locals, 0 ); }
+        //! end iterator
+        iterator end() { return iterator(my_locals, my_locals.size() ); }
+    
+        //! begin const iterator
+        const_iterator begin() const { return const_iterator(my_locals, 0); }
+    
+        //! end const iterator
+        const_iterator end() const { return const_iterator(my_locals, my_locals.size()); }
+
+        //! Get range for parallel algorithms
+        range_type range( size_t grainsize=1 ) { return range_type( begin(), end(), grainsize ); } 
+        
+        //! Get const range for parallel algorithms
+        const_range_type range( size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); }
+
+        //! Destroys local copies
+        void clear() {
+            unconstruct_locals();
+            my_locals.clear();
+            this->table_clear();
+            // callback is not destroyed
+            // exemplar is not destroyed
+        }
+
+    private:
+
+        template<typename U, typename A2, ets_key_usage_type C2>
+        void internal_copy( const enumerable_thread_specific<U, A2, C2>& other);
+
+    public:
+
+        template<typename U, typename Alloc, ets_key_usage_type Cachetype>
+        enumerable_thread_specific( const enumerable_thread_specific<U, Alloc, Cachetype>& other ) : internal::ets_base<ETS_key_type> ()
+        {
+            internal_copy(other);
+        }
+
+        enumerable_thread_specific( const enumerable_thread_specific& other ) : internal::ets_base<ETS_key_type> ()
+        {
+            internal_copy(other);
+        }
+
+    private:
+
+        template<typename U, typename A2, ets_key_usage_type C2>
+        enumerable_thread_specific &
+        internal_assign(const enumerable_thread_specific<U, A2, C2>& other) {
+            if(static_cast<void *>( this ) != static_cast<const void *>( &other )) {
+                this->clear(); 
+                my_construct_callback->destroy();
+                my_construct_callback = 0;
+                internal_copy( other );
+            }
+            return *this;
+        }
+
+    public:
+
+        // assignment
+        enumerable_thread_specific& operator=(const enumerable_thread_specific& other) {
+            return internal_assign(other);
+        }
+
+        template<typename U, typename Alloc, ets_key_usage_type Cachetype>
+        enumerable_thread_specific& operator=(const enumerable_thread_specific<U, Alloc, Cachetype>& other)
+        {
+            return internal_assign(other);
+        }
+
+        // combine_func_t has signature T(T,T) or T(const T&, const T&)
+        template <typename combine_func_t>
+        T combine(combine_func_t f_combine) {
+            if(begin() == end()) {
+                internal::destruct_only<T> location;
+                my_construct_callback->construct(location.value.begin());
+                return *location.value.begin();
+            }
+            const_iterator ci = begin();
+            T my_result = *ci;
+            while(++ci != end()) 
+                my_result = f_combine( my_result, *ci );
+            return my_result;
+        }
+
+        // combine_func_t has signature void(T) or void(const T&)
+        template <typename combine_func_t>
+        void combine_each(combine_func_t f_combine) {
+            for(const_iterator ci = begin(); ci != end(); ++ci) {
+                f_combine( *ci );
+            }
+        }
+
+    }; // enumerable_thread_specific
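+
+    // Illustrative usage sketch (not part of the original header; the names counters, total and the
+    // surrounding parallel work are assumptions). Each thread accumulates into its own lazily
+    // created, zero-initialized copy via local(), and the copies are reduced serially afterwards;
+    // std::plus comes from <functional>:
+    //
+    //     typedef tbb::enumerable_thread_specific<int> CounterType;
+    //     CounterType counters;
+    //
+    //     // inside work executed concurrently by several threads:
+    //     //     counters.local() += 1;
+    //
+    //     int total = counters.combine( std::plus<int>() );  // serial reduction over all copies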
+
+    template <typename T, typename Allocator, ets_key_usage_type ETS_key_type> 
+    template<typename U, typename A2, ets_key_usage_type C2>
+    void enumerable_thread_specific<T,Allocator,ETS_key_type>::internal_copy( const enumerable_thread_specific<U, A2, C2>& other) {
+        // Initialize my_construct_callback first, so that it is valid even if the rest of this routine throws an exception.
+        my_construct_callback = other.my_construct_callback->clone();
+
+        typedef internal::ets_base<ets_no_key> base;
+        __TBB_ASSERT(my_locals.size()==0,NULL);
+        this->table_reserve_for_copy( other );
+        for( base::array* r=other.my_root; r; r=r->next ) {
+            for( size_t i=0; i<r->size(); ++i ) {
+                base::slot& s1 = r->at(i);
+                if( !s1.empty() ) {
+                    base::slot& s2 = this->table_find(s1.key);
+                    if( s2.empty() ) { 
+#if TBB_DEPRECATED
+                        void* lref = &my_locals[my_locals.push_back(padded_element())];
+#else
+                        void* lref = &*my_locals.push_back(padded_element());
+#endif
+                        s2.ptr = new(lref) T(*(U*)s1.ptr);
+                        s2.key = s1.key;
+                    } else {
+                        // Skip the duplicate
+                    } 
+                }
+            }
+        }
+    }
+
+    template< typename Container >
+    class flattened2d {
+
+        // This intermediate typedef is to address issues with VC7.1 compilers
+        typedef typename Container::value_type conval_type;
+
+    public:
+
+        //! Basic types
+        typedef typename conval_type::size_type size_type;
+        typedef typename conval_type::difference_type difference_type;
+        typedef typename conval_type::allocator_type allocator_type;
+        typedef typename conval_type::value_type value_type;
+        typedef typename conval_type::reference reference;
+        typedef typename conval_type::const_reference const_reference;
+        typedef typename conval_type::pointer pointer;
+        typedef typename conval_type::const_pointer const_pointer;
+
+        typedef typename internal::segmented_iterator<Container, value_type> iterator;
+        typedef typename internal::segmented_iterator<Container, const value_type> const_iterator;
+
+        flattened2d( const Container &c, typename Container::const_iterator b, typename Container::const_iterator e ) : 
+            my_container(const_cast<Container*>(&c)), my_begin(b), my_end(e) { }
+
+        flattened2d( const Container &c ) : 
+            my_container(const_cast<Container*>(&c)), my_begin(c.begin()), my_end(c.end()) { }
+
+        iterator begin() { return iterator(*my_container) = my_begin; }
+        iterator end() { return iterator(*my_container) = my_end; }
+        const_iterator begin() const { return const_iterator(*my_container) = my_begin; }
+        const_iterator end() const { return const_iterator(*my_container) = my_end; }
+
+        size_type size() const {
+            size_type tot_size = 0;
+            for(typename Container::const_iterator i = my_begin; i != my_end; ++i) {
+                tot_size += i->size();
+            }
+            return tot_size;
+        }
+
+    private:
+
+        Container *my_container;
+        typename Container::const_iterator my_begin;
+        typename Container::const_iterator my_end;
+
+    };
+
+    template <typename Container>
+    flattened2d<Container> flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) {
+        return flattened2d<Container>(c, b, e);
+    }
+
+    template <typename Container>
+    flattened2d<Container> flatten2d(const Container &c) {
+        return flattened2d<Container>(c);
+    }
+
+} // interface6
+
+namespace internal {
+using interface6::internal::segmented_iterator;
+}
+
+using interface6::enumerable_thread_specific;
+using interface6::flattened2d;
+using interface6::flatten2d;
+
+} // namespace tbb
+
+#endif
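
A minimal usage sketch of the interface declared above (illustrative only, not part of the patch; the names counters, tally and element_count are hypothetical): each thread accumulates into its own lazily created element via local(), combine() reduces the per-thread values, and flatten2d() presents an ETS of containers as one flat range.

    #include "tbb/enumerable_thread_specific.h"
    #include <vector>
    #include <functional>   // std::plus

    typedef tbb::enumerable_thread_specific<int> counter_type;
    counter_type counters;                  // one default-constructed int per thread

    void tally( int value ) {
        counters.local() += value;          // touches only the calling thread's element
    }

    int total() {
        // combine() folds all thread-local values with the functor;
        // combine_each() would visit each value without reducing.
        return counters.combine( std::plus<int>() );
    }

    typedef tbb::enumerable_thread_specific< std::vector<int> > vectors_type;
    vectors_type per_thread_vectors;

    size_t element_count() {
        // flatten2d() exposes all inner elements as a single range.
        return tbb::flatten2d( per_thread_vectors ).size();
    }
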
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/index.html b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/index.html
new file mode 100644 (file)
index 0000000..7e4552e
--- /dev/null
@@ -0,0 +1,28 @@
+<HTML>
+<BODY>
+
+<H2>Overview</H2>
+Include files for Threading Building Blocks classes and functions.
+
+<BR><A HREF=".">Click here</A> to see all files in the directory.
+
+<H2>Directories</H2>
+<DL>
+<DT><A HREF="machine">machine</A>
+<DD>Include files for low-level architecture-specific functionality.
+<DT><A HREF="compat">compat</A>
+<DD>Include files for source-level compatibility with other frameworks.
+</DL>
+
+<HR>
+<A HREF="../index.html">Up to parent directory</A>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are 
+registered trademarks or trademarks of Intel Corporation or its 
+subsidiaries in the United States and other countries. 
+<p></p>
+* Other names and brands may be claimed as the property of others.
+</BODY>
+</HTML>
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/ibm_aix51.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/ibm_aix51.h
new file mode 100644 (file)
index 0000000..408d48d
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_machine_H
+#error Do not include this file directly; include tbb_machine.h instead
+#endif
+
+#define __TBB_WORDSIZE 8
+#define __TBB_BIG_ENDIAN 1
+
+#include <stdint.h>
+#include <unistd.h>
+#include <sched.h>
+
+extern "C" {
+
+int32_t __TBB_machine_cas_32 (volatile void* ptr, int32_t value, int32_t comparand);
+int64_t __TBB_machine_cas_64 (volatile void* ptr, int64_t value, int64_t comparand);
+void    __TBB_machine_flush  ();
+
+}
+
+#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cas_32(P,V,C)
+#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cas_64(P,V,C)
+#define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cas_64(P,V,C)
+#define __TBB_Yield() sched_yield()
+
+#if __GNUC__
+#define __TBB_full_memory_fence() __asm__ __volatile__("sync": : :"memory")
+#define __TBB_release_consistency_helper() __asm__ __volatile__("lwsync": : :"memory")
+#else
+// IBM C++ Compiler does not support inline assembly
+#define __TBB_full_memory_fence() __TBB_machine_flush ()
+#define __TBB_release_consistency_helper() __TBB_machine_flush ()
+#endif
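
Only compare-and-swap and a memory fence are provided natively on this platform; read-modify-write operations such as fetch-and-add are synthesized generically by tbb_machine.h. A sketch of that kind of CAS retry loop, shown only to illustrate the convention that __TBB_machine_cas_32 returns the previously stored value (the helper name is hypothetical):

    // Illustrative only; the real generic versions live in tbb_machine.h.
    static inline int32_t fetch_and_add_via_cas( volatile int32_t* p, int32_t addend ) {
        int32_t old;
        do {
            old = *p;                                                // take a snapshot
        } while( __TBB_machine_cas_32( p, old+addend, old )!=old );  // retry until unchanged
        return old;                                                  // value before the addition
    }
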
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/linux_common.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/linux_common.h
new file mode 100644 (file)
index 0000000..2306eb0
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_machine_H
+#error Do not include this file directly; include tbb_machine.h instead
+#endif
+
+#include <sched.h>
+#define __TBB_Yield()  sched_yield()
+
+/* Futex definitions */
+#include <sys/syscall.h>
+
+#if defined(SYS_futex)
+
+#define __TBB_USE_FUTEX 1
+#include <limits.h>
+#include <errno.h>
+// Unfortunately, some versions of Linux do not have a header that defines FUTEX_WAIT and FUTEX_WAKE.
+
+#ifdef FUTEX_WAIT
+#define __TBB_FUTEX_WAIT FUTEX_WAIT
+#else
+#define __TBB_FUTEX_WAIT 0
+#endif
+
+#ifdef FUTEX_WAKE
+#define __TBB_FUTEX_WAKE FUTEX_WAKE
+#else
+#define __TBB_FUTEX_WAKE 1
+#endif
+
+#ifndef __TBB_ASSERT
+#error machine specific headers must be included after tbb_stddef.h
+#endif
+
+namespace tbb {
+
+namespace internal {
+
+inline int futex_wait( void *futex, int comparand ) {
+    int r = ::syscall( SYS_futex,futex,__TBB_FUTEX_WAIT,comparand,NULL,NULL,0 );
+#if TBB_USE_ASSERT
+    int e = errno;
+    __TBB_ASSERT( r==0||r==EWOULDBLOCK||(r==-1&&(e==EAGAIN||e==EINTR)), "futex_wait failed." );
+#endif /* TBB_USE_ASSERT */
+    return r;
+}
+
+inline int futex_wakeup_one( void *futex ) {
+    int r = ::syscall( SYS_futex,futex,__TBB_FUTEX_WAKE,1,NULL,NULL,0 );
+    __TBB_ASSERT( r==0||r==1, "futex_wakeup_one: more than one thread woken up?" );
+    return r;
+}
+
+inline int futex_wakeup_all( void *futex ) {
+    int r = ::syscall( SYS_futex,futex,__TBB_FUTEX_WAKE,INT_MAX,NULL,NULL,0 );
+    __TBB_ASSERT( r>=0, "futex_wakeup_all: error in waking up threads" );
+    return r;
+}
+
+} /* namespace internal */
+
+} /* namespace tbb */
+
+#endif /* SYS_futex */
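
The three wrappers follow the usual futex protocol: sleep only while the word still holds the expected value, and re-check after every return. A hypothetical sketch of that call pattern (the type below is not TBB API, and a production version would publish the flag with the TBB atomics rather than a plain volatile store):

    struct one_shot_event {                  // hypothetical example type
        volatile int state;                  // 0 = not signaled, 1 = signaled
        one_shot_event() : state(0) {}

        void wait() {
            // futex_wait returns immediately if *futex no longer equals the
            // comparand, so loop and re-check to tolerate spurious wakeups.
            while( state==0 )
                tbb::internal::futex_wait( (void*)&state, 0 );
        }
        void signal() {
            state = 1;
            tbb::internal::futex_wakeup_all( (void*)&state );
        }
    };
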
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/linux_ia32.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/linux_ia32.h
new file mode 100644 (file)
index 0000000..eb4028a
--- /dev/null
@@ -0,0 +1,216 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_machine_H
+#error Do not include this file directly; include tbb_machine.h instead
+#endif
+
+#include <stdint.h>
+#include <unistd.h>
+
+#define __TBB_WORDSIZE 4
+#define __TBB_BIG_ENDIAN 0
+
+#define __TBB_release_consistency_helper() __asm__ __volatile__("": : :"memory")
+#define __TBB_full_memory_fence() __asm__ __volatile__("mfence": : :"memory")
+
+#if __TBB_ICC_ASM_VOLATILE_BROKEN
+#define __TBB_VOLATILE
+#else
+#define __TBB_VOLATILE volatile
+#endif
+
+#define __MACHINE_DECL_ATOMICS(S,T,X) \
+static inline T __TBB_machine_cmpswp##S (volatile void *ptr, T value, T comparand )  \
+{                                                                                    \
+    T result;                                                                        \
+                                                                                     \
+    __asm__ __volatile__("lock\ncmpxchg" X " %2,%1"                                  \
+                          : "=a"(result), "=m"(*(__TBB_VOLATILE T*)ptr)              \
+                          : "q"(value), "0"(comparand), "m"(*(__TBB_VOLATILE T*)ptr) \
+                          : "memory");                                               \
+    return result;                                                                   \
+}                                                                                    \
+                                                                                     \
+static inline T __TBB_machine_fetchadd##S(volatile void *ptr, T addend)              \
+{                                                                                    \
+    T result;                                                                        \
+    __asm__ __volatile__("lock\nxadd" X " %0,%1"                                     \
+                          : "=r"(result), "=m"(*(__TBB_VOLATILE T*)ptr)              \
+                          : "0"(addend), "m"(*(__TBB_VOLATILE T*)ptr)                \
+                          : "memory");                                               \
+    return result;                                                                   \
+}                                                                                    \
+                                                                                     \
+static inline  T __TBB_machine_fetchstore##S(volatile void *ptr, T value)            \
+{                                                                                    \
+    T result;                                                                        \
+    __asm__ __volatile__("lock\nxchg" X " %0,%1"                                     \
+                          : "=r"(result), "=m"(*(__TBB_VOLATILE T*)ptr)              \
+                          : "0"(value), "m"(*(__TBB_VOLATILE T*)ptr)                 \
+                          : "memory");                                               \
+    return result;                                                                   \
+}                                                                                    \
+                                                                                     
+__MACHINE_DECL_ATOMICS(1,int8_t,"")
+__MACHINE_DECL_ATOMICS(2,int16_t,"")
+__MACHINE_DECL_ATOMICS(4,int32_t,"l")
+
+static inline int64_t __TBB_machine_cmpswp8 (volatile void *ptr, int64_t value, int64_t comparand )
+{
+    int64_t result;
+#if __PIC__ 
+    /* compiling position-independent code */
+    // EBX register preserved for compliance with position-independent code rules on IA32
+    __asm__ __volatile__ (
+            "pushl %%ebx\n\t"
+            "movl  (%%ecx),%%ebx\n\t"
+            "movl  4(%%ecx),%%ecx\n\t"
+            "lock\n\t cmpxchg8b %1\n\t"
+            "popl  %%ebx"
+             : "=A"(result), "=m"(*(int64_t *)ptr)
+             : "m"(*(int64_t *)ptr)
+             , "0"(comparand)
+             , "c"(&value)
+             : "memory", "esp"
+#if __INTEL_COMPILER
+             ,"ebx"
+#endif
+    );
+#else /* !__PIC__ */
+    union {
+        int64_t i64;
+        int32_t i32[2];
+    };
+    i64 = value;
+    __asm__ __volatile__ (
+            "lock\n\t cmpxchg8b %1\n\t"
+             : "=A"(result), "=m"(*(__TBB_VOLATILE int64_t *)ptr)
+             : "m"(*(__TBB_VOLATILE int64_t *)ptr)
+             , "0"(comparand)
+             , "b"(i32[0]), "c"(i32[1])
+             : "memory"
+    );
+#endif /* __PIC__ */
+    return result;
+}
+
+static inline int32_t __TBB_machine_lg( uint32_t x ) {
+    int32_t j;
+    __asm__ ("bsr %1,%0" : "=r"(j) : "r"(x));
+    return j;
+}
+
+static inline void __TBB_machine_or( volatile void *ptr, uint32_t addend ) {
+    __asm__ __volatile__("lock\norl %1,%0" : "=m"(*(__TBB_VOLATILE uint32_t *)ptr) : "r"(addend), "m"(*(__TBB_VOLATILE uint32_t *)ptr) : "memory");
+}
+
+static inline void __TBB_machine_and( volatile void *ptr, uint32_t addend ) {
+    __asm__ __volatile__("lock\nandl %1,%0" : "=m"(*(__TBB_VOLATILE uint32_t *)ptr) : "r"(addend), "m"(*(__TBB_VOLATILE uint32_t *)ptr) : "memory");
+}
+
+static inline void __TBB_machine_pause( int32_t delay ) {
+    for (int32_t i = 0; i < delay; i++) {
+       __asm__ __volatile__("pause;");
+    }
+    return;
+}   
+
+static inline int64_t __TBB_machine_load8 (const volatile void *ptr) {
+    int64_t result;
+    if( ((uint32_t)ptr&7u)==0 ) {
+        // Aligned load
+        __asm__ __volatile__ ( "fildq %1\n\t"
+                               "fistpq %0" :  "=m"(result) : "m"(*(const __TBB_VOLATILE uint64_t*)ptr) : "memory" );
+    } else {
+        // Unaligned load
+        result = __TBB_machine_cmpswp8(const_cast<void*>(ptr),0,0);
+    }
+    return result;
+}
+
+//! Handles misaligned 8-byte store
+/** Defined in tbb_misc.cpp */
+extern "C" void __TBB_machine_store8_slow( volatile void *ptr, int64_t value );
+extern "C" void __TBB_machine_store8_slow_perf_warning( volatile void *ptr );
+
+static inline void __TBB_machine_store8(volatile void *ptr, int64_t value) {
+    if( ((uint32_t)ptr&7u)==0 ) {
+        // Aligned store
+        __asm__ __volatile__ ( "fildq %1\n\t"
+                               "fistpq %0" :  "=m"(*(__TBB_VOLATILE int64_t*)ptr) : "m"(value) : "memory" );
+    } else {
+        // Unaligned store
+#if TBB_USE_PERFORMANCE_WARNINGS
+        __TBB_machine_store8_slow_perf_warning(ptr);
+#endif /* TBB_USE_PERFORMANCE_WARNINGS */
+        __TBB_machine_store8_slow(ptr,value);
+    }
+}
+// Machine specific atomic operations
+
+#define __TBB_CompareAndSwap1(P,V,C) __TBB_machine_cmpswp1(P,V,C)
+#define __TBB_CompareAndSwap2(P,V,C) __TBB_machine_cmpswp2(P,V,C)
+#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C)
+#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C)
+#define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp4(P,V,C)
+
+#define __TBB_FetchAndAdd1(P,V) __TBB_machine_fetchadd1(P,V)
+#define __TBB_FetchAndAdd2(P,V) __TBB_machine_fetchadd2(P,V)
+#define __TBB_FetchAndAdd4(P,V) __TBB_machine_fetchadd4(P,V)
+#define __TBB_FetchAndAddW(P,V) __TBB_machine_fetchadd4(P,V)
+
+#define __TBB_FetchAndStore1(P,V) __TBB_machine_fetchstore1(P,V)
+#define __TBB_FetchAndStore2(P,V) __TBB_machine_fetchstore2(P,V)
+#define __TBB_FetchAndStore4(P,V) __TBB_machine_fetchstore4(P,V)
+#define __TBB_FetchAndStoreW(P,V) __TBB_machine_fetchstore4(P,V)
+
+#define __TBB_Store8(P,V) __TBB_machine_store8(P,V)
+#define __TBB_Load8(P)    __TBB_machine_load8(P)
+
+#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V)
+#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V)
+
+
+// Those we chose not to implement (they will be implemented generically using CMPSWP8)
+#undef __TBB_FetchAndAdd8
+#undef __TBB_FetchAndStore8
+
+// Definition of other functions
+#define __TBB_Pause(V) __TBB_machine_pause(V)
+#define __TBB_Log2(V)  __TBB_machine_lg(V)
+
+// Special atomic functions
+#define __TBB_FetchAndAddWrelease(P,V) __TBB_FetchAndAddW(P,V)
+#define __TBB_FetchAndIncrementWacquire(P) __TBB_FetchAndAddW(P,1)
+#define __TBB_FetchAndDecrementWrelease(P) __TBB_FetchAndAddW(P,-1)
+
+// Use generic definitions from tbb_machine.h
+#undef __TBB_TryLockByte
+#undef __TBB_LockByte
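
The cmpswp macros return the value observed at the address, which is the standard building block for lock-free read-modify-write loops. A brief illustrative sketch (not part of TBB) built on __TBB_CompareAndSwap4:

    // Atomically raises *p to at least x; returns the value seen before the update.
    static inline int32_t atomic_max( volatile int32_t* p, int32_t x ) {
        int32_t snapshot = *p;
        while( snapshot < x ) {
            int32_t observed = __TBB_CompareAndSwap4( p, x, snapshot );
            if( observed==snapshot ) break;   // our value was installed
            snapshot = observed;              // lost the race; retry with the newer value
        }
        return snapshot;
    }
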
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/linux_ia64.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/linux_ia64.h
new file mode 100644 (file)
index 0000000..502f8d0
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_machine_H
+#error Do not include this file directly; include tbb_machine.h instead
+#endif
+
+#include <stdint.h>
+#include <unistd.h>
+#include <ia64intrin.h>
+
+#define __TBB_WORDSIZE 8
+#define __TBB_BIG_ENDIAN 0
+#define __TBB_DECL_FENCED_ATOMICS 1
+
+// Most of the functions will be in a .s file
+
+extern "C" {
+    int8_t __TBB_machine_cmpswp1__TBB_full_fence (volatile void *ptr, int8_t value, int8_t comparand); 
+    int8_t __TBB_machine_fetchadd1__TBB_full_fence (volatile void *ptr, int8_t addend);
+    int8_t __TBB_machine_fetchadd1acquire(volatile void *ptr, int8_t addend);
+    int8_t __TBB_machine_fetchadd1release(volatile void *ptr, int8_t addend);
+    int8_t __TBB_machine_fetchstore1acquire(volatile void *ptr, int8_t value);
+    int8_t __TBB_machine_fetchstore1release(volatile void *ptr, int8_t value);
+
+    int16_t __TBB_machine_cmpswp2__TBB_full_fence (volatile void *ptr, int16_t value, int16_t comparand);
+    int16_t __TBB_machine_fetchadd2__TBB_full_fence (volatile void *ptr, int16_t addend);
+    int16_t __TBB_machine_fetchadd2acquire(volatile void *ptr, int16_t addend);
+    int16_t __TBB_machine_fetchadd2release(volatile void *ptr, int16_t addend);
+    int16_t __TBB_machine_fetchstore2acquire(volatile void *ptr, int16_t value);
+    int16_t __TBB_machine_fetchstore2release(volatile void *ptr, int16_t value);
+
+    int32_t __TBB_machine_fetchstore4__TBB_full_fence (volatile void *ptr, int32_t value);
+    int32_t __TBB_machine_fetchstore4acquire(volatile void *ptr, int32_t value);
+    int32_t __TBB_machine_fetchstore4release(volatile void *ptr, int32_t value);
+    int32_t __TBB_machine_fetchadd4acquire(volatile void *ptr, int32_t addend);
+    int32_t __TBB_machine_fetchadd4release(volatile void *ptr, int32_t addend);
+
+    int64_t __TBB_machine_cmpswp8__TBB_full_fence (volatile void *ptr, int64_t value, int64_t comparand);
+    int64_t __TBB_machine_fetchstore8__TBB_full_fence (volatile void *ptr, int64_t value);
+    int64_t __TBB_machine_fetchstore8acquire(volatile void *ptr, int64_t value);
+    int64_t __TBB_machine_fetchstore8release(volatile void *ptr, int64_t value);
+    int64_t __TBB_machine_fetchadd8acquire(volatile void *ptr, int64_t addend);
+    int64_t __TBB_machine_fetchadd8release(volatile void *ptr, int64_t addend);
+
+    int8_t __TBB_machine_cmpswp1acquire(volatile void *ptr, int8_t value, int8_t comparand); 
+    int8_t __TBB_machine_cmpswp1release(volatile void *ptr, int8_t value, int8_t comparand); 
+    int8_t __TBB_machine_fetchstore1__TBB_full_fence (volatile void *ptr, int8_t value);
+
+    int16_t __TBB_machine_cmpswp2acquire(volatile void *ptr, int16_t value, int16_t comparand); 
+    int16_t __TBB_machine_cmpswp2release(volatile void *ptr, int16_t value, int16_t comparand); 
+    int16_t __TBB_machine_fetchstore2__TBB_full_fence (volatile void *ptr, int16_t value);
+
+    int32_t __TBB_machine_cmpswp4__TBB_full_fence (volatile void *ptr, int32_t value, int32_t comparand);
+    int32_t __TBB_machine_cmpswp4acquire(volatile void *ptr, int32_t value, int32_t comparand); 
+    int32_t __TBB_machine_cmpswp4release(volatile void *ptr, int32_t value, int32_t comparand); 
+    int32_t __TBB_machine_fetchadd4__TBB_full_fence (volatile void *ptr, int32_t value);
+
+    int64_t __TBB_machine_cmpswp8acquire(volatile void *ptr, int64_t value, int64_t comparand); 
+    int64_t __TBB_machine_cmpswp8release(volatile void *ptr, int64_t value, int64_t comparand); 
+    int64_t __TBB_machine_fetchadd8__TBB_full_fence (volatile void *ptr, int64_t value);
+
+    int64_t __TBB_machine_lg(uint64_t value);
+    void __TBB_machine_pause(int32_t delay);
+    bool __TBB_machine_trylockbyte( volatile unsigned char &ptr );
+    int64_t __TBB_machine_lockbyte( volatile unsigned char &ptr );
+
+    //! Retrieves the current RSE backing store pointer. IA64 specific.
+    void* __TBB_get_bsp();
+}
+
+#define __TBB_CompareAndSwap1(P,V,C) __TBB_machine_cmpswp1__TBB_full_fence(P,V,C)
+#define __TBB_CompareAndSwap2(P,V,C) __TBB_machine_cmpswp2__TBB_full_fence(P,V,C) 
+
+#define __TBB_FetchAndAdd1(P,V)        __TBB_machine_fetchadd1__TBB_full_fence(P,V)
+#define __TBB_FetchAndAdd1acquire(P,V) __TBB_machine_fetchadd1acquire(P,V)
+#define __TBB_FetchAndAdd1release(P,V) __TBB_machine_fetchadd1release(P,V)
+#define __TBB_FetchAndAdd2(P,V)        __TBB_machine_fetchadd2__TBB_full_fence(P,V)
+#define __TBB_FetchAndAdd2acquire(P,V) __TBB_machine_fetchadd2acquire(P,V)
+#define __TBB_FetchAndAdd2release(P,V) __TBB_machine_fetchadd2release(P,V)
+#define __TBB_FetchAndAdd4acquire(P,V) __TBB_machine_fetchadd4acquire(P,V)
+#define __TBB_FetchAndAdd4release(P,V) __TBB_machine_fetchadd4release(P,V)
+#define __TBB_FetchAndAdd8acquire(P,V) __TBB_machine_fetchadd8acquire(P,V)
+#define __TBB_FetchAndAdd8release(P,V) __TBB_machine_fetchadd8release(P,V)
+
+#define __TBB_FetchAndStore1acquire(P,V) __TBB_machine_fetchstore1acquire(P,V)
+#define __TBB_FetchAndStore1release(P,V) __TBB_machine_fetchstore1release(P,V)
+#define __TBB_FetchAndStore2acquire(P,V) __TBB_machine_fetchstore2acquire(P,V)
+#define __TBB_FetchAndStore2release(P,V) __TBB_machine_fetchstore2release(P,V)
+#define __TBB_FetchAndStore4acquire(P,V) __TBB_machine_fetchstore4acquire(P,V)
+#define __TBB_FetchAndStore4release(P,V) __TBB_machine_fetchstore4release(P,V)
+#define __TBB_FetchAndStore8acquire(P,V) __TBB_machine_fetchstore8acquire(P,V)
+#define __TBB_FetchAndStore8release(P,V) __TBB_machine_fetchstore8release(P,V)
+
+#define __TBB_CompareAndSwap1acquire(P,V,C) __TBB_machine_cmpswp1acquire(P,V,C)
+#define __TBB_CompareAndSwap1release(P,V,C) __TBB_machine_cmpswp1release(P,V,C)
+#define __TBB_CompareAndSwap2acquire(P,V,C) __TBB_machine_cmpswp2acquire(P,V,C)
+#define __TBB_CompareAndSwap2release(P,V,C) __TBB_machine_cmpswp2release(P,V,C)
+#define __TBB_CompareAndSwap4(P,V,C)        __TBB_machine_cmpswp4__TBB_full_fence(P,V,C)
+#define __TBB_CompareAndSwap4acquire(P,V,C) __TBB_machine_cmpswp4acquire(P,V,C)
+#define __TBB_CompareAndSwap4release(P,V,C) __TBB_machine_cmpswp4release(P,V,C)
+#define __TBB_CompareAndSwap8(P,V,C)        __TBB_machine_cmpswp8__TBB_full_fence(P,V,C)
+#define __TBB_CompareAndSwap8acquire(P,V,C) __TBB_machine_cmpswp8acquire(P,V,C)
+#define __TBB_CompareAndSwap8release(P,V,C) __TBB_machine_cmpswp8release(P,V,C)
+
+#define __TBB_FetchAndAdd4(P,V) __TBB_machine_fetchadd4__TBB_full_fence(P,V)
+#define __TBB_FetchAndAdd8(P,V) __TBB_machine_fetchadd8__TBB_full_fence(P,V)
+
+#define __TBB_FetchAndStore1(P,V) __TBB_machine_fetchstore1__TBB_full_fence(P,V)
+#define __TBB_FetchAndStore2(P,V) __TBB_machine_fetchstore2__TBB_full_fence(P,V)
+#define __TBB_FetchAndStore4(P,V) __TBB_machine_fetchstore4__TBB_full_fence(P,V)
+#define __TBB_FetchAndStore8(P,V) __TBB_machine_fetchstore8__TBB_full_fence(P,V)
+
+#define __TBB_FetchAndIncrementWacquire(P) __TBB_FetchAndAdd8acquire(P,1)
+#define __TBB_FetchAndDecrementWrelease(P) __TBB_FetchAndAdd8release(P,-1)
+
+#ifndef __INTEL_COMPILER
+/* Even though GCC imbues volatile loads with acquire semantics, 
+   it sometimes moves loads over the acquire fence.  The
+   fences defined here stop such incorrect code motion. */
+#define __TBB_release_consistency_helper() __asm__ __volatile__("": : :"memory")
+#define __TBB_full_memory_fence() __asm__ __volatile__("mf": : :"memory")
+#else
+#define __TBB_release_consistency_helper()
+#define __TBB_full_memory_fence() __mf()
+#endif /* __INTEL_COMPILER */
+
+// Special atomic functions
+#define __TBB_CompareAndSwapW(P,V,C)   __TBB_CompareAndSwap8(P,V,C)
+#define __TBB_FetchAndStoreW(P,V)      __TBB_FetchAndStore8(P,V)
+#define __TBB_FetchAndAddW(P,V)        __TBB_FetchAndAdd8(P,V)
+#define __TBB_FetchAndAddWrelease(P,V) __TBB_FetchAndAdd8release(P,V)
+
+// Not needed
+#undef __TBB_Store8
+#undef __TBB_Load8
+
+// Definition of Lock functions
+#define __TBB_TryLockByte(P) __TBB_machine_trylockbyte(P)
+#define __TBB_LockByte(P)    __TBB_machine_lockbyte(P)
+
+// Definition of other utility functions
+#define __TBB_Pause(V) __TBB_machine_pause(V)
+#define __TBB_Log2(V)  __TBB_machine_lg(V)
+
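The acquire and release flavors above exist for patterns such as reference counting, where the increment wants acquire semantics and the final decrement wants release semantics. An illustrative sketch using the fenced macros (the struct and helpers are hypothetical, and assume tbb_machine.h has been included so the macros resolve):

    struct refcounted {                       // hypothetical example type
        volatile intptr_t refs;               // assumed to start at 1
    };

    inline void add_ref( refcounted& obj ) {
        __TBB_FetchAndIncrementWacquire( &obj.refs );
    }

    inline bool remove_ref( refcounted& obj ) {
        // The macro returns the value before the decrement,
        // so 1 means this call dropped the last reference.
        return __TBB_FetchAndDecrementWrelease( &obj.refs )==1;
    }
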
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/linux_intel64.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/linux_intel64.h
new file mode 100644 (file)
index 0000000..d7b94fc
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_machine_H
+#error Do not include this file directly; include tbb_machine.h instead
+#endif
+
+#include <stdint.h>
+#include <unistd.h>
+
+#define __TBB_WORDSIZE 8
+#define __TBB_BIG_ENDIAN 0
+
+#define __TBB_release_consistency_helper() __asm__ __volatile__("": : :"memory")
+
+// __TBB_full_memory_fence can be predefined
+#ifndef __TBB_full_memory_fence
+#define __TBB_full_memory_fence() __asm__ __volatile__("mfence": : :"memory")
+#endif
+
+#define __MACHINE_DECL_ATOMICS(S,T,X) \
+static inline T __TBB_machine_cmpswp##S (volatile void *ptr, T value, T comparand )  \
+{                                                                                    \
+    T result;                                                                        \
+                                                                                     \
+    __asm__ __volatile__("lock\ncmpxchg" X " %2,%1"                                  \
+                          : "=a"(result), "=m"(*(volatile T*)ptr)                    \
+                          : "q"(value), "0"(comparand), "m"(*(volatile T*)ptr)       \
+                          : "memory");                                               \
+    return result;                                                                   \
+}                                                                                    \
+                                                                                     \
+static inline T __TBB_machine_fetchadd##S(volatile void *ptr, T addend)              \
+{                                                                                    \
+    T result;                                                                        \
+    __asm__ __volatile__("lock\nxadd" X " %0,%1"                                     \
+                          : "=r"(result),"=m"(*(volatile T*)ptr)                     \
+                          : "0"(addend), "m"(*(volatile T*)ptr)                      \
+                          : "memory");                                               \
+    return result;                                                                   \
+}                                                                                    \
+                                                                                     \
+static inline  T __TBB_machine_fetchstore##S(volatile void *ptr, T value)            \
+{                                                                                    \
+    T result;                                                                        \
+    __asm__ __volatile__("lock\nxchg" X " %0,%1"                                     \
+                          : "=r"(result),"=m"(*(volatile T*)ptr)                     \
+                          : "0"(value), "m"(*(volatile T*)ptr)                       \
+                          : "memory");                                               \
+    return result;                                                                   \
+}                                                                                    \
+                                                                                     
+__MACHINE_DECL_ATOMICS(1,int8_t,"")
+__MACHINE_DECL_ATOMICS(2,int16_t,"")
+__MACHINE_DECL_ATOMICS(4,int32_t,"")
+__MACHINE_DECL_ATOMICS(8,int64_t,"q")
+
+static inline int64_t __TBB_machine_lg( uint64_t x ) {
+    int64_t j;
+    __asm__ ("bsr %1,%0" : "=r"(j) : "r"(x));
+    return j;
+}
+
+static inline void __TBB_machine_or( volatile void *ptr, uint64_t addend ) {
+    __asm__ __volatile__("lock\norq %1,%0" : "=m"(*(volatile uint64_t*)ptr) : "r"(addend), "m"(*(volatile uint64_t*)ptr) : "memory");
+}
+
+static inline void __TBB_machine_and( volatile void *ptr, uint64_t addend ) {
+    __asm__ __volatile__("lock\nandq %1,%0" : "=m"(*(volatile uint64_t*)ptr) : "r"(addend), "m"(*(volatile uint64_t*)ptr) : "memory");
+}
+
+static inline void __TBB_machine_pause( int32_t delay ) {
+    for (int32_t i = 0; i < delay; i++) {
+       __asm__ __volatile__("pause;");
+    }
+    return;
+}
+
+// Machine specific atomic operations
+
+#define __TBB_CompareAndSwap1(P,V,C) __TBB_machine_cmpswp1(P,V,C)
+#define __TBB_CompareAndSwap2(P,V,C) __TBB_machine_cmpswp2(P,V,C)
+#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C)
+#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C)
+#define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp8(P,V,C)
+
+#define __TBB_FetchAndAdd1(P,V) __TBB_machine_fetchadd1(P,V)
+#define __TBB_FetchAndAdd2(P,V) __TBB_machine_fetchadd2(P,V)
+#define __TBB_FetchAndAdd4(P,V) __TBB_machine_fetchadd4(P,V)
+#define __TBB_FetchAndAdd8(P,V)  __TBB_machine_fetchadd8(P,V)
+#define __TBB_FetchAndAddW(P,V)  __TBB_machine_fetchadd8(P,V)
+
+#define __TBB_FetchAndStore1(P,V) __TBB_machine_fetchstore1(P,V)
+#define __TBB_FetchAndStore2(P,V) __TBB_machine_fetchstore2(P,V)
+#define __TBB_FetchAndStore4(P,V) __TBB_machine_fetchstore4(P,V)
+#define __TBB_FetchAndStore8(P,V)  __TBB_machine_fetchstore8(P,V)
+#define __TBB_FetchAndStoreW(P,V)  __TBB_machine_fetchstore8(P,V)
+
+#undef __TBB_Store8
+#undef __TBB_Load8
+
+#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V)
+#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V)
+
+// Definition of other functions
+#ifndef __TBB_Pause
+#define __TBB_Pause(V) __TBB_machine_pause(V)
+#endif
+#define __TBB_Log2(V)    __TBB_machine_lg(V)
+
+// Special atomic functions
+#define __TBB_FetchAndAddWrelease(P,V) __TBB_FetchAndAddW(P,V)
+#define __TBB_FetchAndIncrementWacquire(P) __TBB_FetchAndAddW(P,1)
+#define __TBB_FetchAndDecrementWrelease(P) __TBB_FetchAndAddW(P,-1)
+
+// Use generic definitions from tbb_machine.h
+#undef __TBB_TryLockByte
+#undef __TBB_LockByte
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/mac_ppc.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/mac_ppc.h
new file mode 100644 (file)
index 0000000..d5b1364
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_machine_H
+#error Do not include this file directly; include tbb_machine.h instead
+#endif
+
+#include <stdint.h>
+#include <unistd.h>
+
+inline int32_t __TBB_machine_cmpswp4 (volatile void *ptr, int32_t value, int32_t comparand )
+{
+    int32_t result;
+
+    __asm__ __volatile__("lwsync\n"
+                         "0: lwarx %0,0,%2\n\t"  /* load w/ reservation */
+                         "cmpw %0,%4\n\t"        /* compare against comparand */
+                         "bne- 1f\n\t"           /* exit if not same */
+                         "stwcx. %3,0,%2\n\t"    /* store new_value */
+                         "bne- 0b\n"             /* retry if reservation lost */
+                         "1: lwsync"               /* the exit */
+                          : "=&r"(result), "=m"(* (int32_t*) ptr)
+                          : "r"(ptr), "r"(value), "r"(comparand), "m"(* (int32_t*) ptr)
+                          : "cr0");
+    return result;
+}
+
+inline int64_t __TBB_machine_cmpswp8 (volatile void *ptr, int64_t value, int64_t comparand )
+{
+    int64_t result;
+    __asm__ __volatile__("lwsync\n"
+                         "0: ldarx %0,0,%2\n\t"  /* load w/ reservation */
+                         "cmpd %0,%4\n\t"        /* compare against comparand */
+                         "bne- 1f\n\t"           /* exit if not same */
+                         "stdcx. %3,0,%2\n\t"    /* store new_value */
+                         "bne- 0b\n"             /* retry if reservation lost */
+                         "1: lwsync"               /* the exit */
+                          : "=&b"(result), "=m"(* (int64_t*) ptr)
+                          : "r"(ptr), "r"(value), "r"(comparand), "m"(* (int64_t*) ptr)
+                          : "cr0");
+    return result;
+}
+
+#define __TBB_BIG_ENDIAN 1
+
+#if defined(powerpc64) || defined(__powerpc64__) || defined(__ppc64__)
+#define __TBB_WORDSIZE 8
+#define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp8(P,V,C)
+#else
+#define __TBB_WORDSIZE 4
+#define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp4(P,V,C)
+#endif
+
+#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C)
+#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C)
+#define __TBB_full_memory_fence() __asm__ __volatile__("sync": : :"memory")
+#define __TBB_release_consistency_helper() __asm__ __volatile__("lwsync": : :"memory")
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/macos_common.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/macos_common.h
new file mode 100644 (file)
index 0000000..ee2412d
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_machine_H
+#error Do not include this file directly; include tbb_machine.h instead
+#endif
+
+#include <sched.h>
+#define __TBB_Yield()  sched_yield()
+
+
+// __TBB_DetectNumberOfWorkers
+
+#include <sys/types.h>
+#include <sys/sysctl.h>
+
+static inline int __TBB_macos_available_cpu() {
+    int name[2] = {CTL_HW, HW_AVAILCPU};
+    int ncpu;
+    size_t size = sizeof(ncpu);
+    sysctl( name, 2, &ncpu, &size, NULL, 0 );
+    return ncpu;
+}
+
+#define __TBB_DetectNumberOfWorkers() __TBB_macos_available_cpu()
+
+
+#ifndef __TBB_WORDSIZE
+#define __TBB_WORDSIZE 4
+#endif
+
+#ifndef __TBB_BIG_ENDIAN
+#if __BIG_ENDIAN__
+#define __TBB_BIG_ENDIAN 1
+#else
+#define __TBB_BIG_ENDIAN 0
+#endif
+#endif
+
+
+#if !defined(__TBB_CompareAndSwap4) || !defined(__TBB_CompareAndSwap8)
+
+// Implementation of atomic operations based on OS provided primitives
+#include <libkern/OSAtomic.h>
+
+#define __TBB_release_consistency_helper() OSMemoryBarrier()
+#define __TBB_full_memory_fence()          OSMemoryBarrier()
+
+static inline int32_t __TBB_macos_cmpswp4(volatile void *ptr, int32_t value, int32_t comparand)
+{
+    __TBB_ASSERT( !((uintptr_t)ptr&0x3), "address not properly aligned for Mac OS atomics");
+    int32_t* address = (int32_t*)ptr;
+    while( !OSAtomicCompareAndSwap32Barrier(comparand, value, address) ){
+        int32_t snapshot = *address;
+        if( snapshot!=comparand ) return snapshot;
+    }
+    return comparand;
+}
+
+static inline int64_t __TBB_macos_cmpswp8(volatile void *ptr, int64_t value, int64_t comparand)
+{
+    __TBB_ASSERT( !((uintptr_t)ptr&0x7), "address not properly aligned for Mac OS atomics");
+    int64_t* address = (int64_t*)ptr;
+    while( !OSAtomicCompareAndSwap64Barrier(comparand, value, address) ){
+#if __TBB_WORDSIZE==8
+        int64_t snapshot = *address;
+#else
+        int64_t snapshot = OSAtomicAdd64( 0, address );
+#endif
+        if( snapshot!=comparand ) return snapshot;
+    }
+    return comparand;
+}
+
+#define __TBB_CompareAndSwap4(P,V,C) __TBB_macos_cmpswp4(P,V,C)
+#define __TBB_CompareAndSwap8(P,V,C) __TBB_macos_cmpswp8(P,V,C)
+
+static inline int32_t __TBB_macos_fetchadd4(volatile void *ptr, int32_t addend)
+{
+    __TBB_ASSERT( !((uintptr_t)ptr&0x3), "address not properly aligned for Mac OS atomics");
+    return OSAtomicAdd32Barrier(addend, (int32_t*)ptr) - addend;
+}
+
+static inline int64_t __TBB_macos_fetchadd8(volatile void *ptr, int64_t addend)
+{
+    __TBB_ASSERT( !((uintptr_t)ptr&0x7), "address not properly aligned for Mac OS atomics");
+    return OSAtomicAdd64Barrier(addend, (int64_t*)ptr) - addend;
+}
+
+#define __TBB_FetchAndAdd4(P,V) __TBB_macos_fetchadd4(P,V)
+#define __TBB_FetchAndAdd8(P,V) __TBB_macos_fetchadd8(P,V)
+
+#if __TBB_WORDSIZE==4
+#define __TBB_CompareAndSwapW(P,V,C) __TBB_CompareAndSwap4(P,V,C)
+#define __TBB_FetchAndAddW(P,V) __TBB_FetchAndAdd4(P,V)
+#else
+#define __TBB_CompareAndSwapW(P,V,C) __TBB_CompareAndSwap8(P,V,C)
+#define __TBB_FetchAndAddW(P,V) __TBB_FetchAndAdd8(P,V)
+#endif
+
+#endif /* !defined(__TBB_CompareAndSwap4) || !defined(__TBB_CompareAndSwap8) */
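
OSAtomicAdd32Barrier and OSAtomicAdd64Barrier return the new value, so the wrappers above subtract the addend to recover TBB's fetch-and-add convention of returning the old value. A small illustration of that convention (the helper is hypothetical):

    // Hands out 0, 1, 2, ... to successive callers.
    static inline int32_t next_ticket( volatile int32_t* counter ) {
        return __TBB_FetchAndAdd4( counter, 1 );   // the old value is this caller's ticket
    }
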
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/sunos_sparc.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/sunos_sparc.h
new file mode 100644 (file)
index 0000000..f617689
--- /dev/null
@@ -0,0 +1,228 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+
+#ifndef __TBB_machine_H
+#error Do not include this file directly; include tbb_machine.h instead
+#endif
+
+#include <stdint.h>
+#include <unistd.h>
+
+#define __TBB_WORDSIZE 8
+#define __TBB_BIG_ENDIAN 1
+
+#define __TBB_release_consistency_helper() __asm__ __volatile__ ("": : :"memory")
+#define __TBB_full_memory_fence() __asm__ __volatile__("membar #LoadLoad|#LoadStore|#StoreStore|#StoreLoad": : : "memory")
+
+//--------------------------------------------------
+// Compare and swap
+//--------------------------------------------------
+
+/**
+ * Atomic CAS for 32-bit values: if *ptr==comparand then *ptr=value; returns the original *ptr
+ * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand
+ * @param value value to assign *ptr to if *ptr==comparand
+ * @param comparand value to compare with *ptr
+ * @return value originally in memory at ptr, regardless of success
+*/
+static inline int32_t __TBB_machine_cmpswp4(volatile void *ptr, int32_t value, int32_t comparand ){
+  int32_t result;
+  __asm__ __volatile__(
+                       "cas\t[%5],%4,%1"
+                       : "=m"(*(int32_t *)ptr), "=r"(result)
+                       : "m"(*(int32_t *)ptr), "1"(value), "r"(comparand), "r"(ptr)
+                       : "memory");
+  return result;
+}
+
+/**
+ * Atomic CAS for 64-bit values: if *ptr==comparand then *ptr=value; returns the original *ptr
+ * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand
+ * @param value value to assign *ptr to if *ptr==comparand
+ * @param comparand value to compare with *ptr
+ * @return value originally in memory at ptr, regardless of success
+ */
+static inline int64_t __TBB_machine_cmpswp8(volatile void *ptr, int64_t value, int64_t comparand ){
+  int64_t result;
+  __asm__ __volatile__(
+                       "casx\t[%5],%4,%1"
+               : "=m"(*(int64_t *)ptr), "=r"(result)
+               : "m"(*(int64_t *)ptr), "1"(value), "r"(comparand), "r"(ptr)
+               : "memory");
+  return result;
+}
+
+//---------------------------------------------------
+// Fetch and add
+//---------------------------------------------------
+
+/**
+ * Atomic fetch-and-add for 32-bit values, implemented here as a CAS loop that retries until the update succeeds
+ * @param ptr pointer to the value that addend is added to
+ * @param addend value to add to *ptr
+ * @return value at ptr before addend was added
+ */
+static inline int32_t __TBB_machine_fetchadd4(volatile void *ptr, int32_t addend){
+  int32_t result;
+  __asm__ __volatile__ (                                 
+                        "0:\t add\t %3, %4, %0\n"    // do addition
+                        "\t cas\t [%2], %3, %0\n"        // cas to store result in memory
+                        "\t cmp\t %3, %0\n"            // check if value from memory is original
+                        "\t bne,a,pn\t %%icc, 0b\n"        // if not try again
+                        "\t mov %0, %3\n"            // use branch delay slot to move new value in memory to be added
+               : "=&r"(result), "=m"(*(int32_t *)ptr)
+               : "r"(ptr), "r"(*(int32_t *)ptr), "r"(addend), "m"(*(int32_t *)ptr)
+               : "ccr", "memory");
+  return result;
+}
+
+/**
+ * Atomic fetch-and-add for 64-bit values, implemented here as a CAS loop that retries until the update succeeds
+ * @param ptr pointer to the value that addend is added to
+ * @param addend value to add to *ptr
+ * @return value at ptr before addend was added
+ */
+static inline int64_t __TBB_machine_fetchadd8(volatile void *ptr, int64_t addend){
+  int64_t result;
+  __asm__ __volatile__ (
+                        "0:\t add\t %3, %4, %0\n"    // do addition
+                        "\t casx\t [%2], %3, %0\n"        // cas to store result in memory
+                        "\t cmp\t %3, %0\n"            // check if value from memory is original
+                        "\t bne,a,pn\t %%xcc, 0b\n"        // if not try again
+                        "\t mov %0, %3\n"            // use branch delay slot to move new value in memory to be added
+                : "=&r"(result), "=m"(*(int64_t *)ptr)
+                : "r"(ptr), "r"(*(int64_t *)ptr), "r"(addend), "m"(*(int64_t *)ptr)
+                : "ccr", "memory");
+  return result;
+}
+
+//--------------------------------------------------------
+// Logarithm (base two, integer)
+//--------------------------------------------------------
+
+static inline int64_t __TBB_machine_lg( uint64_t x ) {
+    uint64_t count;
+    // propagate the highest set bit into all lower bit positions
+    x |= (x >> 1);
+    x |= (x >> 2);
+    x |= (x >> 4);
+    x |= (x >> 8);
+    x |= (x >> 16);
+    x |= (x >> 32);
+    // count 1's
+    __asm__ ("popc %1, %0" : "=r"(count) : "r"(x) );
+    return count-1;
+}
+
+//--------------------------------------------------------
+
+static inline void __TBB_machine_or( volatile void *ptr, uint64_t addend ) {
+  __asm__ __volatile__ (
+                        "0:\t or\t %2, %3, %%g1\n" // do addition
+                        "\t casx\t [%1], %2, %%g1\n"            // cas to store result in memory
+                        "\t cmp\t %2, %%g1\n"                   // check if value from memory is original
+                        "\t bne,a,pn\t %%xcc, 0b\n" // if not try again
+                        "\t mov %%g1, %2\n"                     // use branch delay slot to move new value in memory to be added
+                : "=m"(*(int64_t *)ptr)
+                : "r"(ptr), "r"(*(int64_t *)ptr), "r"(addend), "m"(*(int64_t *)ptr)
+                : "ccr", "g1", "memory");
+}
+
+static inline void __TBB_machine_and( volatile void *ptr, uint64_t addend ) {
+  __asm__ __volatile__ (
+                        "0:\t and\t %2, %3, %%g1\n"        // do addition
+                        "\t casx\t [%1], %2, %%g1\n"            // cas to store result in memory
+                        "\t cmp\t %2, %%g1\n"                   // check if value from memory is original
+                        "\t bne,a,pn\t %%xcc, 0b\n"         // if not try again
+                        "\t mov %%g1, %2\n"                     // use branch delay slot to move new value in memory to be added
+                : "=m"(*(int64_t *)ptr)
+                : "r"(ptr), "r"(*(int64_t *)ptr), "r"(addend), "m"(*(int64_t *)ptr)
+                : "ccr", "g1", "memory");
+}
+
+
+static inline void __TBB_machine_pause( int32_t delay ) {
+    // do nothing; this is inlined, so the empty body doesn't matter
+}
+
+// Puts 0xff in the memory location and returns the previous value.
+//  The generic trylockbyte puts 0x01 instead, but that is fine,
+//  because all that matters is that 0 means unlocked.
+static inline bool __TBB_machine_trylockbyte(unsigned char &flag){
+    unsigned char result;
+    __asm__ __volatile__ (
+            "ldstub\t [%2], %0\n"
+        : "=r"(result), "=m"(flag)
+        : "r"(&flag), "m"(flag)
+        : "memory");
+    return result == 0;
+}
+
+
+// Machine specific atomic operations
+
+//#define __TBB_CompareAndSwap1(P,V,C) __TBB_machine_cmpswp1(P,V,C)  // use generic version in tbb_machine.h
+//#define __TBB_CompareAndSwap2(P,V,C) __TBB_machine_cmpswp2(P,V,C)  // use generic version in tbb_machine.h
+#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C)
+#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C)
+#define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp8(P,V,C)
+
+//#define __TBB_FetchAndAdd1(P,V) __TBB_machine_fetchadd1(P,V)       // use generic version in tbb_machine.h
+//#define __TBB_FetchAndAdd2(P,V) __TBB_machine_fetchadd2(P,V)       // use generic version in tbb_machine.h
+#define __TBB_FetchAndAdd4(P,V) __TBB_machine_fetchadd4(P,V)
+#define __TBB_FetchAndAdd8(P,V)  __TBB_machine_fetchadd8(P,V)
+#define __TBB_FetchAndAddW(P,V)  __TBB_machine_fetchadd8(P,V)
+
+// use generic version in tbb_machine.h
+//#define __TBB_FetchAndStore1(P,V) __TBB_machine_fetchstore1(P,V)  
+//#define __TBB_FetchAndStore2(P,V) __TBB_machine_fetchstore2(P,V)
+//#define __TBB_FetchAndStore4(P,V) __TBB_machine_fetchstore4(P,V)
+//#define __TBB_FetchAndStore8(P,V)  __TBB_machine_fetchstore8(P,V)
+//#define __TBB_FetchAndStoreW(P,V)  __TBB_machine_fetchstore8(P,V)
+
+#undef __TBB_Store8
+#undef __TBB_Load8
+
+#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V)
+#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V)
+
+// Definition of other functions
+#define __TBB_Pause(V) __TBB_machine_pause(V)
+#define __TBB_Log2(V)    __TBB_machine_lg(V)
+
+// Special atomic functions
+#define __TBB_FetchAndAddWrelease(P,V) __TBB_FetchAndAddW(P,V)
+#define __TBB_FetchAndIncrementWacquire(P) __TBB_FetchAndAddW(P,1)
+#define __TBB_FetchAndDecrementWrelease(P) __TBB_FetchAndAddW(P,-1)
+
+// Definition of Lock functions
+// Repeatedly runs TryLockByte, no need to implement
+#undef __TBB_LockByte
+
+#define __TBB_TryLockByte(P) __TBB_machine_trylockbyte(P)
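
Only TryLockByte is defined here; the blocking __TBB_LockByte comes from the generic layer, which simply retries it. A sketch of that spin protocol built on the macros above (illustrative only; a flag value of 0 means unlocked):

    inline void spin_acquire( unsigned char& flag ) {
        while( !__TBB_TryLockByte( flag ) )   // ldstub returned non-zero: lock already held
            __TBB_Pause( 1 );                 // defined above (a no-op on SPARC)
    }

    inline void spin_release( unsigned char& flag ) {
        __TBB_release_consistency_helper();   // publish protected writes before unlocking
        flag = 0;
    }
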
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/windows_api.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/windows_api.h
new file mode 100644 (file)
index 0000000..072dc0f
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_machine_windows_api_H
+#define __TBB_machine_windows_api_H
+
+#if _WIN32 || _WIN64
+
+#if _XBOX
+
+#define NONET
+#define NOD3D
+#include <xtl.h>
+
+#else // Assume "usual" Windows
+
+#include <windows.h>
+
+#endif // _XBOX
+
+#if !defined(_WIN32_WINNT)
+// The following Windows API function is declared explicitly;
+// otherwise any user would have to specify /D_WIN32_WINNT=0x0400
+extern "C" BOOL WINAPI TryEnterCriticalSection( LPCRITICAL_SECTION );
+#endif
+
+#else
+#error tbb/machine/windows_api.h should only be used for Windows based platforms
+#endif // _WIN32 || _WIN64
+
+#endif // __TBB_machine_windows_api_H
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/windows_ia32.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/windows_ia32.h
new file mode 100644 (file)
index 0000000..f4afd7c
--- /dev/null
@@ -0,0 +1,200 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_machine_H
+#error Do not include this file directly; include tbb_machine.h instead
+#endif
+
+#if defined(__INTEL_COMPILER)
+#define __TBB_release_consistency_helper() __asm { __asm nop }
+#elif _MSC_VER >= 1300
+extern "C" void _ReadWriteBarrier();
+#pragma intrinsic(_ReadWriteBarrier)
+#define __TBB_release_consistency_helper() _ReadWriteBarrier()
+#else
+#error Unsupported compiler - need to define __TBB_release_consistency_helper to support it
+#endif
+
+#define __TBB_full_memory_fence() __asm { __asm mfence }
+
+#define __TBB_WORDSIZE 4
+#define __TBB_BIG_ENDIAN 0
+
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+    // Workaround for overzealous compiler warnings in /Wp64 mode
+    #pragma warning (push)
+    #pragma warning (disable: 4244 4267)
+#endif
+
+extern "C" {
+    __int64 __TBB_EXPORTED_FUNC __TBB_machine_cmpswp8 (volatile void *ptr, __int64 value, __int64 comparand );
+    __int64 __TBB_EXPORTED_FUNC __TBB_machine_fetchadd8 (volatile void *ptr, __int64 addend );
+    __int64 __TBB_EXPORTED_FUNC __TBB_machine_fetchstore8 (volatile void *ptr, __int64 value );
+    void __TBB_EXPORTED_FUNC __TBB_machine_store8 (volatile void *ptr, __int64 value );
+    __int64 __TBB_EXPORTED_FUNC __TBB_machine_load8 (const volatile void *ptr);
+}
+
+#define __TBB_DEFINE_ATOMICS(S,T,U,A,C) \
+static inline T __TBB_machine_cmpswp##S ( volatile void * ptr, U value, U comparand ) { \
+    T result; \
+    volatile T *p = (T *)ptr; \
+    __TBB_release_consistency_helper(); \
+    __asm \
+    { \
+       __asm mov edx, p \
+       __asm mov C , value \
+       __asm mov A , comparand \
+       __asm lock cmpxchg [edx], C \
+       __asm mov result, A \
+    } \
+    __TBB_release_consistency_helper(); \
+    return result; \
+} \
+\
+static inline T __TBB_machine_fetchadd##S ( volatile void * ptr, U addend ) { \
+    T result; \
+    volatile T *p = (T *)ptr; \
+    __TBB_release_consistency_helper(); \
+    __asm \
+    { \
+        __asm mov edx, p \
+        __asm mov A, addend \
+        __asm lock xadd [edx], A \
+        __asm mov result, A \
+    } \
+    __TBB_release_consistency_helper(); \
+    return result; \
+}\
+\
+static inline T __TBB_machine_fetchstore##S ( volatile void * ptr, U value ) { \
+    T result; \
+    volatile T *p = (T *)ptr; \
+    __TBB_release_consistency_helper(); \
+    __asm \
+    { \
+        __asm mov edx, p \
+        __asm mov A, value \
+        __asm lock xchg [edx], A \
+        __asm mov result, A \
+    } \
+    __TBB_release_consistency_helper(); \
+    return result; \
+}
+
+__TBB_DEFINE_ATOMICS(1, __int8, __int8, al, cl)
+__TBB_DEFINE_ATOMICS(2, __int16, __int16, ax, cx)
+__TBB_DEFINE_ATOMICS(4, __int32, __int32, eax, ecx)
+__TBB_DEFINE_ATOMICS(W, ptrdiff_t, ptrdiff_t, eax, ecx)
+
+static inline __int32 __TBB_machine_lg( unsigned __int64 i ) {
+    unsigned __int32 j;
+    __asm
+    {
+        bsr eax, i
+        mov j, eax
+    }
+    return j;
+}
+
+static inline void __TBB_machine_OR( volatile void *operand, __int32 addend ) {
+   __asm 
+   {
+       mov eax, addend
+       mov edx, [operand]
+       lock or [edx], eax
+   }
+}
+
+static inline void __TBB_machine_AND( volatile void *operand, __int32 addend ) {
+   __asm 
+   {
+       mov eax, addend
+       mov edx, [operand]
+       lock and [edx], eax
+   }
+}
+
+static inline void __TBB_machine_pause (__int32 delay ) {
+    _asm 
+    {
+        mov eax, delay
+      L1: 
+        pause
+        add eax, -1
+        jne L1  
+    }
+    return;
+}
+
+#define __TBB_CompareAndSwap1(P,V,C) __TBB_machine_cmpswp1(P,V,C)
+#define __TBB_CompareAndSwap2(P,V,C) __TBB_machine_cmpswp2(P,V,C)
+#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C)
+#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C)
+#define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswpW(P,V,C)
+
+#define __TBB_FetchAndAdd1(P,V) __TBB_machine_fetchadd1(P,V)
+#define __TBB_FetchAndAdd2(P,V) __TBB_machine_fetchadd2(P,V)
+#define __TBB_FetchAndAdd4(P,V) __TBB_machine_fetchadd4(P,V)
+#define __TBB_FetchAndAdd8(P,V) __TBB_machine_fetchadd8(P,V)
+#define __TBB_FetchAndAddW(P,V) __TBB_machine_fetchaddW(P,V)
+
+#define __TBB_FetchAndStore1(P,V) __TBB_machine_fetchstore1(P,V)
+#define __TBB_FetchAndStore2(P,V) __TBB_machine_fetchstore2(P,V)
+#define __TBB_FetchAndStore4(P,V) __TBB_machine_fetchstore4(P,V)
+#define __TBB_FetchAndStore8(P,V) __TBB_machine_fetchstore8(P,V)
+#define __TBB_FetchAndStoreW(P,V) __TBB_machine_fetchstoreW(P,V)
+
+// 64-bit load/store and atomic OR/AND must be defined explicitly here:
+#define __TBB_Store8(P,V) __TBB_machine_store8(P,V)
+#define __TBB_Load8(P) __TBB_machine_load8(P)
+#define __TBB_AtomicOR(P,V) __TBB_machine_OR(P,V)
+#define __TBB_AtomicAND(P,V) __TBB_machine_AND(P,V)
+
+// Definition of other functions
+extern "C" __declspec(dllimport) int __stdcall SwitchToThread( void );
+#define __TBB_Yield()  SwitchToThread()
+#define __TBB_Pause(V) __TBB_machine_pause(V)
+#define __TBB_Log2(V)    __TBB_machine_lg(V)
+
+// Use generic definitions from tbb_machine.h
+#undef __TBB_TryLockByte
+#undef __TBB_LockByte
+
+#if defined(_MSC_VER)&&_MSC_VER<1400
+    static inline void* __TBB_machine_get_current_teb () {
+        void* pteb;
+        __asm mov eax, fs:[0x18]
+        __asm mov pteb, eax
+        return pteb;
+    }
+#endif
+
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+    #pragma warning (pop)
+#endif // warnings 4244, 4267 are back
+
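As a reading aid: the __TBB_DEFINE_ATOMICS macro above stamps out one cmpxchg/xadd/xchg routine per operand size, and each compare-and-swap returns the value the location held before the operation. A small hypothetical self-check of that contract, assuming tbb/tbb_machine.h (which pulls in this header on IA-32 Windows) has been included:

    #include <assert.h>

    void example_cas_contract() {
        volatile __int32 x = 5;
        // Succeeds: x equals the comparand 5, so x becomes 7 and the old value 5 is returned.
        __int32 old1 = __TBB_CompareAndSwap4( &x, 7, 5 );
        assert( old1 == 5 && x == 7 );
        // Fails: x is 7, not 5, so x is left unchanged and its current value 7 is returned.
        __int32 old2 = __TBB_CompareAndSwap4( &x, 9, 5 );
        assert( old2 == 7 && x == 7 );
    }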
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/windows_intel64.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/windows_intel64.h
new file mode 100644 (file)
index 0000000..1da213b
--- /dev/null
@@ -0,0 +1,132 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_machine_H
+#error Do not include this file directly; include tbb_machine.h instead
+#endif
+
+#include <intrin.h>
+#if !defined(__INTEL_COMPILER)
+#pragma intrinsic(_InterlockedOr64)
+#pragma intrinsic(_InterlockedAnd64)
+#pragma intrinsic(_InterlockedCompareExchange)
+#pragma intrinsic(_InterlockedCompareExchange64)
+#pragma intrinsic(_InterlockedExchangeAdd)
+#pragma intrinsic(_InterlockedExchangeAdd64)
+#pragma intrinsic(_InterlockedExchange)
+#pragma intrinsic(_InterlockedExchange64)
+#endif /* !defined(__INTEL_COMPILER) */
+
+#if defined(__INTEL_COMPILER)
+#define __TBB_release_consistency_helper() __asm { __asm nop }
+#define __TBB_full_memory_fence() __asm { __asm mfence }
+#elif _MSC_VER >= 1300
+extern "C" void _ReadWriteBarrier();
+#pragma intrinsic(_ReadWriteBarrier)
+#define __TBB_release_consistency_helper() _ReadWriteBarrier()
+#pragma intrinsic(_mm_mfence)
+#define __TBB_full_memory_fence() _mm_mfence()
+#endif
+
+#define __TBB_WORDSIZE 8
+#define __TBB_BIG_ENDIAN 0
+
+// ATTENTION: if you ever change argument types in machine-specific primitives,
+// please take care of atomic_word<> specializations in tbb/atomic.h
+extern "C" {
+    __int8 __TBB_EXPORTED_FUNC __TBB_machine_cmpswp1 (volatile void *ptr, __int8 value, __int8 comparand );
+    __int8 __TBB_EXPORTED_FUNC __TBB_machine_fetchadd1 (volatile void *ptr, __int8 addend );
+    __int8 __TBB_EXPORTED_FUNC __TBB_machine_fetchstore1 (volatile void *ptr, __int8 value );
+    __int16 __TBB_EXPORTED_FUNC __TBB_machine_cmpswp2 (volatile void *ptr, __int16 value, __int16 comparand );
+    __int16 __TBB_EXPORTED_FUNC __TBB_machine_fetchadd2 (volatile void *ptr, __int16 addend );
+    __int16 __TBB_EXPORTED_FUNC __TBB_machine_fetchstore2 (volatile void *ptr, __int16 value );
+    void __TBB_EXPORTED_FUNC __TBB_machine_pause (__int32 delay );
+}
+
+
+#if !__INTEL_COMPILER
+extern "C" unsigned char _BitScanReverse64( unsigned long* i, unsigned __int64 w );
+#pragma intrinsic(_BitScanReverse64)
+#endif
+
+inline __int64 __TBB_machine_lg( unsigned __int64 i ) {
+#if __INTEL_COMPILER
+    unsigned __int64 j;
+    __asm
+    {
+        bsr rax, i
+        mov j, rax
+    }
+#else
+    unsigned long j;
+    _BitScanReverse64( &j, i );
+#endif
+    return j;
+}
+
+inline void __TBB_machine_OR( volatile void *operand, intptr_t addend ) {
+    _InterlockedOr64((__int64*)operand, addend); 
+}
+
+inline void __TBB_machine_AND( volatile void *operand, intptr_t addend ) {
+    _InterlockedAnd64((__int64*)operand, addend); 
+}
+
+#define __TBB_CompareAndSwap1(P,V,C) __TBB_machine_cmpswp1(P,V,C)
+#define __TBB_CompareAndSwap2(P,V,C) __TBB_machine_cmpswp2(P,V,C)
+#define __TBB_CompareAndSwap4(P,V,C) _InterlockedCompareExchange( (long*) P , V , C ) 
+#define __TBB_CompareAndSwap8(P,V,C) _InterlockedCompareExchange64( (__int64*) P , V , C )
+#define __TBB_CompareAndSwapW(P,V,C) _InterlockedCompareExchange64( (__int64*) P , V , C )
+
+#define __TBB_FetchAndAdd1(P,V) __TBB_machine_fetchadd1(P,V)
+#define __TBB_FetchAndAdd2(P,V) __TBB_machine_fetchadd2(P,V)
+#define __TBB_FetchAndAdd4(P,V) _InterlockedExchangeAdd((long*) P , V )
+#define __TBB_FetchAndAdd8(P,V) _InterlockedExchangeAdd64((__int64*) P , V )
+#define __TBB_FetchAndAddW(P,V) _InterlockedExchangeAdd64((__int64*) P , V )
+
+#define __TBB_FetchAndStore1(P,V) __TBB_machine_fetchstore1(P,V)
+#define __TBB_FetchAndStore2(P,V) __TBB_machine_fetchstore2(P,V)
+#define __TBB_FetchAndStore4(P,V) _InterlockedExchange((long*) P , V )
+#define __TBB_FetchAndStore8(P,V) _InterlockedExchange64((__int64*) P , V )
+#define __TBB_FetchAndStoreW(P,V) _InterlockedExchange64((__int64*) P , V ) 
+
+// Not used if wordsize == 8
+#undef __TBB_Store8
+#undef __TBB_Load8
+
+#define __TBB_AtomicOR(P,V) __TBB_machine_OR(P,V)
+#define __TBB_AtomicAND(P,V) __TBB_machine_AND(P,V)
+
+extern "C" __declspec(dllimport) int __stdcall SwitchToThread( void );
+#define __TBB_Yield()  SwitchToThread()
+#define __TBB_Pause(V) __TBB_machine_pause(V)
+#define __TBB_Log2(V)    __TBB_machine_lg(V)
+
+// Use generic definitions from tbb_machine.h
+#undef __TBB_TryLockByte
+#undef __TBB_LockByte
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/xbox360_ppc.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/machine/xbox360_ppc.h
new file mode 100644 (file)
index 0000000..ed2529c
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_machine_H
+#error Do not include this file directly; include tbb_machine.h instead
+#endif
+
+#define NONET
+#define NOD3D
+#include "xtl.h"    
+#include "ppcintrinsics.h"
+
+#if _MSC_VER >= 1300
+extern "C" void _MemoryBarrier();
+#pragma intrinsic(_MemoryBarrier)
+#define __TBB_release_consistency_helper() _MemoryBarrier()
+#endif
+
+#define __TBB_full_memory_fence() __sync()
+
+#define __TBB_WORDSIZE 4
+#define __TBB_BIG_ENDIAN 1
+
+// TODO: define __TBB_DECL_FENCED_ATOMICS and acquire/release primitives to maximize performance
+
+typedef __int64 int64_t;  //required for definition of Store8/Load8 in atomic.h
+typedef unsigned char uint8_t;  //same reason
+
+inline __int32 __TBB_machine_cmpswp4(volatile void *ptr, __int32 value, __int32 comparand )
+{                               
+ __lwsync();
+ __int32 result = InterlockedCompareExchange((volatile LONG*)ptr, value, comparand);
+ __lwsync();
+ return result;
+}
+
+inline __int64 __TBB_machine_cmpswp8(volatile void *ptr, __int64 value, __int64 comparand )
+{
+ __lwsync();
+ __int64 result = InterlockedCompareExchange64((volatile LONG64*)ptr, value, comparand);
+ __lwsync();
+ return result;
+}
+
+#pragma optimize( "", off )
+inline void __TBB_machine_pause (__int32 delay ) 
+{
+ for (__int32 i=0; i<delay; i++) {;};
+}
+#pragma optimize( "", on ) 
+
+
+#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C)
+#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C)
+#define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp4(P,V,C)
+#define __TBB_Yield()  Sleep(0)
+#define __TBB_Pause(V) __TBB_machine_pause(V)
+
+// This port uses only 2 hardware threads for TBB on XBOX 360;
+// the others are left for sound processing, etc.
+// Change the following mask to allow TBB to use more hardware threads.
+static const int __TBB_XBOX360_HARDWARE_THREAD_MASK = 0x0C;
+
+static inline int __TBB_XBOX360_DetectNumberOfWorkers() 
+{
+     char a[__TBB_XBOX360_HARDWARE_THREAD_MASK];  // compile-time assert: at least one bit must always be set
+     a[0]=0;
+
+     return ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 0) & 1) +
+            ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 1) & 1) +
+            ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 2) & 1) +
+            ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 3) & 1) +
+            ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 4) & 1) +
+            ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 5) & 1) + 1;  // +1 accounts for the master thread
+}
+
+static inline int __TBB_XBOX360_GetHardwareThreadIndex(int workerThreadIndex)
+{
+    workerThreadIndex %= __TBB_XBOX360_DetectNumberOfWorkers()-1;
+    int m = __TBB_XBOX360_HARDWARE_THREAD_MASK;
+    int index = 0;
+    int skipcount = workerThreadIndex;
+    while (true)
+    {
+        if ((m & 1)!=0) 
+        {
+            if (skipcount==0) break;
+            skipcount--;
+        }
+        m >>= 1;
+        index++;
+    }
+    return index; 
+}
+
+#define __TBB_DetectNumberOfWorkers() __TBB_XBOX360_DetectNumberOfWorkers()
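To make the mask arithmetic above concrete: the default mask 0x0C is binary 1100, so hardware threads 2 and 3 are enabled, __TBB_XBOX360_DetectNumberOfWorkers() returns 2 + 1 = 3 (two enabled threads plus the master), and the two worker indices map onto hardware threads 2 and 3. A hypothetical self-check of that mapping for an XBOX 360 build:

    #include <assert.h>

    static void example_xbox360_thread_mapping() {
        assert( __TBB_XBOX360_DetectNumberOfWorkers()   == 3 );  // 2 enabled HW threads + 1 master
        assert( __TBB_XBOX360_GetHardwareThreadIndex(0) == 2 );  // first set bit of the mask
        assert( __TBB_XBOX360_GetHardwareThreadIndex(1) == 3 );  // second set bit of the mask
    }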
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/mutex.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/mutex.h
new file mode 100644 (file)
index 0000000..dfa806f
--- /dev/null
@@ -0,0 +1,240 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_mutex_H
+#define __TBB_mutex_H
+
+#if _WIN32||_WIN64
+#include "machine/windows_api.h"
+#else
+#include <pthread.h>
+#endif /* _WIN32||_WIN64 */
+
+#include <new>
+#include "aligned_space.h"
+#include "tbb_stddef.h"
+#include "tbb_profiling.h"
+
+namespace tbb {
+
+//! Wrapper around the platform's native mutual-exclusion lock.
+/** For testing purposes only.
+    @ingroup synchronization */
+class mutex {
+public:
+    //! Construct unacquired mutex.
+    mutex() {
+#if TBB_USE_ASSERT || TBB_USE_THREADING_TOOLS
+    internal_construct();
+#else
+  #if _WIN32||_WIN64
+        InitializeCriticalSection(&impl);
+  #else
+        int error_code = pthread_mutex_init(&impl,NULL);
+        if( error_code )
+            tbb::internal::handle_perror(error_code,"mutex: pthread_mutex_init failed");
+  #endif /* _WIN32||_WIN64*/
+#endif /* TBB_USE_ASSERT */
+    };
+
+    ~mutex() {
+#if TBB_USE_ASSERT
+        internal_destroy();
+#else
+  #if _WIN32||_WIN64
+        DeleteCriticalSection(&impl);
+  #else
+        pthread_mutex_destroy(&impl); 
+
+  #endif /* _WIN32||_WIN64 */
+#endif /* TBB_USE_ASSERT */
+    };
+
+    class scoped_lock;
+    friend class scoped_lock;
+
+    //! The scoped locking pattern
+    /** It helps to avoid the common problem of forgetting to release the lock.
+        It also nicely provides the "node" for queuing locks. */
+    class scoped_lock : internal::no_copy {
+    public:
+        //! Construct lock that has not acquired a mutex. 
+        scoped_lock() : my_mutex(NULL) {};
+
+        //! Acquire lock on given mutex.
+        scoped_lock( mutex& mutex ) {
+            acquire( mutex );
+        }
+
+        //! Release lock (if lock is held).
+        ~scoped_lock() {
+            if( my_mutex ) 
+                release();
+        }
+
+        //! Acquire lock on given mutex.
+        void acquire( mutex& mutex ) {
+#if TBB_USE_ASSERT
+            internal_acquire(mutex);
+#else
+            mutex.lock();
+            my_mutex = &mutex;
+#endif /* TBB_USE_ASSERT */
+        }
+
+        //! Try acquire lock on given mutex.
+        bool try_acquire( mutex& mutex ) {
+#if TBB_USE_ASSERT
+            return internal_try_acquire (mutex);
+#else
+            bool result = mutex.try_lock();
+            if( result )
+                my_mutex = &mutex;
+            return result;
+#endif /* TBB_USE_ASSERT */
+        }
+
+        //! Release lock
+        void release() {
+#if TBB_USE_ASSERT
+            internal_release ();
+#else
+            my_mutex->unlock();
+            my_mutex = NULL;
+#endif /* TBB_USE_ASSERT */
+        }
+
+    private:
+        //! Pointer to the mutex currently held by this lock, or NULL if none
+        mutex* my_mutex;
+
+        //! All checks from acquire using mutex.state were moved here
+        void __TBB_EXPORTED_METHOD internal_acquire( mutex& m );
+
+        //! All checks from try_acquire using mutex.state were moved here
+        bool __TBB_EXPORTED_METHOD internal_try_acquire( mutex& m );
+
+        //! All checks from release using mutex.state were moved here
+        void __TBB_EXPORTED_METHOD internal_release();
+
+        friend class mutex;
+    };
+
+    // Mutex traits
+    static const bool is_rw_mutex = false;
+    static const bool is_recursive_mutex = false;
+    static const bool is_fair_mutex = false;
+
+    // ISO C++0x compatibility methods
+
+    //! Acquire lock
+    void lock() {
+#if TBB_USE_ASSERT
+        aligned_space<scoped_lock,1> tmp;
+        new(tmp.begin()) scoped_lock(*this);
+#else
+  #if _WIN32||_WIN64
+        EnterCriticalSection(&impl);
+  #else
+        pthread_mutex_lock(&impl);
+  #endif /* _WIN32||_WIN64 */
+#endif /* TBB_USE_ASSERT */
+    }
+
+    //! Try acquiring lock (non-blocking)
+    /** Return true if lock acquired; false otherwise. */
+    bool try_lock() {
+#if TBB_USE_ASSERT
+        aligned_space<scoped_lock,1> tmp;
+        scoped_lock& s = *tmp.begin();
+        s.my_mutex = NULL;
+        return s.internal_try_acquire(*this);
+#else
+  #if _WIN32||_WIN64
+        return TryEnterCriticalSection(&impl)!=0;
+  #else
+        return pthread_mutex_trylock(&impl)==0;
+  #endif /* _WIN32||_WIN64 */
+#endif /* TBB_USE_ASSERT */
+    }
+
+    //! Release lock
+    void unlock() {
+#if TBB_USE_ASSERT
+        aligned_space<scoped_lock,1> tmp;
+        scoped_lock& s = *tmp.begin();
+        s.my_mutex = this;
+        s.internal_release();
+#else
+  #if _WIN32||_WIN64
+        LeaveCriticalSection(&impl);
+  #else
+        pthread_mutex_unlock(&impl);
+  #endif /* _WIN32||_WIN64 */
+#endif /* TBB_USE_ASSERT */
+    }
+
+    //! Return native_handle
+  #if _WIN32||_WIN64
+    typedef LPCRITICAL_SECTION native_handle_type;
+  #else
+    typedef pthread_mutex_t* native_handle_type;
+  #endif
+    native_handle_type native_handle() { return (native_handle_type) &impl; }
+
+    enum state_t {
+        INITIALIZED=0x1234,
+        DESTROYED=0x789A,
+        HELD=0x56CD
+    };
+private:
+#if _WIN32||_WIN64
+    CRITICAL_SECTION impl;    
+    enum state_t state;
+#else
+    pthread_mutex_t impl;
+#endif /* _WIN32||_WIN64 */
+
+    //! All checks from mutex constructor using mutex.state were moved here
+    void __TBB_EXPORTED_METHOD internal_construct();
+
+    //! All checks from mutex destructor using mutex.state were moved here
+    void __TBB_EXPORTED_METHOD internal_destroy();
+
+#if _WIN32||_WIN64
+public:
+    //!  Set the internal state
+    void set_state( state_t to ) { state = to; }
+#endif
+};
+
+__TBB_DEFINE_PROFILING_SET_NAME(mutex)
+
+} // namespace tbb 
+
+#endif /* __TBB_mutex_H */
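A short usage sketch of the class added above, following the scoped-lock idiom its comments describe; the surrounding names are illustrative and not part of the library:

    #include "tbb/mutex.h"

    tbb::mutex counter_mutex;        // illustrative globals
    long protected_counter = 0;

    void increment_counter() {
        // scoped_lock acquires counter_mutex here and releases it automatically when
        // it goes out of scope, even if the protected code throws.
        tbb::mutex::scoped_lock lock(counter_mutex);
        ++protected_counter;
    }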
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/null_mutex.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/null_mutex.h
new file mode 100644 (file)
index 0000000..67aabd5
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_null_mutex_H
+#define __TBB_null_mutex_H
+
+namespace tbb {
+    
+//! A mutex which does nothing
+/** A null_mutex does no operation and simulates success.
+    @ingroup synchronization */
+class null_mutex {   
+    //! Deny assignment and copy construction 
+    null_mutex( const null_mutex& );   
+    void operator=( const null_mutex& );   
+public:   
+    //! Represents acquisition of a mutex.
+    class scoped_lock {   
+    public:   
+        scoped_lock() {}
+        scoped_lock( null_mutex& ) {}   
+        ~scoped_lock() {}
+        void acquire( null_mutex& ) {}
+        bool try_acquire( null_mutex& ) { return true; }
+        void release() {}
+    };
+  
+    null_mutex() {}
+    
+    // Mutex traits   
+    static const bool is_rw_mutex = false;   
+    static const bool is_recursive_mutex = true;
+    static const bool is_fair_mutex = true;
+};  
+
+}
+
+#endif /* __TBB_null_mutex_H */
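null_mutex exists so that code written generically against a mutex type can be compiled with locking removed. A hypothetical sketch; the counter class and its members are illustrative:

    #include "tbb/null_mutex.h"
    #include "tbb/spin_mutex.h"

    // The Mutex template parameter decides whether increments are synchronized
    // (tbb::spin_mutex) or compiled down to plain, unsynchronized updates (tbb::null_mutex).
    template<typename Mutex>
    class counter {
        long value;
        Mutex mutex;
    public:
        counter() : value(0) {}
        void increment() {
            typename Mutex::scoped_lock lock(mutex);   // a no-op for null_mutex
            ++value;
        }
    };

    counter<tbb::spin_mutex> shared_counter;          // thread-safe
    counter<tbb::null_mutex> single_threaded_counter; // zero locking overhead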
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/null_rw_mutex.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/null_rw_mutex.h
new file mode 100644 (file)
index 0000000..2462389
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_null_rw_mutex_H
+#define __TBB_null_rw_mutex_H
+
+namespace tbb {
+    
+//! A rw mutex which does nothing
+/** A null_rw_mutex is a rw mutex that does nothing and simulates successful operation.
+    @ingroup synchronization */
+class null_rw_mutex {
+    //! Deny assignment and copy construction 
+    null_rw_mutex( const null_rw_mutex& );   
+    void operator=( const null_rw_mutex& );   
+public:   
+    //! Represents acquisition of a mutex.
+    class scoped_lock {   
+    public:   
+        scoped_lock() {}
+        scoped_lock( null_rw_mutex& , bool = true ) {}
+        ~scoped_lock() {}
+        void acquire( null_rw_mutex& , bool = true ) {}
+        bool upgrade_to_writer() { return true; }
+        bool downgrade_to_reader() { return true; }
+        bool try_acquire( null_rw_mutex& , bool = true ) { return true; }
+        void release() {}
+    };
+  
+    null_rw_mutex() {}
+    
+    // Mutex traits   
+    static const bool is_rw_mutex = true;   
+    static const bool is_recursive_mutex = true;
+    static const bool is_fair_mutex = true;
+};  
+
+}
+
+#endif /* __TBB_null_rw_mutex_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_do.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_do.h
new file mode 100644 (file)
index 0000000..6f91f72
--- /dev/null
@@ -0,0 +1,508 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_parallel_do_H
+#define __TBB_parallel_do_H
+
+#include "task.h"
+#include "aligned_space.h"
+#include <iterator>
+
+namespace tbb {
+
+//! @cond INTERNAL
+namespace internal {
+    template<typename Body, typename Item> class parallel_do_feeder_impl;
+    template<typename Body> class do_group_task;
+
+    //! Strips 'cv' and '&' qualifiers from its template type argument
+    template<typename T>
+    struct strip { typedef T type; };
+    template<typename T>
+    struct strip<T&> { typedef T type; };
+    template<typename T>
+    struct strip<const T&> { typedef T type; };
+    template<typename T>
+    struct strip<volatile T&> { typedef T type; };
+    template<typename T>
+    struct strip<const volatile T&> { typedef T type; };
+    // Most compilers remove cv-qualifiers from non-reference function argument types,
+    // but unfortunately some do not.
+    template<typename T>
+    struct strip<const T> { typedef T type; };
+    template<typename T>
+    struct strip<volatile T> { typedef T type; };
+    template<typename T>
+    struct strip<const volatile T> { typedef T type; };
+} // namespace internal
+//! @endcond
+
+//! Class used by the user-supplied algorithm body to add new work items
+/** \param Item Work item type **/
+template<typename Item>
+class parallel_do_feeder: internal::no_copy
+{
+    parallel_do_feeder() {}
+    virtual ~parallel_do_feeder () {}
+    virtual void internal_add( const Item& item ) = 0;
+    template<typename Body_, typename Item_> friend class internal::parallel_do_feeder_impl;
+public:
+    //! Add a work item to a running parallel_do.
+    void add( const Item& item ) {internal_add(item);}
+};
+
+//! @cond INTERNAL
+namespace internal {
+    //! For internal use only.
+    /** Selects one of the two possible forms of function call member operator.
+        @ingroup algorithms **/
+    template<class Body, typename Item>
+    class parallel_do_operator_selector
+    {
+        typedef parallel_do_feeder<Item> Feeder;
+        template<typename A1, typename A2, typename CvItem >
+        static void internal_call( const Body& obj, A1& arg1, A2&, void (Body::*)(CvItem) const ) {
+            obj(arg1);
+        }
+        template<typename A1, typename A2, typename CvItem >
+        static void internal_call( const Body& obj, A1& arg1, A2& arg2, void (Body::*)(CvItem, parallel_do_feeder<Item>&) const ) {
+            obj(arg1, arg2);
+        }
+
+    public:
+        template<typename A1, typename A2 >
+        static void call( const Body& obj, A1& arg1, A2& arg2 )
+        {
+            internal_call( obj, arg1, arg2, &Body::operator() );
+        }
+    };
+
+    //! For internal use only.
+    /** Executes one iteration of a parallel_do.
+        @ingroup algorithms */
+    template<typename Body, typename Item>
+    class do_iteration_task: public task
+    {
+        typedef parallel_do_feeder_impl<Body, Item> feeder_type;
+
+        Item my_value;
+        feeder_type& my_feeder;
+
+        do_iteration_task( const Item& value, feeder_type& feeder ) : 
+            my_value(value), my_feeder(feeder)
+        {}
+
+        /*override*/ 
+        task* execute()
+        {
+            parallel_do_operator_selector<Body, Item>::call(*my_feeder.my_body, my_value, my_feeder);
+            return NULL;
+        }
+
+        template<typename Body_, typename Item_> friend class parallel_do_feeder_impl;
+    }; // class do_iteration_task
+
+    template<typename Iterator, typename Body, typename Item>
+    class do_iteration_task_iter: public task
+    {
+        typedef parallel_do_feeder_impl<Body, Item> feeder_type;
+
+        Iterator my_iter;
+        feeder_type& my_feeder;
+
+        do_iteration_task_iter( const Iterator& iter, feeder_type& feeder ) : 
+            my_iter(iter), my_feeder(feeder)
+        {}
+
+        /*override*/ 
+        task* execute()
+        {
+            parallel_do_operator_selector<Body, Item>::call(*my_feeder.my_body, *my_iter, my_feeder);
+            return NULL;
+        }
+
+        template<typename Iterator_, typename Body_, typename Item_> friend class do_group_task_forward;    
+        template<typename Body_, typename Item_> friend class do_group_task_input;    
+        template<typename Iterator_, typename Body_, typename Item_> friend class do_task_iter;    
+    }; // class do_iteration_task_iter
+
+    //! For internal use only.
+    /** Implements new task adding procedure.
+        @ingroup algorithms **/
+    template<class Body, typename Item>
+    class parallel_do_feeder_impl : public parallel_do_feeder<Item>
+    {
+        /*override*/ 
+        void internal_add( const Item& item )
+        {
+            typedef do_iteration_task<Body, Item> iteration_type;
+
+            iteration_type& t = *new (task::allocate_additional_child_of(*my_barrier)) iteration_type(item, *this);
+
+            t.spawn( t );
+        }
+    public:
+        const Body* my_body;
+        empty_task* my_barrier;
+
+        parallel_do_feeder_impl()
+        {
+            my_barrier = new( task::allocate_root() ) empty_task();
+            __TBB_ASSERT(my_barrier, "root task allocation failed");
+        }
+
+#if __TBB_TASK_GROUP_CONTEXT
+        parallel_do_feeder_impl(tbb::task_group_context &context)
+        {
+            my_barrier = new( task::allocate_root(context) ) empty_task();
+            __TBB_ASSERT(my_barrier, "root task allocation failed");
+        }
+#endif
+
+        ~parallel_do_feeder_impl()
+        {
+            my_barrier->destroy(*my_barrier);
+        }
+    }; // class parallel_do_feeder_impl
+
+
+    //! For internal use only
+    /** Unpacks a block of iterations.
+        @ingroup algorithms */
+    
+    template<typename Iterator, typename Body, typename Item>
+    class do_group_task_forward: public task
+    {
+        static const size_t max_arg_size = 4;         
+
+        typedef parallel_do_feeder_impl<Body, Item> feeder_type;
+
+        feeder_type& my_feeder;
+        Iterator my_first;
+        size_t my_size;
+        
+        do_group_task_forward( Iterator first, size_t size, feeder_type& feeder ) 
+            : my_feeder(feeder), my_first(first), my_size(size)
+        {}
+
+        /*override*/ task* execute()
+        {
+            typedef do_iteration_task_iter<Iterator, Body, Item> iteration_type;
+            __TBB_ASSERT( my_size>0, NULL );
+            task_list list;
+            task* t; 
+            size_t k=0; 
+            for(;;) {
+                t = new( allocate_child() ) iteration_type( my_first, my_feeder );
+                ++my_first;
+                if( ++k==my_size ) break;
+                list.push_back(*t);
+            }
+            set_ref_count(int(k+1));
+            spawn(list);
+            spawn_and_wait_for_all(*t);
+            return NULL;
+        }
+
+        template<typename Iterator_, typename Body_, typename _Item> friend class do_task_iter;
+    }; // class do_group_task_forward
+
+    template<typename Body, typename Item>
+    class do_group_task_input: public task
+    {
+        static const size_t max_arg_size = 4;         
+        
+        typedef parallel_do_feeder_impl<Body, Item> feeder_type;
+
+        feeder_type& my_feeder;
+        size_t my_size;
+        aligned_space<Item, max_arg_size> my_arg;
+
+        do_group_task_input( feeder_type& feeder ) 
+            : my_feeder(feeder), my_size(0)
+        {}
+
+        /*override*/ task* execute()
+        {
+            typedef do_iteration_task_iter<Item*, Body, Item> iteration_type;
+            __TBB_ASSERT( my_size>0, NULL );
+            task_list list;
+            task* t; 
+            size_t k=0; 
+            for(;;) {
+                t = new( allocate_child() ) iteration_type( my_arg.begin() + k, my_feeder );
+                if( ++k==my_size ) break;
+                list.push_back(*t);
+            }
+            set_ref_count(int(k+1));
+            spawn(list);
+            spawn_and_wait_for_all(*t);
+            return NULL;
+        }
+
+        ~do_group_task_input(){
+            for( size_t k=0; k<my_size; ++k)
+                (my_arg.begin() + k)->~Item();
+        }
+
+        template<typename Iterator_, typename Body_, typename Item_> friend class do_task_iter;
+    }; // class do_group_task_input
+    
+    //! For internal use only.
+    /** Gets a block of iterations and packages it into a do_group_task.
+        @ingroup algorithms */
+    template<typename Iterator, typename Body, typename Item>
+    class do_task_iter: public task
+    {
+        typedef parallel_do_feeder_impl<Body, Item> feeder_type;
+
+    public:
+        do_task_iter( Iterator first, Iterator last , feeder_type& feeder ) : 
+            my_first(first), my_last(last), my_feeder(feeder)
+        {}
+
+    private:
+        Iterator my_first;
+        Iterator my_last;
+        feeder_type& my_feeder;
+
+        /* Do not merge the run(xxx) and run_xxx() methods. They are kept separate to
+            make sure that compilers eliminate the unused argument of type xxx
+            (that is, do not put it on the stack). The sole purpose of this argument
+            is overload resolution.
+
+            An alternative would be template functions, but explicit specialization
+            of member function templates is not supported for non-specialized class
+            templates. Besides, template functions would always fall back to the least
+            efficient variant (the one for input iterators) for iterators with
+            custom tags derived from the basic ones. */
+        /*override*/ task* execute()
+        {
+            typedef typename std::iterator_traits<Iterator>::iterator_category iterator_tag;
+            return run( (iterator_tag*)NULL );
+        }
+
+        /** This is the most restricted variant that operates on input iterators or
+            iterators with unknown tags (tags not derived from the standard ones). **/
+        inline task* run( void* ) { return run_for_input_iterator(); }
+        
+        task* run_for_input_iterator() {
+            typedef do_group_task_input<Body, Item> block_type;
+
+            block_type& t = *new( allocate_additional_child_of(*my_feeder.my_barrier) ) block_type(my_feeder);
+            size_t k=0; 
+            while( !(my_first == my_last) ) {
+                new (t.my_arg.begin() + k) Item(*my_first);
+                ++my_first;
+                if( ++k==block_type::max_arg_size ) {
+                    if ( !(my_first == my_last) )
+                        recycle_to_reexecute();
+                    break;
+                }
+            }
+            if( k==0 ) {
+                destroy(t);
+                return NULL;
+            } else {
+                t.my_size = k;
+                return &t;
+            }
+        }
+
+        inline task* run( std::forward_iterator_tag* ) { return run_for_forward_iterator(); }
+
+        task* run_for_forward_iterator() {
+            typedef do_group_task_forward<Iterator, Body, Item> block_type;
+
+            Iterator first = my_first;
+            size_t k=0; 
+            while( !(my_first==my_last) ) {
+                ++my_first;
+                if( ++k==block_type::max_arg_size ) {
+                    if ( !(my_first==my_last) )
+                        recycle_to_reexecute();
+                    break;
+                }
+            }
+            return k==0 ? NULL : new( allocate_additional_child_of(*my_feeder.my_barrier) ) block_type(first, k, my_feeder);
+        }
+        
+        inline task* run( std::random_access_iterator_tag* ) { return run_for_random_access_iterator(); }
+
+        task* run_for_random_access_iterator() {
+            typedef do_group_task_forward<Iterator, Body, Item> block_type;
+            typedef do_iteration_task_iter<Iterator, Body, Item> iteration_type;
+            
+            size_t k = static_cast<size_t>(my_last-my_first); 
+            if( k > block_type::max_arg_size ) {
+                Iterator middle = my_first + k/2;
+
+                empty_task& c = *new( allocate_continuation() ) empty_task;
+                do_task_iter& b = *new( c.allocate_child() ) do_task_iter(middle, my_last, my_feeder);
+                recycle_as_child_of(c);
+
+                my_last = middle;
+                c.set_ref_count(2);
+                c.spawn(b);
+                return this;
+            }else if( k != 0 ) {
+                task_list list;
+                task* t; 
+                size_t k1=0; 
+                for(;;) {
+                    t = new( allocate_child() ) iteration_type(my_first, my_feeder);
+                    ++my_first;
+                    if( ++k1==k ) break;
+                    list.push_back(*t);
+                }
+                set_ref_count(int(k+1));
+                spawn(list);
+                spawn_and_wait_for_all(*t);
+            }
+            return NULL;
+        }
+    }; // class do_task_iter
+
+    //! For internal use only.
+    /** Implements parallel iteration over a range.
+        @ingroup algorithms */
+    template<typename Iterator, typename Body, typename Item> 
+    void run_parallel_do( Iterator first, Iterator last, const Body& body
+#if __TBB_TASK_GROUP_CONTEXT
+        , task_group_context& context
+#endif
+        )
+    {
+        typedef do_task_iter<Iterator, Body, Item> root_iteration_task;
+#if __TBB_TASK_GROUP_CONTEXT
+        parallel_do_feeder_impl<Body, Item> feeder(context);
+#else
+        parallel_do_feeder_impl<Body, Item> feeder;
+#endif
+        feeder.my_body = &body;
+
+        root_iteration_task &t = *new( feeder.my_barrier->allocate_child() ) root_iteration_task(first, last, feeder);
+
+        feeder.my_barrier->set_ref_count(2);
+        feeder.my_barrier->spawn_and_wait_for_all(t);
+    }
+
+    //! For internal use only.
+    /** Detects types of Body's operator function arguments.
+        @ingroup algorithms **/
+    template<typename Iterator, typename Body, typename Item> 
+    void select_parallel_do( Iterator first, Iterator last, const Body& body, void (Body::*)(Item) const
+#if __TBB_TASK_GROUP_CONTEXT
+        , task_group_context& context 
+#endif // __TBB_TASK_GROUP_CONTEXT 
+        )
+    {
+        run_parallel_do<Iterator, Body, typename strip<Item>::type>( first, last, body
+#if __TBB_TASK_GROUP_CONTEXT
+            , context
+#endif // __TBB_TASK_GROUP_CONTEXT 
+            );
+    }
+
+    //! For internal use only.
+    /** Detects types of Body's operator function arguments.
+        @ingroup algorithms **/
+    template<typename Iterator, typename Body, typename Item, typename _Item> 
+    void select_parallel_do( Iterator first, Iterator last, const Body& body, void (Body::*)(Item, parallel_do_feeder<_Item>&) const
+#if __TBB_TASK_GROUP_CONTEXT
+        , task_group_context& context 
+#endif // __TBB_TASK_GROUP_CONTEXT
+        )
+    {
+        run_parallel_do<Iterator, Body, typename strip<Item>::type>( first, last, body
+#if __TBB_TASK_GROUP_CONTEXT
+            , context
+#endif // __TBB_TASK_GROUP_CONTEXT
+            );
+    }
+
+} // namespace internal
+//! @endcond
+
+
+/** \page parallel_do_body_req Requirements on parallel_do body
+    Class \c Body implementing the concept of parallel_do body must define:
+    - \code 
+        B::operator()( 
+                cv_item_type item,
+                parallel_do_feeder<item_type>& feeder
+        ) const
+        
+        OR
+
+        B::operator()( cv_item_type& item ) const
+      \endcode
+      Process item. May be invoked concurrently for the same \c this but different \c item.
+
+    - \code item_type( const item_type& ) \endcode
+      Copy a work item.
+    - \code ~item_type() \endcode
+      Destroy a work item.
+**/
+
+/** \name parallel_do
+    See also requirements on \ref parallel_do_body_req "parallel_do Body". **/
+//@{
+//! Parallel iteration over a range, with optional addition of more work.
+/** @ingroup algorithms */
+template<typename Iterator, typename Body> 
+void parallel_do( Iterator first, Iterator last, const Body& body )
+{
+    if ( first == last )
+        return;
+#if __TBB_TASK_GROUP_CONTEXT
+    task_group_context context;
+#endif // __TBB_TASK_GROUP_CONTEXT
+    internal::select_parallel_do( first, last, body, &Body::operator()
+#if __TBB_TASK_GROUP_CONTEXT
+        , context
+#endif // __TBB_TASK_GROUP_CONTEXT
+        );
+}
+
+#if __TBB_TASK_GROUP_CONTEXT
+//! Parallel iteration over a range, with optional addition of more work and user-supplied context
+/** @ingroup algorithms */
+template<typename Iterator, typename Body> 
+void parallel_do( Iterator first, Iterator last, const Body& body, task_group_context& context  )
+{
+    if ( first == last )
+        return;
+    internal::select_parallel_do( first, last, body, &Body::operator(), context );
+}
+#endif // __TBB_TASK_GROUP_CONTEXT
+
+//@}
+
+} // namespace tbb
+
+#endif /* __TBB_parallel_do_H */
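A usage sketch for the interface above, showing the feeder-based body form from the requirements page; the node type, body, and work list are illustrative:

    #include "tbb/parallel_do.h"
    #include <list>

    struct node { node* left; node* right; /* ... payload ... */ };

    // Body with the two-argument operator(): newly discovered children are fed
    // back into the running parallel_do via the feeder.
    struct visit_node {
        void operator()( node* n, tbb::parallel_do_feeder<node*>& feeder ) const {
            if( n->left )  feeder.add(n->left);
            if( n->right ) feeder.add(n->right);
            // ... process n ...
        }
    };

    void visit_trees( std::list<node*>& roots ) {
        tbb::parallel_do( roots.begin(), roots.end(), visit_node() );
    }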
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_for.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_for.h
new file mode 100644 (file)
index 0000000..3cb911a
--- /dev/null
@@ -0,0 +1,241 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_parallel_for_H
+#define __TBB_parallel_for_H
+
+#include "task.h"
+#include "partitioner.h"
+#include "blocked_range.h"
+#include <new>
+#include "tbb_exception.h"
+
+namespace tbb {
+
+//! @cond INTERNAL
+namespace internal {
+
+    //! Task type used in parallel_for
+    /** @ingroup algorithms */
+    template<typename Range, typename Body, typename Partitioner>
+    class start_for: public task {
+        Range my_range;
+        const Body my_body;
+        typename Partitioner::partition_type my_partition;
+        /*override*/ task* execute();
+
+        //! Constructor for root task.
+        start_for( const Range& range, const Body& body, Partitioner& partitioner ) :
+            my_range(range),    
+            my_body(body),
+            my_partition(partitioner)
+        {
+        }
+        //! Splitting constructor used to generate children.
+        /** *this becomes the left child; the newly constructed object is the right child. */
+        start_for( start_for& parent_, split ) :
+            my_range(parent_.my_range,split()),    
+            my_body(parent_.my_body),
+            my_partition(parent_.my_partition,split())
+        {
+            my_partition.set_affinity(*this);
+        }
+        //! Update affinity info, if any.
+        /*override*/ void note_affinity( affinity_id id ) {
+            my_partition.note_affinity( id );
+        }
+    public:
+        static void run(  const Range& range, const Body& body, const Partitioner& partitioner ) {
+            if( !range.empty() ) {
+#if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP
+                start_for& a = *new(task::allocate_root()) start_for(range,body,const_cast<Partitioner&>(partitioner));
+#else
+                // A bound context prevents exceptions thrown from the body from affecting nesting or sibling algorithms,
+                // and allows users to handle exceptions safely by wrapping parallel_for in a try-block.
+                task_group_context context;
+                start_for& a = *new(task::allocate_root(context)) start_for(range,body,const_cast<Partitioner&>(partitioner));
+#endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */
+                task::spawn_root_and_wait(a);
+            }
+        }
+#if __TBB_TASK_GROUP_CONTEXT
+        static void run(  const Range& range, const Body& body, const Partitioner& partitioner, task_group_context& context ) {
+            if( !range.empty() ) {
+                start_for& a = *new(task::allocate_root(context)) start_for(range,body,const_cast<Partitioner&>(partitioner));
+                task::spawn_root_and_wait(a);
+            }
+        }
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+    };
+
+    template<typename Range, typename Body, typename Partitioner>
+    task* start_for<Range,Body,Partitioner>::execute() {
+        if( !my_range.is_divisible() || my_partition.should_execute_range(*this) ) {
+            my_body( my_range );
+            return my_partition.continue_after_execute_range(); 
+        } else {
+            empty_task& c = *new( this->allocate_continuation() ) empty_task;
+            recycle_as_child_of(c);
+            c.set_ref_count(2);
+            bool delay = my_partition.decide_whether_to_delay();
+            start_for& b = *new( c.allocate_child() ) start_for(*this,split());
+            my_partition.spawn_or_delay(delay,b);
+            return this;
+        }
+    } 
+} // namespace internal
+//! @endcond
+
+
+// Requirements on Range concept are documented in blocked_range.h
+
+/** \page parallel_for_body_req Requirements on parallel_for body
+    Class \c Body implementing the concept of parallel_for body must define:
+    - \code Body::Body( const Body& ); \endcode                 Copy constructor
+    - \code Body::~Body(); \endcode                             Destructor
+    - \code void Body::operator()( Range& r ) const; \endcode   Function call operator applying the body to range \c r.
+**/
+
+/** \name parallel_for
+    See also requirements on \ref range_req "Range" and \ref parallel_for_body_req "parallel_for Body". **/
+//@{
+
+//! Parallel iteration over range with default partitioner. 
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+void parallel_for( const Range& range, const Body& body ) {
+    internal::start_for<Range,Body,__TBB_DEFAULT_PARTITIONER>::run(range,body,__TBB_DEFAULT_PARTITIONER());
+}
+
+//! Parallel iteration over range with simple partitioner.
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+void parallel_for( const Range& range, const Body& body, const simple_partitioner& partitioner ) {
+    internal::start_for<Range,Body,simple_partitioner>::run(range,body,partitioner);
+}
+
+//! Parallel iteration over range with auto_partitioner.
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+void parallel_for( const Range& range, const Body& body, const auto_partitioner& partitioner ) {
+    internal::start_for<Range,Body,auto_partitioner>::run(range,body,partitioner);
+}
+
+//! Parallel iteration over range with affinity_partitioner.
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+void parallel_for( const Range& range, const Body& body, affinity_partitioner& partitioner ) {
+    internal::start_for<Range,Body,affinity_partitioner>::run(range,body,partitioner);
+}
+
+#if __TBB_TASK_GROUP_CONTEXT
+//! Parallel iteration over range with simple partitioner and user-supplied context.
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+void parallel_for( const Range& range, const Body& body, const simple_partitioner& partitioner, task_group_context& context ) {
+    internal::start_for<Range,Body,simple_partitioner>::run(range, body, partitioner, context);
+}
+
+//! Parallel iteration over range with auto_partitioner and user-supplied context.
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+void parallel_for( const Range& range, const Body& body, const auto_partitioner& partitioner, task_group_context& context ) {
+    internal::start_for<Range,Body,auto_partitioner>::run(range, body, partitioner, context);
+}
+
+//! Parallel iteration over range with affinity_partitioner and user-supplied context.
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+void parallel_for( const Range& range, const Body& body, affinity_partitioner& partitioner, task_group_context& context ) {
+    internal::start_for<Range,Body,affinity_partitioner>::run(range,body,partitioner, context);
+}
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+//@}
+
+//! @cond INTERNAL
+namespace internal {
+    //! Calls the function with values from range [begin, end) with a step provided
+template<typename Function, typename Index>
+class parallel_for_body : internal::no_assign {
+    const Function &my_func;
+    const Index my_begin;
+    const Index my_step; 
+public:
+    parallel_for_body( const Function& _func, Index& _begin, Index& _step) 
+        : my_func(_func), my_begin(_begin), my_step(_step) {}
+    
+    void operator()( tbb::blocked_range<Index>& r ) const {
+        for( Index i = r.begin(),  k = my_begin + i * my_step; i < r.end(); i++, k = k + my_step)
+            my_func( k );
+    }
+};
+} // namespace internal
+//! @endcond
+
+namespace strict_ppl {
+
+//@{
+//! Parallel iteration over a range of integers with a step provided
+template <typename Index, typename Function>
+void parallel_for(Index first, Index last, Index step, const Function& f) {
+    tbb::task_group_context context;
+    parallel_for(first, last, step, f, context);
+}
+template <typename Index, typename Function>
+void parallel_for(Index first, Index last, Index step, const Function& f, tbb::task_group_context &context) {
+    if (step <= 0 )
+        internal::throw_exception(internal::eid_nonpositive_step); // throws std::invalid_argument
+    else if (last > first) {
+        // Above "else" avoids "potential divide by zero" warning on some platforms
+        Index end = (last - first - Index(1)) / step + Index(1);
+        tbb::blocked_range<Index> range(static_cast<Index>(0), end);
+        internal::parallel_for_body<Function, Index> body(f, first, step);
+        tbb::parallel_for(range, body, tbb::auto_partitioner(), context);
+    }
+}
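To make the index arithmetic above concrete: `end` is the ceiling of `(last - first) / step`, and each internal index `i` maps back to `k = first + i*step`. A small worked example (values chosen arbitrarily):

```cpp
// first = 5, last = 14, step = 3
// end   = (14 - 5 - 1)/3 + 1 = 8/3 + 1 = 2 + 1 = 3 iterations
// visited values: k = 5, 8, 11   (14 itself is excluded; [first, last) is half-open)
```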
+//! Parallel iteration over a range of integers with a default step value
+template <typename Index, typename Function>
+void parallel_for(Index first, Index last, const Function& f) {
+    tbb::task_group_context context;
+    parallel_for(first, last, static_cast<Index>(1), f, context);
+}
+template <typename Index, typename Function>
+void parallel_for(Index first, Index last, const Function& f, tbb::task_group_context &context) {
+    parallel_for(first, last, static_cast<Index>(1), f, context);
+}
+
+//@}
+
+} // namespace strict_ppl
+
+using strict_ppl::parallel_for;
+
+} // namespace tbb
+
+#endif /* __TBB_parallel_for_H */
+
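For orientation, here is a minimal usage sketch of the two public flavours provided by this header, reusing the illustrative ApplyNegate body shown after the requirements block above; the array and helper function are placeholders, not part of this patch:

```cpp
#include "tbb/parallel_for.h"
#include "tbb/blocked_range.h"
#include <cstddef>

static float data[1000];

static void clear_element( std::size_t i ) { data[i] = 0.0f; }

void parallel_for_demo() {
    // Range/Body overload with the default partitioner.
    tbb::parallel_for( tbb::blocked_range<std::size_t>(0, 1000), ApplyNegate(data) );
    // Compact index overload (strict_ppl::parallel_for, re-exported as tbb::parallel_for):
    // calls clear_element for i = 0, 1, ..., 999 with the implicit step of 1.
    tbb::parallel_for( std::size_t(0), std::size_t(1000), &clear_element );
}
```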
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_for_each.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_for_each.h
new file mode 100644 (file)
index 0000000..6b8d862
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_parallel_for_each_H
+#define __TBB_parallel_for_each_H
+
+#include "parallel_do.h"
+
+namespace tbb {
+
+//! @cond INTERNAL
+namespace internal {
+    // The class calls the user function in operator()
+    template <typename Function, typename Iterator>
+    class parallel_for_each_body : internal::no_assign {
+        const Function &my_func;
+    public:
+        parallel_for_each_body(const Function &_func) : my_func(_func) {}
+        parallel_for_each_body(const parallel_for_each_body<Function, Iterator> &_caller) : my_func(_caller.my_func) {}
+
+        void operator() ( typename std::iterator_traits<Iterator>::value_type& value ) const {
+            my_func(value);
+        }
+    };
+} // namespace internal
+//! @endcond
+
+/** \name parallel_for_each
+    **/
+//@{
+//! Calls function f for all items in the interval [first, last), using a user-supplied context
+/** @ingroup algorithms */
+template<typename InputIterator, typename Function>
+void parallel_for_each(InputIterator first, InputIterator last, const Function& f, task_group_context &context) {
+    internal::parallel_for_each_body<Function, InputIterator> body(f);
+
+    tbb::parallel_do (first, last, body, context);
+}
+
+//! Uses default context
+template<typename InputIterator, typename Function>
+void parallel_for_each(InputIterator first, InputIterator last, const Function& f) {
+    internal::parallel_for_each_body<Function, InputIterator> body(f);
+
+    tbb::parallel_do (first, last, body);
+}
+
+//@}
+
+} // namespace
+
+#endif /* __TBB_parallel_for_each_H */
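A minimal usage sketch for the overloads above; since parallel_for_each forwards to parallel_do, plain input iterators such as those of std::list suffice. The functor and data are illustrative only:

```cpp
#include "tbb/parallel_for_each.h"
#include <list>

// Squares each element in place; parallel_for_each passes each value_type by reference.
struct SquareInPlace {
    void operator()( int& x ) const { x *= x; }
};

void square_all( std::list<int>& values ) {
    // Default-context overload; the other overload takes a task_group_context as a fourth argument.
    tbb::parallel_for_each( values.begin(), values.end(), SquareInPlace() );
}
```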
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_invoke.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_invoke.h
new file mode 100644 (file)
index 0000000..02c3e80
--- /dev/null
@@ -0,0 +1,359 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_parallel_invoke_H
+#define __TBB_parallel_invoke_H
+
+#include "task.h"
+
+namespace tbb {
+
+//! @cond INTERNAL
+namespace internal {
+    // Simple task object, executing user method
+    template<typename function>
+    class function_invoker : public task{
+    public:
+        function_invoker(const function& _function) : my_function(_function) {}
+    private:
+        const function &my_function;
+        /*override*/
+        task* execute()
+        {
+            my_function();
+            return NULL;
+        }
+    };
+
+    // The class spawns two or three child tasks
+    template <size_t N, typename function1, typename function2, typename function3>
+    class spawner : public task {
+    private:
+        const function1& my_func1;
+        const function2& my_func2;
+        const function3& my_func3;
+        bool is_recycled;
+
+        task* execute (){
+            if(is_recycled){
+                return NULL;
+            }else{
+                __TBB_ASSERT(N==2 || N==3, "Number of arguments passed to spawner is wrong");
+                set_ref_count(N);
+                recycle_as_safe_continuation();
+                internal::function_invoker<function2>* invoker2 = new (allocate_child()) internal::function_invoker<function2>(my_func2);
+                __TBB_ASSERT(invoker2, "Child task allocation failed");
+                spawn(*invoker2);
+                size_t n = N; // To prevent compiler warnings
+                if (n>2) {
+                    internal::function_invoker<function3>* invoker3 = new (allocate_child()) internal::function_invoker<function3>(my_func3);
+                    __TBB_ASSERT(invoker3, "Child task allocation failed");
+                    spawn(*invoker3);
+                }
+                my_func1();
+                is_recycled = true;
+                return NULL;
+            }
+        } // execute
+
+    public:
+        spawner(const function1& _func1, const function2& _func2, const function3& _func3) : my_func1(_func1), my_func2(_func2), my_func3(_func3), is_recycled(false) {}
+    };
+
+    // Creates and spawns child tasks
+    class parallel_invoke_helper : public empty_task {
+    public:
+        // Dummy functor class
+        class parallel_invoke_noop {
+        public:
+            void operator() () const {}
+        };
+        // Creates a helper object with user-defined number of children expected
+        parallel_invoke_helper(int number_of_children)
+        {
+            set_ref_count(number_of_children + 1);
+        }
+        // Adds child task and spawns it
+        template <typename function>
+        void add_child (const function &_func)
+        {
+            internal::function_invoker<function>* invoker = new (allocate_child()) internal::function_invoker<function>(_func);
+            __TBB_ASSERT(invoker, "Child task allocation failed");
+            spawn(*invoker);
+        }
+
+        // Adds a task with multiple child tasks and spawns it
+        // two arguments
+        template <typename function1, typename function2>
+        void add_children (const function1& _func1, const function2& _func2)
+        {
+            // The third argument is a dummy; it is ignored.
+            parallel_invoke_noop noop;
+            internal::spawner<2, function1, function2, parallel_invoke_noop>& sub_root = *new(allocate_child())internal::spawner<2, function1, function2, parallel_invoke_noop>(_func1, _func2, noop);
+            spawn(sub_root);
+        }
+        // three arguments
+        template <typename function1, typename function2, typename function3>
+        void add_children (const function1& _func1, const function2& _func2, const function3& _func3)
+        {
+            internal::spawner<3, function1, function2, function3>& sub_root = *new(allocate_child())internal::spawner<3, function1, function2, function3>(_func1, _func2, _func3);
+            spawn(sub_root);
+        }
+
+        // Waits for all child tasks
+        template <typename F0>
+        void run_and_finish(const F0& f0)
+        {
+            internal::function_invoker<F0>* invoker = new (allocate_child()) internal::function_invoker<F0>(f0);
+            __TBB_ASSERT(invoker, "Child task allocation failed");
+            spawn_and_wait_for_all(*invoker);
+        }
+    };
+    // The class destroys the root task both when an exception occurs and in the normal case
+    class parallel_invoke_cleaner: internal::no_copy { 
+    public:
+        parallel_invoke_cleaner(int number_of_children, tbb::task_group_context& context) : root(*new(task::allocate_root(context)) internal::parallel_invoke_helper(number_of_children))
+        {}
+        ~parallel_invoke_cleaner(){
+            root.destroy(root);
+        }
+        internal::parallel_invoke_helper& root;
+    };
+} // namespace internal
+//! @endcond
+
+/** \name parallel_invoke
+    **/
+//@{
+//! Executes a list of tasks in parallel and waits for all tasks to complete.
+/** @ingroup algorithms */
+
+// parallel_invoke with user-defined context
+// two arguments
+template<typename F0, typename F1 >
+void parallel_invoke(const F0& f0, const F1& f1, tbb::task_group_context& context) {
+    internal::parallel_invoke_cleaner cleaner(2, context);
+    internal::parallel_invoke_helper& root = cleaner.root;
+
+    root.add_child(f1);
+
+    root.run_and_finish(f0);
+}
+
+// three arguments
+template<typename F0, typename F1, typename F2 >
+void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, tbb::task_group_context& context) {
+    internal::parallel_invoke_cleaner cleaner(3, context);
+    internal::parallel_invoke_helper& root = cleaner.root;
+
+    root.add_child(f2);
+    root.add_child(f1);
+
+    root.run_and_finish(f0);
+}
+
+// four arguments
+template<typename F0, typename F1, typename F2, typename F3>
+void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3,
+                     tbb::task_group_context& context)
+{
+    internal::parallel_invoke_cleaner cleaner(4, context);
+    internal::parallel_invoke_helper& root = cleaner.root;
+
+    root.add_child(f3);
+    root.add_child(f2);
+    root.add_child(f1);
+
+    root.run_and_finish(f0);
+}
+
+// five arguments
+template<typename F0, typename F1, typename F2, typename F3, typename F4 >
+void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,
+                     tbb::task_group_context& context)
+{
+    internal::parallel_invoke_cleaner cleaner(3, context);
+    internal::parallel_invoke_helper& root = cleaner.root;
+
+    root.add_children(f4, f3);
+    root.add_children(f2, f1);
+
+    root.run_and_finish(f0);
+}
+
+// six arguments
+template<typename F0, typename F1, typename F2, typename F3, typename F4, typename F5>
+void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, const F5& f5,
+                     tbb::task_group_context& context)
+{
+    internal::parallel_invoke_cleaner cleaner(3, context);
+    internal::parallel_invoke_helper& root = cleaner.root;
+
+    root.add_children(f5, f4, f3);
+    root.add_children(f2, f1);
+
+    root.run_and_finish(f0);
+}
+
+// seven arguments
+template<typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, typename F6>
+void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,
+                     const F5& f5, const F6& f6,
+                     tbb::task_group_context& context)
+{
+    internal::parallel_invoke_cleaner cleaner(3, context);
+    internal::parallel_invoke_helper& root = cleaner.root;
+
+    root.add_children(f6, f5, f4);
+    root.add_children(f3, f2, f1);
+
+    root.run_and_finish(f0);
+}
+
+// eight arguments
+template<typename F0, typename F1, typename F2, typename F3, typename F4,
+         typename F5, typename F6, typename F7>
+void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,
+                     const F5& f5, const F6& f6, const F7& f7,
+                     tbb::task_group_context& context)
+{
+    internal::parallel_invoke_cleaner cleaner(4, context);
+    internal::parallel_invoke_helper& root = cleaner.root;
+
+    root.add_children(f7, f6, f5);
+    root.add_children(f4, f3);
+    root.add_children(f2, f1);
+
+    root.run_and_finish(f0);
+}
+
+// nine arguments
+template<typename F0, typename F1, typename F2, typename F3, typename F4,
+         typename F5, typename F6, typename F7, typename F8>
+void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,
+                     const F5& f5, const F6& f6, const F7& f7, const F8& f8,
+                     tbb::task_group_context& context)
+{
+    internal::parallel_invoke_cleaner cleaner(4, context);
+    internal::parallel_invoke_helper& root = cleaner.root;
+
+    root.add_children(f8, f7, f6);
+    root.add_children(f5, f4, f3);
+    root.add_children(f2, f1);
+
+    root.run_and_finish(f0);
+}
+
+// ten arguments
+template<typename F0, typename F1, typename F2, typename F3, typename F4,
+         typename F5, typename F6, typename F7, typename F8, typename F9>
+void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,
+                     const F5& f5, const F6& f6, const F7& f7, const F8& f8, const F9& f9,
+                     tbb::task_group_context& context)
+{
+    internal::parallel_invoke_cleaner cleaner(4, context);
+    internal::parallel_invoke_helper& root = cleaner.root;
+
+    root.add_children(f9, f8, f7);
+    root.add_children(f6, f5, f4);
+    root.add_children(f3, f2, f1);
+
+    root.run_and_finish(f0);
+}
+
+// two arguments
+template<typename F0, typename F1>
+void parallel_invoke(const F0& f0, const F1& f1) {
+    task_group_context context;
+    parallel_invoke<F0, F1>(f0, f1, context);
+}
+// three arguments
+template<typename F0, typename F1, typename F2>
+void parallel_invoke(const F0& f0, const F1& f1, const F2& f2) {
+    task_group_context context;
+    parallel_invoke<F0, F1, F2>(f0, f1, f2, context);
+}
+// four arguments
+template<typename F0, typename F1, typename F2, typename F3 >
+void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3) {
+    task_group_context context;
+    parallel_invoke<F0, F1, F2, F3>(f0, f1, f2, f3, context);
+}
+// five arguments
+template<typename F0, typename F1, typename F2, typename F3, typename F4>
+void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4) {
+    task_group_context context;
+    parallel_invoke<F0, F1, F2, F3, F4>(f0, f1, f2, f3, f4, context);
+}
+// six arguments
+template<typename F0, typename F1, typename F2, typename F3, typename F4, typename F5>
+void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, const F5& f5) {
+    task_group_context context;
+    parallel_invoke<F0, F1, F2, F3, F4, F5>(f0, f1, f2, f3, f4, f5, context);
+}
+// seven arguments
+template<typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, typename F6>
+void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,
+                     const F5& f5, const F6& f6)
+{
+    task_group_context context;
+    parallel_invoke<F0, F1, F2, F3, F4, F5, F6>(f0, f1, f2, f3, f4, f5, f6, context);
+}
+// eight arguments
+template<typename F0, typename F1, typename F2, typename F3, typename F4, 
+         typename F5, typename F6, typename F7>
+void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,
+                     const F5& f5, const F6& f6, const F7& f7)
+{
+    task_group_context context;
+    parallel_invoke<F0, F1, F2, F3, F4, F5, F6, F7>(f0, f1, f2, f3, f4, f5, f6, f7, context);
+}
+// nine arguments
+template<typename F0, typename F1, typename F2, typename F3, typename F4,
+         typename F5, typename F6, typename F7, typename F8>
+void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,
+                     const F5& f5, const F6& f6, const F7& f7, const F8& f8)
+{
+    task_group_context context;
+    parallel_invoke<F0, F1, F2, F3, F4, F5, F6, F7, F8>(f0, f1, f2, f3, f4, f5, f6, f7, f8, context);
+}
+// ten arguments
+template<typename F0, typename F1, typename F2, typename F3, typename F4,
+         typename F5, typename F6, typename F7, typename F8, typename F9>
+void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,
+                     const F5& f5, const F6& f6, const F7& f7, const F8& f8, const F9& f9)
+{
+    task_group_context context;
+    parallel_invoke<F0, F1, F2, F3, F4, F5, F6, F7, F8, F9>(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, context);
+}
+
+//@}
+
+} // namespace
+
+#endif /* __TBB_parallel_invoke_H */
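A minimal usage sketch of the default-context overloads above; the worker functions are placeholders, not part of this patch:

```cpp
#include "tbb/parallel_invoke.h"

static void build_index()   { /* ... */ }
static void load_textures() { /* ... */ }
static void parse_config()  { /* ... */ }

void start_up() {
    // Runs the three functions, potentially in parallel, and returns
    // only after all of them have completed.
    tbb::parallel_invoke( &build_index, &load_textures, &parse_config );
}
```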
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_reduce.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_reduce.h
new file mode 100644 (file)
index 0000000..670b626
--- /dev/null
@@ -0,0 +1,387 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_parallel_reduce_H
+#define __TBB_parallel_reduce_H
+
+#include "task.h"
+#include "aligned_space.h"
+#include "partitioner.h"
+#include <new>
+
+namespace tbb {
+
+//! @cond INTERNAL
+namespace internal {
+
+    //! ITT instrumented routine that stores src into location pointed to by dst.
+    void __TBB_EXPORTED_FUNC itt_store_pointer_with_release_v3( void* dst, void* src );
+
+    //! ITT instrumented routine that loads pointer from location pointed to by src.
+    void* __TBB_EXPORTED_FUNC itt_load_pointer_with_acquire_v3( const void* src );
+
+    template<typename T> inline void parallel_reduce_store_body( T*& dst, T* src ) {
+#if TBB_USE_THREADING_TOOLS
+        itt_store_pointer_with_release_v3(&dst,src);
+#else
+        __TBB_store_with_release(dst,src);
+#endif /* TBB_USE_THREADING_TOOLS */
+    }
+
+    template<typename T> inline T* parallel_reduce_load_body( T*& src ) {
+#if TBB_USE_THREADING_TOOLS
+        return static_cast<T*>(itt_load_pointer_with_acquire_v3(&src));
+#else
+        return __TBB_load_with_acquire(src);
+#endif /* TBB_USE_THREADING_TOOLS */
+    }
+
+    //! 0 if root, 1 if a left child, 2 if a right child.
+    /** Represented as a char, not enum, for compactness. */
+    typedef char reduction_context;
+
+    //! Task type used to combine the partial results of parallel_reduce.
+    /** @ingroup algorithms */
+    template<typename Body>
+    class finish_reduce: public task {
+        //! Pointer to body, or NULL if the left child has not yet finished. 
+        Body* my_body;
+        bool has_right_zombie;
+        const reduction_context my_context;
+        aligned_space<Body,1> zombie_space;
+        finish_reduce( char context_ ) : 
+            my_body(NULL),
+            has_right_zombie(false),
+            my_context(context_)
+        {
+        }
+        task* execute() {
+            if( has_right_zombie ) {
+                // Right child was stolen.
+                Body* s = zombie_space.begin();
+                my_body->join( *s );
+                s->~Body();
+            }
+            if( my_context==1 ) 
+                parallel_reduce_store_body( static_cast<finish_reduce*>(parent())->my_body, my_body );
+            return NULL;
+        }       
+        template<typename Range,typename Body_, typename Partitioner>
+        friend class start_reduce;
+    };
+
+    //! Task type used to split the work of parallel_reduce.
+    /** @ingroup algorithms */
+    template<typename Range, typename Body, typename Partitioner>
+    class start_reduce: public task {
+        typedef finish_reduce<Body> finish_type;
+        Body* my_body;
+        Range my_range;
+        typename Partitioner::partition_type my_partition;
+        reduction_context my_context;
+        /*override*/ task* execute();
+        template<typename Body_>
+        friend class finish_reduce;
+    
+        //! Constructor used for root task
+        start_reduce( const Range& range, Body* body, Partitioner& partitioner ) :
+            my_body(body),
+            my_range(range),
+            my_partition(partitioner),
+            my_context(0)
+        {
+        }
+        //! Splitting constructor used to generate children.
+        /** this becomes left child.  Newly constructed object is right child. */
+        start_reduce( start_reduce& parent_, split ) :
+            my_body(parent_.my_body),
+            my_range(parent_.my_range,split()),
+            my_partition(parent_.my_partition,split()),
+            my_context(2)
+        {
+            my_partition.set_affinity(*this);
+            parent_.my_context = 1;
+        }
+        //! Update affinity info, if any
+        /*override*/ void note_affinity( affinity_id id ) {
+            my_partition.note_affinity( id );
+        }
+
+public:
+        static void run( const Range& range, Body& body, Partitioner& partitioner ) {
+            if( !range.empty() ) {
+#if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP
+                task::spawn_root_and_wait( *new(task::allocate_root()) start_reduce(range,&body,partitioner) );
+#else
+                // A bound context keeps exceptions thrown by the body from affecting nesting or sibling algorithms,
+                // and allows users to handle exceptions safely by wrapping the call to parallel_reduce in a try-block.
+                task_group_context context;
+                task::spawn_root_and_wait( *new(task::allocate_root(context)) start_reduce(range,&body,partitioner) );
+#endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */
+            }
+        }
+#if __TBB_TASK_GROUP_CONTEXT
+        static void run( const Range& range, Body& body, Partitioner& partitioner, task_group_context& context ) {
+            if( !range.empty() ) 
+                task::spawn_root_and_wait( *new(task::allocate_root(context)) start_reduce(range,&body,partitioner) );
+        }
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+    };
+
+    template<typename Range, typename Body, typename Partitioner>
+    task* start_reduce<Range,Body,Partitioner>::execute() {
+        if( my_context==2 ) {
+            finish_type* p = static_cast<finish_type*>(parent() );
+            if( !parallel_reduce_load_body(p->my_body) ) {
+                my_body = new( p->zombie_space.begin() ) Body(*my_body,split());
+                p->has_right_zombie = true;
+            } 
+        }
+        if( !my_range.is_divisible() || my_partition.should_execute_range(*this) ) {
+            (*my_body)( my_range );
+            if( my_context==1 ) 
+                parallel_reduce_store_body(static_cast<finish_type*>(parent())->my_body, my_body );
+            return my_partition.continue_after_execute_range();
+        } else {
+            finish_type& c = *new( allocate_continuation()) finish_type(my_context);
+            recycle_as_child_of(c);
+            c.set_ref_count(2);    
+            bool delay = my_partition.decide_whether_to_delay();
+            start_reduce& b = *new( c.allocate_child() ) start_reduce(*this,split());
+            my_partition.spawn_or_delay(delay,b);
+            return this;
+        }
+    } 
+
+    //! Auxiliary class for parallel_reduce; for internal use only.
+    /** The adaptor class that implements \ref parallel_reduce_body_req "parallel_reduce Body"
+        using given \ref parallel_reduce_lambda_req "anonymous function objects".
+     **/
+    /** @ingroup algorithms */
+    template<typename Range, typename Value, typename RealBody, typename Reduction>
+    class lambda_reduce_body {
+
+//FIXME: decide if my_real_body, my_reduction, and identity_element should be copied or referenced
+//       (might require some performance measurements)
+
+        const Value&     identity_element;
+        const RealBody&  my_real_body;
+        const Reduction& my_reduction;
+        Value            my_value;
+        lambda_reduce_body& operator= ( const lambda_reduce_body& other );
+    public:
+        lambda_reduce_body( const Value& identity, const RealBody& body, const Reduction& reduction )
+            : identity_element(identity)
+            , my_real_body(body)
+            , my_reduction(reduction)
+            , my_value(identity)
+        { }
+        lambda_reduce_body( const lambda_reduce_body& other )
+            : identity_element(other.identity_element)
+            , my_real_body(other.my_real_body)
+            , my_reduction(other.my_reduction)
+            , my_value(other.my_value)
+        { }
+        lambda_reduce_body( lambda_reduce_body& other, tbb::split )
+            : identity_element(other.identity_element)
+            , my_real_body(other.my_real_body)
+            , my_reduction(other.my_reduction)
+            , my_value(other.identity_element)
+        { }
+        void operator()(Range& range) {
+            my_value = my_real_body(range, const_cast<const Value&>(my_value));
+        }
+        void join( lambda_reduce_body& rhs ) {
+            my_value = my_reduction(const_cast<const Value&>(my_value), const_cast<const Value&>(rhs.my_value));
+        }
+        Value result() const {
+            return my_value;
+        }
+    };
+
+} // namespace internal
+//! @endcond
+
+// Requirements on Range concept are documented in blocked_range.h
+
+/** \page parallel_reduce_body_req Requirements on parallel_reduce body
+    Class \c Body implementing the concept of parallel_reduce body must define:
+    - \code Body::Body( Body&, split ); \endcode        Splitting constructor.
+                                                        Must be able to run concurrently with operator() and method \c join
+    - \code Body::~Body(); \endcode                     Destructor
+    - \code void Body::operator()( Range& r ); \endcode Function call operator applying body to range \c r
+                                                        and accumulating the result
+    - \code void Body::join( Body& b ); \endcode        Join results. 
+                                                        The result in \c b should be merged into the result of \c this
+**/
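As a concrete editorial illustration of these requirements, here is a minimal summation Body; the name SumBody and the data layout are illustrative, not part of this patch:

```cpp
#include "tbb/blocked_range.h"
#include <cstddef>

// Sums a float array. Each task instance accumulates into its own my_sum;
// join() folds a right child's partial result into the left child.
class SumBody {
    const float* my_a;
public:
    float my_sum;
    SumBody( const float a[] ) : my_a(a), my_sum(0) {}
    SumBody( SumBody& other, tbb::split ) : my_a(other.my_a), my_sum(0) {}  // splitting constructor
    void operator()( const tbb::blocked_range<std::size_t>& r ) {
        for( std::size_t i = r.begin(); i != r.end(); ++i )
            my_sum += my_a[i];
    }
    void join( SumBody& rhs ) { my_sum += rhs.my_sum; }
};
```

Calling `tbb::parallel_reduce( tbb::blocked_range<std::size_t>(0, n), body )` with such a body leaves the total in `body.my_sum`.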
+
+/** \page parallel_reduce_lambda_req Requirements on parallel_reduce anonymous function objects (lambda functions)
+    TO BE DOCUMENTED
+**/
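Although the page above is still marked as undocumented, the expected signatures can be read off internal::lambda_reduce_body; the following is an editorial summary, not official TBB documentation:

```cpp
// RealBody:  invoked as   Value v2 = real_body( subrange, v1 );
//            i.e. callable on a const object with ( Range&, const Value& ),
//            returning the Value updated with the subrange's contribution.
// Reduction: invoked as   Value v3 = reduction( left, right );
//            i.e. callable on a const object with ( const Value&, const Value& ),
//            returning the combined result.
```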
+
+/** \name parallel_reduce
+    See also requirements on \ref range_req "Range" and \ref parallel_reduce_body_req "parallel_reduce Body". **/
+//@{
+
+//! Parallel iteration with reduction and default partitioner.
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+void parallel_reduce( const Range& range, Body& body ) {
+    internal::start_reduce<Range,Body, const __TBB_DEFAULT_PARTITIONER>::run( range, body, __TBB_DEFAULT_PARTITIONER() );
+}
+
+//! Parallel iteration with reduction and simple_partitioner
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+void parallel_reduce( const Range& range, Body& body, const simple_partitioner& partitioner ) {
+    internal::start_reduce<Range,Body,const simple_partitioner>::run( range, body, partitioner );
+}
+
+//! Parallel iteration with reduction and auto_partitioner
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+void parallel_reduce( const Range& range, Body& body, const auto_partitioner& partitioner ) {
+    internal::start_reduce<Range,Body,const auto_partitioner>::run( range, body, partitioner );
+}
+
+//! Parallel iteration with reduction and affinity_partitioner
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+void parallel_reduce( const Range& range, Body& body, affinity_partitioner& partitioner ) {
+    internal::start_reduce<Range,Body,affinity_partitioner>::run( range, body, partitioner );
+}
+
+#if __TBB_TASK_GROUP_CONTEXT
+//! Parallel iteration with reduction, simple partitioner and user-supplied context.
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+void parallel_reduce( const Range& range, Body& body, const simple_partitioner& partitioner, task_group_context& context ) {
+    internal::start_reduce<Range,Body,const simple_partitioner>::run( range, body, partitioner, context );
+}
+
+//! Parallel iteration with reduction, auto_partitioner and user-supplied context
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+void parallel_reduce( const Range& range, Body& body, const auto_partitioner& partitioner, task_group_context& context ) {
+    internal::start_reduce<Range,Body,const auto_partitioner>::run( range, body, partitioner, context );
+}
+
+//! Parallel iteration with reduction, affinity_partitioner and user-supplied context
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+void parallel_reduce( const Range& range, Body& body, affinity_partitioner& partitioner, task_group_context& context ) {
+    internal::start_reduce<Range,Body,affinity_partitioner>::run( range, body, partitioner, context );
+}
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+/** parallel_reduce overloads that work with anonymous function objects
+    (see also \ref parallel_reduce_lambda_req "requirements on parallel_reduce anonymous function objects"). **/
+
+//! Parallel iteration with reduction and default partitioner.
+/** @ingroup algorithms **/
+template<typename Range, typename Value, typename RealBody, typename Reduction>
+Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction ) {
+    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
+    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const __TBB_DEFAULT_PARTITIONER>
+                          ::run(range, body, __TBB_DEFAULT_PARTITIONER() );
+    return body.result();
+}
+
+//! Parallel iteration with reduction and simple_partitioner.
+/** @ingroup algorithms **/
+template<typename Range, typename Value, typename RealBody, typename Reduction>
+Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
+                       const simple_partitioner& partitioner ) {
+    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
+    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const simple_partitioner>
+                          ::run(range, body, partitioner );
+    return body.result();
+}
+
+//! Parallel iteration with reduction and auto_partitioner
+/** @ingroup algorithms **/
+template<typename Range, typename Value, typename RealBody, typename Reduction>
+Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
+                       const auto_partitioner& partitioner ) {
+    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
+    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const auto_partitioner>
+                          ::run( range, body, partitioner );
+    return body.result();
+}
+
+//! Parallel iteration with reduction and affinity_partitioner
+/** @ingroup algorithms **/
+template<typename Range, typename Value, typename RealBody, typename Reduction>
+Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
+                       affinity_partitioner& partitioner ) {
+    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
+    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,affinity_partitioner>
+                                        ::run( range, body, partitioner );
+    return body.result();
+}
+
+#if __TBB_TASK_GROUP_CONTEXT
+//! Parallel iteration with reduction, simple partitioner and user-supplied context.
+/** @ingroup algorithms **/
+template<typename Range, typename Value, typename RealBody, typename Reduction>
+Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
+                       const simple_partitioner& partitioner, task_group_context& context ) {
+    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
+    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const simple_partitioner>
+                          ::run( range, body, partitioner, context );
+    return body.result();
+}
+
+//! Parallel iteration with reduction, auto_partitioner and user-supplied context
+/** @ingroup algorithms **/
+template<typename Range, typename Value, typename RealBody, typename Reduction>
+Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
+                       const auto_partitioner& partitioner, task_group_context& context ) {
+    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
+    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const auto_partitioner>
+                          ::run( range, body, partitioner, context );
+    return body.result();
+}
+
+//! Parallel iteration with reduction, affinity_partitioner and user-supplied context
+/** @ingroup algorithms **/
+template<typename Range, typename Value, typename RealBody, typename Reduction>
+Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
+                       affinity_partitioner& partitioner, task_group_context& context ) {
+    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
+    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,affinity_partitioner>
+                                        ::run( range, body, partitioner, context );
+    return body.result();
+}
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+//@}
+
+} // namespace tbb
+
+#endif /* __TBB_parallel_reduce_H */
+
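A usage sketch of the functional (identity / real body / reduction) overloads declared above; the functor names and data are illustrative only:

```cpp
#include "tbb/parallel_reduce.h"
#include "tbb/blocked_range.h"
#include <cstddef>

// Accumulates a subrange of a[] on top of a running partial sum.
struct PartialSum {
    const float* a;
    PartialSum( const float* a_ ) : a(a_) {}
    float operator()( const tbb::blocked_range<std::size_t>& r, float init ) const {
        for( std::size_t i = r.begin(); i != r.end(); ++i )
            init += a[i];
        return init;
    }
};

// Combines two partial results.
struct Add {
    float operator()( float x, float y ) const { return x + y; }
};

float parallel_sum( const float a[], std::size_t n ) {
    return tbb::parallel_reduce( tbb::blocked_range<std::size_t>(0, n),
                                 0.0f,            // identity element
                                 PartialSum(a),   // per-subrange accumulation
                                 Add() );         // reduction of partial results
}
```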
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_scan.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_scan.h
new file mode 100644 (file)
index 0000000..3a1963f
--- /dev/null
@@ -0,0 +1,351 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_parallel_scan_H
+#define __TBB_parallel_scan_H
+
+#include "task.h"
+#include "aligned_space.h"
+#include <new>
+#include "partitioner.h"
+
+namespace tbb {
+
+//! Used to indicate that the initial scan is being performed.
+/** @ingroup algorithms */
+struct pre_scan_tag {
+    static bool is_final_scan() {return false;}
+};
+
+//! Used to indicate that the final scan is being performed.
+/** @ingroup algorithms */
+struct final_scan_tag {
+    static bool is_final_scan() {return true;}
+};
+
+//! @cond INTERNAL
+namespace internal {
+
+    //! Performs final scan for a leaf 
+    /** @ingroup algorithms */
+    template<typename Range, typename Body>
+    class final_sum: public task {
+    public:
+        Body body;
+    private:
+        aligned_space<Range,1> range;
+        //! Where to put result of last subrange, or NULL if not last subrange.
+        Body* stuff_last;
+    public:
+        final_sum( Body& body_ ) :
+            body(body_,split())
+        {
+            poison_pointer(stuff_last);
+        }
+        ~final_sum() {
+            range.begin()->~Range();
+        }     
+        void finish_construction( const Range& range_, Body* stuff_last_ ) {
+            new( range.begin() ) Range(range_);
+            stuff_last = stuff_last_;
+        }
+    private:
+        /*override*/ task* execute() {
+            body( *range.begin(), final_scan_tag() );
+            if( stuff_last )
+                stuff_last->assign(body);
+            return NULL;
+        }
+    };       
+
+    //! Split work to be done in the scan.
+    /** @ingroup algorithms */
+    template<typename Range, typename Body>
+    class sum_node: public task {
+        typedef final_sum<Range,Body> final_sum_type;
+    public:
+        final_sum_type *incoming; 
+        final_sum_type *body;
+        Body *stuff_last;
+    private:
+        final_sum_type *left_sum;
+        sum_node *left;
+        sum_node *right;     
+        bool left_is_final;
+        Range range;
+        sum_node( const Range range_, bool left_is_final_ ) : 
+            left_sum(NULL), 
+            left(NULL), 
+            right(NULL), 
+            left_is_final(left_is_final_), 
+            range(range_)
+        {
+            // Poison fields that will be set by second pass.
+            poison_pointer(body);
+            poison_pointer(incoming);
+        }
+        task* create_child( const Range& range_, final_sum_type& f, sum_node* n, final_sum_type* incoming_, Body* stuff_last_ ) {
+            if( !n ) {
+                f.recycle_as_child_of( *this );
+                f.finish_construction( range_, stuff_last_ );
+                return &f;
+            } else {
+                n->body = &f;
+                n->incoming = incoming_;
+                n->stuff_last = stuff_last_;
+                return n;
+            }
+        }
+        /*override*/ task* execute() {
+            if( body ) {
+                if( incoming )
+                    left_sum->body.reverse_join( incoming->body );
+                recycle_as_continuation();
+                sum_node& c = *this;
+                task* b = c.create_child(Range(range,split()),*left_sum,right,left_sum,stuff_last);
+                task* a = left_is_final ? NULL : c.create_child(range,*body,left,incoming,NULL);
+                set_ref_count( (a!=NULL)+(b!=NULL) );
+                body = NULL; 
+                if( a ) spawn(*b);
+                else a = b;
+                return a;
+            } else {
+                return NULL;
+            }
+        }
+        template<typename Range_,typename Body_,typename Partitioner_>
+        friend class start_scan;
+
+        template<typename Range_,typename Body_>
+        friend class finish_scan;
+    };
+
+    //! Combine partial results
+    /** @ingroup algorithms */
+    template<typename Range, typename Body>
+    class finish_scan: public task {
+        typedef sum_node<Range,Body> sum_node_type;
+        typedef final_sum<Range,Body> final_sum_type;
+        final_sum_type** const sum;
+        sum_node_type*& return_slot;
+    public:
+        final_sum_type* right_zombie;
+        sum_node_type& result;
+
+        /*override*/ task* execute() {
+            __TBB_ASSERT( result.ref_count()==(result.left!=NULL)+(result.right!=NULL), NULL );
+            if( result.left )
+                result.left_is_final = false;
+            if( right_zombie && sum ) 
+                ((*sum)->body).reverse_join(result.left_sum->body);
+            __TBB_ASSERT( !return_slot, NULL );
+            if( right_zombie || result.right ) {
+                return_slot = &result;
+            } else {
+                destroy( result );
+            }
+            if( right_zombie && !sum && !result.right ) destroy(*right_zombie);
+            return NULL;
+        }
+
+        finish_scan( sum_node_type*& return_slot_, final_sum_type** sum_, sum_node_type& result_ ) : 
+            sum(sum_),
+            return_slot(return_slot_), 
+            right_zombie(NULL),
+            result(result_)
+        {
+            __TBB_ASSERT( !return_slot, NULL );
+        }
+    };
+
+    //! Initial task to split the work
+    /** @ingroup algorithms */
+    template<typename Range, typename Body, typename Partitioner=simple_partitioner>
+    class start_scan: public task {
+        typedef sum_node<Range,Body> sum_node_type;
+        typedef final_sum<Range,Body> final_sum_type;
+        final_sum_type* body;
+        /** Non-null if caller is requesting total. */
+        final_sum_type** sum; 
+        sum_node_type** return_slot;
+        /** Null if computing root. */
+        sum_node_type* parent_sum;
+        bool is_final;
+        bool is_right_child;
+        Range range;
+        typename Partitioner::partition_type partition;
+        /*override*/ task* execute();
+    public:
+        start_scan( sum_node_type*& return_slot_, start_scan& parent_, sum_node_type* parent_sum_ ) :
+            body(parent_.body),
+            sum(parent_.sum),
+            return_slot(&return_slot_),
+            parent_sum(parent_sum_),
+            is_final(parent_.is_final),
+            is_right_child(false),
+            range(parent_.range,split()),
+            partition(parent_.partition,split())
+        {
+            __TBB_ASSERT( !*return_slot, NULL );
+        }
+
+        start_scan( sum_node_type*& return_slot_, const Range& range_, final_sum_type& body_, const Partitioner& partitioner_) :
+            body(&body_),
+            sum(NULL),
+            return_slot(&return_slot_),
+            parent_sum(NULL),
+            is_final(true),
+            is_right_child(false),
+            range(range_),
+            partition(partitioner_)
+        {
+            __TBB_ASSERT( !*return_slot, NULL );
+        }
+
+        static void run(  const Range& range, Body& body, const Partitioner& partitioner ) {
+            if( !range.empty() ) {
+                typedef internal::start_scan<Range,Body,Partitioner> start_pass1_type;
+                internal::sum_node<Range,Body>* root = NULL;
+                typedef internal::final_sum<Range,Body> final_sum_type;
+                final_sum_type* temp_body = new(task::allocate_root()) final_sum_type( body );
+                start_pass1_type& pass1 = *new(task::allocate_root()) start_pass1_type(
+                    /*return_slot=*/root,
+                    range,
+                    *temp_body,
+                    partitioner );
+                task::spawn_root_and_wait( pass1 );
+                if( root ) {
+                    root->body = temp_body;
+                    root->incoming = NULL;
+                    root->stuff_last = &body;
+                    task::spawn_root_and_wait( *root );
+                } else {
+                    body.assign(temp_body->body);
+                    temp_body->finish_construction( range, NULL );
+                    temp_body->destroy(*temp_body);
+                }
+            }
+        }
+    };
+
+    template<typename Range, typename Body, typename Partitioner>
+    task* start_scan<Range,Body,Partitioner>::execute() {
+        typedef internal::finish_scan<Range,Body> finish_pass1_type;
+        finish_pass1_type* p = parent_sum ? static_cast<finish_pass1_type*>( parent() ) : NULL;
+        // Inspecting p->result.left_sum would ordinarily be a race condition.
+        // But we inspect it only if we are not a stolen task, in which case we
+        // know that task assigning to p->result.left_sum has completed.
+        bool treat_as_stolen = is_right_child && (is_stolen_task() || body!=p->result.left_sum);
+        if( treat_as_stolen ) {
+            // Invocation is for right child that has been really stolen or needs to be virtually stolen
+            p->right_zombie = body = new( allocate_root() ) final_sum_type(body->body);
+            is_final = false;
+        }
+        task* next_task = NULL;
+        if( (is_right_child && !treat_as_stolen) || !range.is_divisible() || partition.should_execute_range(*this) ) {
+            if( is_final )
+                (body->body)( range, final_scan_tag() );
+            else if( sum )
+                (body->body)( range, pre_scan_tag() );
+            if( sum ) 
+                *sum = body;
+            __TBB_ASSERT( !*return_slot, NULL );
+        } else {
+            sum_node_type* result;
+            if( parent_sum ) 
+                result = new(allocate_additional_child_of(*parent_sum)) sum_node_type(range,/*left_is_final=*/is_final);
+            else
+                result = new(task::allocate_root()) sum_node_type(range,/*left_is_final=*/is_final);
+            finish_pass1_type& c = *new( allocate_continuation()) finish_pass1_type(*return_slot,sum,*result);
+            // Split off right child
+            start_scan& b = *new( c.allocate_child() ) start_scan( /*return_slot=*/result->right, *this, result );
+            b.is_right_child = true;    
+            // Left child is recycling of *this.  Must recycle this before spawning b, 
+            // otherwise b might complete and decrement c.ref_count() to zero, which
+            // would cause c.execute() to run prematurely.
+            recycle_as_child_of(c);
+            c.set_ref_count(2);
+            c.spawn(b);
+            sum = &result->left_sum;
+            return_slot = &result->left;
+            is_right_child = false;
+            next_task = this;
+            parent_sum = result; 
+            __TBB_ASSERT( !*return_slot, NULL );
+        }
+        return next_task;
+    } 
+} // namespace internal
+//! @endcond
+
+// Requirements on Range concept are documented in blocked_range.h
+
+/** \page parallel_scan_body_req Requirements on parallel_scan body
+    Class \c Body implementing the concept of parallel_scan body must define:
+    - \code Body::Body( Body&, split ); \endcode    Splitting constructor.
+                                                    Split \c b so that \c this and \c b can accumulate separately
+    - \code Body::~Body(); \endcode                 Destructor
+    - \code void Body::operator()( const Range& r, pre_scan_tag ); \endcode
+                                                    Preprocess iterations for range \c r
+    - \code void Body::operator()( const Range& r, final_scan_tag ); \endcode 
+                                                    Do final processing for iterations of range \c r
+    - \code void Body::reverse_join( Body& a ); \endcode
+                                                    Merge preprocessing state of \c a into \c this, where \c a was 
+                                                    created earlier from \c b by b's splitting constructor
+**/
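An editorial illustration of these requirements: a running-sum Body. Note that, in addition to the operations listed above, the implementation in this header also calls Body::assign() (see final_sum::execute and start_scan::run), so it is provided here as well; the class name and data are not part of this patch:

```cpp
#include "tbb/blocked_range.h"
#include <cstddef>

// Computes the running (prefix) sum of x[] into y[].
class PrefixSumBody {
    float sum;
    float* const y;
    const float* const x;
public:
    PrefixSumBody( float y_[], const float x_[] ) : sum(0), y(y_), x(x_) {}
    PrefixSumBody( PrefixSumBody& b, tbb::split ) : sum(0), y(b.y), x(b.x) {}
    // One template handles both pre_scan_tag and final_scan_tag invocations.
    template<typename Tag>
    void operator()( const tbb::blocked_range<std::size_t>& r, Tag ) {
        float temp = sum;
        for( std::size_t i = r.begin(); i != r.end(); ++i ) {
            temp += x[i];
            if( Tag::is_final_scan() )
                y[i] = temp;
        }
        sum = temp;
    }
    void reverse_join( PrefixSumBody& a ) { sum = a.sum + sum; }
    void assign( PrefixSumBody& b )       { sum = b.sum; }
    float total() const                   { return sum; }
};
```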
+
+/** \name parallel_scan
+    See also requirements on \ref range_req "Range" and \ref parallel_scan_body_req "parallel_scan Body". **/
+//@{
+
+//! Parallel prefix with default partitioner
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+void parallel_scan( const Range& range, Body& body ) {
+    internal::start_scan<Range,Body,__TBB_DEFAULT_PARTITIONER>::run(range,body,__TBB_DEFAULT_PARTITIONER());
+}
+
+//! Parallel prefix with simple_partitioner
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+void parallel_scan( const Range& range, Body& body, const simple_partitioner& partitioner ) {
+    internal::start_scan<Range,Body,simple_partitioner>::run(range,body,partitioner);
+}
+
+//! Parallel prefix with auto_partitioner
+/** @ingroup algorithms **/
+template<typename Range, typename Body>
+void parallel_scan( const Range& range, Body& body, const auto_partitioner& partitioner ) {
+    internal::start_scan<Range,Body,auto_partitioner>::run(range,body,partitioner);
+}
+//@}
+
+} // namespace tbb
+
+#endif /* __TBB_parallel_scan_H */
+
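And the corresponding call, using the illustrative PrefixSumBody sketched after the body-requirements block above:

```cpp
#include "tbb/parallel_scan.h"
#include "tbb/blocked_range.h"
#include <cstddef>

float prefix_sum( float y[], const float x[], std::size_t n ) {
    PrefixSumBody body(y, x);
    tbb::parallel_scan( tbb::blocked_range<std::size_t>(0, n), body );
    return body.total();   // sum of all n elements
}
```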
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_sort.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_sort.h
new file mode 100644 (file)
index 0000000..6fbbe80
--- /dev/null
@@ -0,0 +1,227 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_parallel_sort_H
+#define __TBB_parallel_sort_H
+
+#include "parallel_for.h"
+#include "blocked_range.h"
+#include <algorithm>
+#include <iterator>
+#include <functional>
+
+namespace tbb {
+
+//! @cond INTERNAL
+namespace internal {
+
+//! Range used in quicksort to split elements into subranges based on a value.
+/** The split operation selects a splitter and places all elements less than or equal 
+    to the value in the first range and the remaining elements in the second range.
+    @ingroup algorithms */
+template<typename RandomAccessIterator, typename Compare>
+class quick_sort_range: private no_assign {
+
+    inline size_t median_of_three(const RandomAccessIterator &array, size_t l, size_t m, size_t r) const {
+        return comp(array[l], array[m]) ? ( comp(array[m], array[r]) ? m : ( comp( array[l], array[r]) ? r : l ) ) 
+                                        : ( comp(array[r], array[m]) ? m : ( comp( array[r], array[l] ) ? r : l ) );
+    }
+
+    inline size_t pseudo_median_of_nine( const RandomAccessIterator &array, const quick_sort_range &range ) const {
+        size_t offset = range.size/8u;
+        return median_of_three(array, 
+                               median_of_three(array, 0, offset, offset*2),
+                               median_of_three(array, offset*3, offset*4, offset*5),
+                               median_of_three(array, offset*6, offset*7, range.size - 1) );
+
+    }
+
+public:
+
+    static const size_t grainsize = 500;
+    const Compare &comp;
+    RandomAccessIterator begin;
+    size_t size;
+
+    quick_sort_range( RandomAccessIterator begin_, size_t size_, const Compare &comp_ ) :
+        comp(comp_), begin(begin_), size(size_) {}
+
+    bool empty() const {return size==0;}
+    bool is_divisible() const {return size>=grainsize;}
+
+    quick_sort_range( quick_sort_range& range, split ) : comp(range.comp) {
+        RandomAccessIterator array = range.begin;
+        RandomAccessIterator key0 = range.begin; 
+        size_t m = pseudo_median_of_nine(array, range);
+        if (m) std::swap ( array[0], array[m] );
+
+        size_t i=0;
+        size_t j=range.size;
+        // Partition interval [i+1,j-1] with key *key0.
+        for(;;) {
+            __TBB_ASSERT( i<j, NULL );
+            // Loop must terminate since array[l]==*key0.
+            do {
+                --j;
+                __TBB_ASSERT( i<=j, "bad ordering relation?" );
+            } while( comp( *key0, array[j] ));
+            do {
+                __TBB_ASSERT( i<=j, NULL );
+                if( i==j ) goto partition;
+                ++i;
+            } while( comp( array[i],*key0 ));
+            if( i==j ) goto partition;
+            std::swap( array[i], array[j] );
+        }
+partition:
+        // Put the partition key where it belongs
+        std::swap( array[j], *key0 );
+        // array[l..j) is less than or equal to key.
+        // array(j..r) is greater than or equal to key.
+        // array[j] is equal to key.
+        i=j+1;
+        begin = array+i;
+        size = range.size-i;
+        range.size = j;
+    }
+};
+
+//! Body class used to test if elements in a range are presorted
+/** @ingroup algorithms */
+template<typename RandomAccessIterator, typename Compare>
+class quick_sort_pretest_body : internal::no_assign {
+    const Compare &comp;
+
+public:
+    quick_sort_pretest_body(const Compare &_comp) : comp(_comp) {}
+
+    void operator()( const blocked_range<RandomAccessIterator>& range ) const {
+        task &my_task = task::self();
+        RandomAccessIterator my_end = range.end();
+
+        int i = 0;
+        for (RandomAccessIterator k = range.begin(); k != my_end; ++k, ++i) {
+            if ( i%64 == 0 && my_task.is_cancelled() ) break;
+          
+            // The k-1 is never out-of-range because the first chunk starts at begin+serial_cutoff+1
+            if ( comp( *(k), *(k-1) ) ) {
+                my_task.cancel_group_execution();
+                break;
+            }
+        }
+    }
+
+};
+
+//! Body class used to sort elements in a range that is smaller than the grainsize.
+/** @ingroup algorithms */
+template<typename RandomAccessIterator, typename Compare>
+struct quick_sort_body {
+    void operator()( const quick_sort_range<RandomAccessIterator,Compare>& range ) const {
+        //SerialQuickSort( range.begin, range.size, range.comp );
+        std::sort( range.begin, range.begin + range.size, range.comp );
+    }
+};
+
+//! Wrapper method to initiate the sort by calling parallel_for.
+/** @ingroup algorithms */
+template<typename RandomAccessIterator, typename Compare>
+void parallel_quick_sort( RandomAccessIterator begin, RandomAccessIterator end, const Compare& comp ) {
+    task_group_context my_context;
+    const int serial_cutoff = 9;
+
+    __TBB_ASSERT( begin + serial_cutoff < end, "min_parallel_size is smaller than serial cutoff?" );
+    RandomAccessIterator k;
+    for ( k = begin ; k != begin + serial_cutoff; ++k ) {
+        if ( comp( *(k+1), *k ) ) {
+            goto do_parallel_quick_sort;
+        }
+    }
+
+    parallel_for( blocked_range<RandomAccessIterator>(k+1, end),
+                  quick_sort_pretest_body<RandomAccessIterator,Compare>(comp),
+                  auto_partitioner(),
+                  my_context);
+
+    if (my_context.is_group_execution_cancelled())
+do_parallel_quick_sort:
+        parallel_for( quick_sort_range<RandomAccessIterator,Compare>(begin, end-begin, comp ), 
+                      quick_sort_body<RandomAccessIterator,Compare>(),
+                      auto_partitioner() );
+}
+
+} // namespace internal
+//! @endcond
+
+/** \page parallel_sort_iter_req Requirements on iterators for parallel_sort
+    Requirements on value type \c T of \c RandomAccessIterator for \c parallel_sort:
+    - \code void swap( T& x, T& y ) \endcode        Swaps \c x and \c y
+    - \code bool Compare::operator()( const T& x, const T& y ) \endcode
+                                                    True if x comes before y;
+**/
+
+/** \name parallel_sort
+    See also requirements on \ref parallel_sort_iter_req "iterators for parallel_sort". **/
+//@{
+
+//! Sorts the data in [begin,end) using the given comparator 
+/** The compare function object is used for all comparisons between elements during sorting.
+    The compare object must define a bool operator() function.
+    @ingroup algorithms **/
+template<typename RandomAccessIterator, typename Compare>
+void parallel_sort( RandomAccessIterator begin, RandomAccessIterator end, const Compare& comp) { 
+    const int min_parallel_size = 500; 
+    if( end > begin ) {
+        if (end - begin < min_parallel_size) { 
+            std::sort(begin, end, comp);
+        } else {
+            internal::parallel_quick_sort(begin, end, comp);
+        }
+    }
+}
+
+//! Sorts the data in [begin,end) with the default comparator \c std::less<T>, where \c T is the iterator's value type
+/** @ingroup algorithms **/
+template<typename RandomAccessIterator>
+inline void parallel_sort( RandomAccessIterator begin, RandomAccessIterator end ) { 
+    parallel_sort( begin, end, std::less< typename std::iterator_traits<RandomAccessIterator>::value_type >() );
+}
+
+//! Sorts the data in the range \c [begin,end) with a default comparator \c std::less<T>
+/** @ingroup algorithms **/
+template<typename T>
+inline void parallel_sort( T * begin, T * end ) {
+    parallel_sort( begin, end, std::less< T >() );
+}   
+//@}
+
+
+} // namespace tbb
+
+#endif
+
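
As a usage note (not part of the patch), a minimal sketch of calling tbb::parallel_sort with and without an explicit comparator might look as follows; sort_examples is an illustrative name.

#include "tbb/parallel_sort.h"
#include <functional>
#include <vector>

void sort_examples() {
    std::vector<float> data(1000000);
    for( size_t i=0; i<data.size(); ++i )
        data[i] = float(data.size() - i);

    // Default comparator (std::less on the value type): ascending order.
    tbb::parallel_sort( data.begin(), data.end() );

    // Explicit comparator: descending order.
    tbb::parallel_sort( data.begin(), data.end(), std::greater<float>() );
}
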
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_while.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/parallel_while.h
new file mode 100644 (file)
index 0000000..21c2bc1
--- /dev/null
@@ -0,0 +1,194 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_parallel_while
+#define __TBB_parallel_while
+
+#include "task.h"
+#include <new>
+
+namespace tbb {
+
+template<typename Body>
+class parallel_while;
+
+//! @cond INTERNAL
+namespace internal {
+
+    template<typename Stream, typename Body> class while_task;
+
+    //! For internal use only.
+    /** Executes one iteration of a while.
+        @ingroup algorithms */
+    template<typename Body>
+    class while_iteration_task: public task {
+        const Body& my_body;
+        typename Body::argument_type my_value;
+        /*override*/ task* execute() {
+            my_body(my_value); 
+            return NULL;
+        }
+        while_iteration_task( const typename Body::argument_type& value, const Body& body ) : 
+            my_body(body), my_value(value)
+        {}
+        template<typename Body_> friend class while_group_task;
+        friend class tbb::parallel_while<Body>;
+    };
+
+    //! For internal use only
+    /** Unpacks a block of iterations.
+        @ingroup algorithms */
+    template<typename Body>
+    class while_group_task: public task {
+        static const size_t max_arg_size = 4;         
+        const Body& my_body;
+        size_t size;
+        typename Body::argument_type my_arg[max_arg_size];
+        while_group_task( const Body& body ) : my_body(body), size(0) {} 
+        /*override*/ task* execute() {
+            typedef while_iteration_task<Body> iteration_type;
+            __TBB_ASSERT( size>0, NULL );
+            task_list list;
+            task* t; 
+            size_t k=0; 
+            for(;;) {
+                t = new( allocate_child() ) iteration_type(my_arg[k],my_body); 
+                if( ++k==size ) break;
+                list.push_back(*t);
+            }
+            set_ref_count(int(k+1));
+            spawn(list);
+            spawn_and_wait_for_all(*t);
+            return NULL;
+        }
+        template<typename Stream, typename Body_> friend class while_task;
+    };
+    
+    //! For internal use only.
+    /** Gets block of iterations from a stream and packages them into a while_group_task.
+        @ingroup algorithms */
+    template<typename Stream, typename Body>
+    class while_task: public task {
+        Stream& my_stream;
+        const Body& my_body;
+        empty_task& my_barrier;
+        /*override*/ task* execute() {
+            typedef while_group_task<Body> block_type;
+            block_type& t = *new( allocate_additional_child_of(my_barrier) ) block_type(my_body);
+            size_t k=0; 
+            while( my_stream.pop_if_present(t.my_arg[k]) ) {
+                if( ++k==block_type::max_arg_size ) {
+                    // There might be more iterations.
+                    recycle_to_reexecute();
+                    break;
+                }
+            }
+            if( k==0 ) {
+                destroy(t);
+                return NULL;
+            } else {
+                t.size = k;
+                return &t;
+            }
+        }
+        while_task( Stream& stream, const Body& body, empty_task& barrier ) : 
+            my_stream(stream),
+            my_body(body),
+            my_barrier(barrier)
+        {} 
+        friend class tbb::parallel_while<Body>;
+    };
+
+} // namespace internal
+//! @endcond
+
+//! Parallel iteration over a stream, with optional addition of more work.
+/** The Body b has the requirements: \n
+        "b(v)"                      \n
+        "Body::argument_type"       \n
+    where v is of type Body::argument_type
+    @ingroup algorithms */
+template<typename Body>
+class parallel_while: internal::no_copy {
+public:
+    //! Construct empty non-running parallel while.
+    parallel_while() : my_body(NULL), my_barrier(NULL) {}
+
+    //! Destructor cleans up data members before returning.
+    ~parallel_while() {
+        if( my_barrier ) {
+            my_barrier->destroy(*my_barrier);    
+            my_barrier = NULL;
+        }
+    }
+
+    //! Type of items
+    typedef typename Body::argument_type value_type;
+
+    //! Apply the body to each item in the stream.
+    /** A Stream s has the requirements \n
+         "S::value_type"                \n
+         "s.pop_if_present(value) is convertible to bool" */
+    template<typename Stream>
+    void run( Stream& stream, const Body& body );
+
+    //! Add a work item while running.
+    /** Should be called only from within the body's operator() or a task spawned therefrom. */
+    void add( const value_type& item );
+
+private:
+    const Body* my_body;
+    empty_task* my_barrier;
+};
+
+template<typename Body>
+template<typename Stream>
+void parallel_while<Body>::run( Stream& stream, const Body& body ) {
+    using namespace internal;
+    empty_task& barrier = *new( task::allocate_root() ) empty_task();
+    my_body = &body;
+    my_barrier = &barrier;
+    my_barrier->set_ref_count(2);
+    while_task<Stream,Body>& w = *new( my_barrier->allocate_child() ) while_task<Stream,Body>( stream, body, barrier );
+    my_barrier->spawn_and_wait_for_all(w);
+    my_barrier->destroy(*my_barrier);
+    my_barrier = NULL;
+    my_body = NULL;
+}
+
+template<typename Body>
+void parallel_while<Body>::add( const value_type& item ) {
+    __TBB_ASSERT(my_barrier,"attempt to add to parallel_while that is not running");
+    typedef internal::while_iteration_task<Body> iteration_type;
+    iteration_type& i = *new( task::allocate_additional_child_of(*my_barrier) ) iteration_type(item,*my_body);
+    task::self().spawn( i );
+}
+
+} // namespace tbb
+
+#endif /* __TBB_parallel_while */
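
A minimal sketch of the Stream/Body pair that parallel_while expects; Item, ItemStream, ProcessItem, and process_list are hypothetical names introduced here for illustration.

#include "tbb/parallel_while.h"

// Hypothetical singly linked list of items to process.
struct Item {
    Item* next;
    int value;
};

// Stream: hands out list nodes one at a time via pop_if_present.
class ItemStream {
    Item* my_head;
public:
    ItemStream( Item* head ) : my_head(head) {}
    bool pop_if_present( Item*& item ) {
        if( !my_head ) return false;
        item = my_head;
        my_head = my_head->next;
        return true;
    }
};

// Body: processes one item; the argument_type typedef is required by parallel_while.
class ProcessItem {
public:
    typedef Item* argument_type;
    void operator()( Item* item ) const {
        item->value *= 2;           // stand-in for real per-item work
    }
};

void process_list( Item* head ) {
    ItemStream stream(head);
    ProcessItem body;
    tbb::parallel_while<ProcessItem> w;
    w.run( stream, body );
}
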
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/partitioner.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/partitioner.h
new file mode 100644 (file)
index 0000000..98db3ac
--- /dev/null
@@ -0,0 +1,228 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_partitioner_H
+#define __TBB_partitioner_H
+
+#include "task.h"
+
+namespace tbb {
+class affinity_partitioner;
+
+//! @cond INTERNAL
+namespace internal {
+size_t __TBB_EXPORTED_FUNC get_initial_auto_partitioner_divisor();
+
+//! Defines entry points into the TBB run-time library.
+/** The entry points are the constructor and destructor. */
+class affinity_partitioner_base_v3: no_copy {
+    friend class tbb::affinity_partitioner;
+    //! Array that remembers affinities of tree positions to affinity_id.
+    /** NULL if my_size==0. */
+    affinity_id* my_array;
+    //! Number of elements in my_array.
+    size_t my_size;
+    //! Zeros the fields.
+    affinity_partitioner_base_v3() : my_array(NULL), my_size(0) {}
+    //! Deallocates my_array.
+    ~affinity_partitioner_base_v3() {resize(0);}
+    //! Resize my_array.
+    /** Retains values if resulting size is the same. */
+    void __TBB_EXPORTED_METHOD resize( unsigned factor );
+    friend class affinity_partition_type;
+};
+
+//! Provides default methods for partition objects without affinity.
+class partition_type_base {
+public:
+    void set_affinity( task & ) {}
+    void note_affinity( task::affinity_id ) {}
+    task* continue_after_execute_range() {return NULL;}
+    bool decide_whether_to_delay() {return false;}
+    void spawn_or_delay( bool, task& b ) {
+        task::spawn(b);
+    }
+};
+
+class affinity_partition_type;
+
+template<typename Range, typename Body, typename Partitioner> class start_for;
+template<typename Range, typename Body, typename Partitioner> class start_reduce;
+template<typename Range, typename Body> class start_reduce_with_affinity;
+template<typename Range, typename Body, typename Partitioner> class start_scan;
+
+} // namespace internal
+//! @endcond
+
+//! A simple partitioner 
+/** Recursively divides the range until it is no longer divisible. 
+    @ingroup algorithms */
+class simple_partitioner {
+public:
+    simple_partitioner() {}
+private:
+    template<typename Range, typename Body, typename Partitioner> friend class internal::start_for;
+    template<typename Range, typename Body, typename Partitioner> friend class internal::start_reduce;
+    template<typename Range, typename Body, typename Partitioner> friend class internal::start_scan;
+
+    class partition_type: public internal::partition_type_base {
+    public:
+        bool should_execute_range(const task& ) {return false;}
+        partition_type( const simple_partitioner& ) {}
+        partition_type( const partition_type&, split ) {}
+    };
+};
+
+//! An auto partitioner 
+/** The range is initially divided into several large chunks.
+    Chunks are further subdivided into VICTIM_CHUNKS pieces if they are stolen and divisible.
+    @ingroup algorithms */
+class auto_partitioner {
+public:
+    auto_partitioner() {}
+
+private:
+    template<typename Range, typename Body, typename Partitioner> friend class internal::start_for;
+    template<typename Range, typename Body, typename Partitioner> friend class internal::start_reduce;
+    template<typename Range, typename Body, typename Partitioner> friend class internal::start_scan;
+
+    class partition_type: public internal::partition_type_base {
+        size_t num_chunks;
+        static const size_t VICTIM_CHUNKS = 4;
+public:
+        bool should_execute_range(const task &t) {
+            if( num_chunks<VICTIM_CHUNKS && t.is_stolen_task() )
+                num_chunks = VICTIM_CHUNKS;
+            return num_chunks==1;
+        }
+        partition_type( const auto_partitioner& ) : num_chunks(internal::get_initial_auto_partitioner_divisor()) {}
+        partition_type( partition_type& pt, split ) {
+            num_chunks = pt.num_chunks /= 2u;
+        }
+    };
+};
+
+//! An affinity partitioner
+class affinity_partitioner: internal::affinity_partitioner_base_v3 {
+public:
+    affinity_partitioner() {}
+
+private:
+    template<typename Range, typename Body, typename Partitioner> friend class internal::start_for;
+    template<typename Range, typename Body, typename Partitioner> friend class internal::start_reduce;
+    template<typename Range, typename Body> friend class internal::start_reduce_with_affinity;
+    template<typename Range, typename Body, typename Partitioner> friend class internal::start_scan;
+
+    typedef internal::affinity_partition_type partition_type;
+    friend class internal::affinity_partition_type;
+};
+
+//! @cond INTERNAL
+namespace internal {
+
+class affinity_partition_type: public no_copy {
+    //! Must be a power of two
+    static const unsigned factor = 16;
+    static const size_t VICTIM_CHUNKS = 4;
+
+    internal::affinity_id* my_array;
+    task_list delay_list;
+    unsigned map_begin, map_end;
+    size_t num_chunks;
+public:
+    affinity_partition_type( affinity_partitioner& ap ) {
+        __TBB_ASSERT( (factor&(factor-1))==0, "factor must be power of two" ); 
+        ap.resize(factor);
+        my_array = ap.my_array;
+        map_begin = 0;
+        map_end = unsigned(ap.my_size);
+        num_chunks = internal::get_initial_auto_partitioner_divisor();
+    }
+    affinity_partition_type(affinity_partition_type& p, split) : my_array(p.my_array) {
+        __TBB_ASSERT( p.map_end-p.map_begin<factor || (p.map_end-p.map_begin)%factor==0, NULL );
+        num_chunks = p.num_chunks /= 2;
+        unsigned e = p.map_end;
+        unsigned d = (e - p.map_begin)/2;
+        if( d>factor ) 
+            d &= 0u-factor;
+        map_end = e;
+        map_begin = p.map_end = e-d;
+    }
+
+    bool should_execute_range(const task &t) {
+        if( num_chunks < VICTIM_CHUNKS && t.is_stolen_task() )
+            num_chunks = VICTIM_CHUNKS;
+        return num_chunks == 1;
+    }
+
+    void set_affinity( task &t ) {
+        if( map_begin<map_end )
+            t.set_affinity( my_array[map_begin] );
+    }
+    void note_affinity( task::affinity_id id ) {
+        if( map_begin<map_end ) 
+            my_array[map_begin] = id;
+    }
+    task* continue_after_execute_range() {
+        task* first = NULL;
+        if( !delay_list.empty() ) {
+            first = &delay_list.pop_front();
+            while( !delay_list.empty() ) {
+                task::spawn(*first);
+                first = &delay_list.pop_front();
+            }
+        }
+        return first;
+    }
+    bool decide_whether_to_delay() {
+        // The possible underflow caused by "-1u" is deliberate
+        return (map_begin&(factor-1))==0 && map_end-map_begin-1u<factor;
+    }
+    void spawn_or_delay( bool delay, task& b ) {
+        if( delay )  
+            delay_list.push_back(b);
+        else 
+            task::spawn(b);
+    }
+
+    ~affinity_partition_type() {
+        // The delay_list can be non-empty if an exception is thrown.
+        while( !delay_list.empty() ) {
+            task& t = delay_list.pop_front();
+            t.destroy(t);
+        } 
+    }
+};
+
+} // namespace internal
+//! @endcond
+
+
+} // namespace tbb
+
+#endif /* __TBB_partitioner_H */
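
To illustrate how these partitioners are selected in client code, here is a sketch of the same parallel_for loop run with each of the three; Scale and scale_all are hypothetical names, not part of the library.

#include "tbb/parallel_for.h"
#include "tbb/blocked_range.h"
#include "tbb/partitioner.h"

struct Scale {                       // simple body reused with each partitioner below
    float* a;
    void operator()( const tbb::blocked_range<size_t>& r ) const {
        for( size_t i=r.begin(); i!=r.end(); ++i ) a[i] *= 2.0f;
    }
};

void scale_all( float a[], size_t n ) {
    Scale body; body.a = a;
    tbb::blocked_range<size_t> range(0, n, 1000);   // 1000 = grainsize used by simple_partitioner

    // simple_partitioner: splits until chunks are smaller than the grainsize.
    tbb::parallel_for( range, body, tbb::simple_partitioner() );

    // auto_partitioner: the library chooses chunk sizes, subdividing further on steals.
    tbb::parallel_for( range, body, tbb::auto_partitioner() );

    // affinity_partitioner: remembers the chunk-to-thread mapping across calls;
    // the partitioner object must outlive repeated calls to be useful.
    static tbb::affinity_partitioner ap;
    tbb::parallel_for( range, body, ap );
}
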
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/pipeline.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/pipeline.h
new file mode 100644 (file)
index 0000000..fa726cd
--- /dev/null
@@ -0,0 +1,559 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_pipeline_H 
+#define __TBB_pipeline_H 
+
+#include "atomic.h"
+#include "task.h"
+#include "tbb_allocator.h"
+#include <cstddef>
+
+namespace tbb {
+
+class pipeline;
+class filter;
+
+//! @cond INTERNAL
+namespace internal {
+
+// The argument for PIPELINE_VERSION should be an integer between 2 and 9
+#define __TBB_PIPELINE_VERSION(x) (unsigned char)(x-2)<<1
+
+typedef unsigned long Token;
+typedef long tokendiff_t;
+class stage_task;
+class input_buffer;
+class pipeline_root_task;
+class pipeline_cleaner;
+
+} // namespace internal
+
+namespace interface5 {
+    template<typename T, typename U> class filter_t;
+
+    namespace internal {
+        class pipeline_proxy;
+    }
+}
+
+//! @endcond
+
+//! A stage in a pipeline.
+/** @ingroup algorithms */
+class filter: internal::no_copy {
+private:
+    //! Value used to mark "not in pipeline"
+    static filter* not_in_pipeline() {return reinterpret_cast<filter*>(intptr_t(-1));}
+    
+    //! Bit 0 (the lowest bit) distinguishes parallel vs. serial filters
+    static const unsigned char filter_is_serial = 0x1; 
+
+    //! 4th bit distinguishes ordered vs unordered filters.
+    /** The bit was not set for parallel filters in TBB 2.1 and earlier,
+        but the is_ordered() function always treats parallel filters as out of order. */
+    static const unsigned char filter_is_out_of_order = 0x1<<4;  
+
+    //! 5th bit distinguishes thread-bound and regular filters.
+    static const unsigned char filter_is_bound = 0x1<<5;  
+
+    //! 7th bit defines exception propagation mode expected by the application.
+    static const unsigned char exact_exception_propagation =
+#if TBB_USE_CAPTURED_EXCEPTION
+            0x0;
+#else
+            0x1<<7;
+#endif /* TBB_USE_CAPTURED_EXCEPTION */
+
+    static const unsigned char current_version = __TBB_PIPELINE_VERSION(5);
+    static const unsigned char version_mask = 0x7<<1; // bits 1-3 are for version
+public:
+    enum mode {
+        //! processes multiple items in parallel and in no particular order
+        parallel = current_version | filter_is_out_of_order, 
+        //! processes items one at a time; all such filters process items in the same order
+        serial_in_order = current_version | filter_is_serial,
+        //! processes items one at a time and in no particular order
+        serial_out_of_order = current_version | filter_is_serial | filter_is_out_of_order,
+        //! @deprecated use serial_in_order instead
+        serial = serial_in_order
+    };
+protected:
+    filter( bool is_serial_ ) : 
+        next_filter_in_pipeline(not_in_pipeline()),
+        my_input_buffer(NULL),
+        my_filter_mode(static_cast<unsigned char>((is_serial_ ? serial : parallel) | exact_exception_propagation)),
+        prev_filter_in_pipeline(not_in_pipeline()),
+        my_pipeline(NULL),
+        next_segment(NULL)
+    {}
+    
+    filter( mode filter_mode ) :
+        next_filter_in_pipeline(not_in_pipeline()),
+        my_input_buffer(NULL),
+        my_filter_mode(static_cast<unsigned char>(filter_mode | exact_exception_propagation)),
+        prev_filter_in_pipeline(not_in_pipeline()),
+        my_pipeline(NULL),
+        next_segment(NULL)
+    {}
+
+public:
+    //! True if filter is serial.
+    bool is_serial() const {
+        return bool( my_filter_mode & filter_is_serial );
+    }  
+    
+    //! True if filter must receive stream in order.
+    bool is_ordered() const {
+        return (my_filter_mode & (filter_is_out_of_order|filter_is_serial))==filter_is_serial;
+    }
+
+    //! True if filter is thread-bound.
+    bool is_bound() const {
+        return ( my_filter_mode & filter_is_bound )==filter_is_bound;
+    }
+
+    //! Operate on an item from the input stream, and return item for output stream.
+    /** Returns NULL if filter is a sink. */
+    virtual void* operator()( void* item ) = 0;
+
+    //! Destroy filter.  
+    /** If the filter was added to a pipeline, the pipeline must be destroyed first. */
+    virtual __TBB_EXPORTED_METHOD ~filter();
+
+#if __TBB_TASK_GROUP_CONTEXT
+    //! Destroys item if pipeline was cancelled.
+    /** Required to prevent memory leaks.
+        Note it can be called concurrently even for serial filters.*/
+    virtual void finalize( void* /*item*/ ) {};
+#endif
+
+private:
+    //! Pointer to next filter in the pipeline.
+    filter* next_filter_in_pipeline;
+
+    //! has the filter not yet processed all the tokens it will ever see?  
+    //  (pipeline has not yet reached end_of_input or this filter has not yet
+    //  seen the last token produced by input_filter)
+    bool has_more_work();
+
+    //! Buffer for incoming tokens, or NULL if not required.
+    /** The buffer is required if the filter is serial or follows a thread-bound one. */
+    internal::input_buffer* my_input_buffer;
+
+    friend class internal::stage_task;
+    friend class internal::pipeline_root_task;
+    friend class pipeline;
+    friend class thread_bound_filter;
+
+    //! Storage for filter mode and dynamically checked implementation version.
+    const unsigned char my_filter_mode;
+
+    //! Pointer to previous filter in the pipeline.
+    filter* prev_filter_in_pipeline;
+
+    //! Pointer to the pipeline.
+    pipeline* my_pipeline;
+
+    //! Pointer to the next "segment" of filters, or NULL if not required.
+    /** In each segment, the first filter is not thread-bound but follows a thread-bound one. */
+    filter* next_segment;
+};
+
+//! A stage in a pipeline served by a user thread.
+/** @ingroup algorithms */
+class thread_bound_filter: public filter {
+public:
+    enum result_type {
+        // item was processed
+        success,
+        // item is currently not available
+        item_not_available,
+        // there are no more items to process
+        end_of_stream
+    };
+protected:
+    thread_bound_filter(mode filter_mode): 
+         filter(static_cast<mode>(filter_mode | filter::filter_is_bound | filter::exact_exception_propagation))
+    {}
+public:
+    //! If a data item is available, invoke operator() on that item.  
+    /** This interface is non-blocking.
+        Returns 'success' if an item was processed.
+        Returns 'item_not_available' if no item can be processed now 
+        but more may arrive in the future, or if token limit is reached. 
+        Returns 'end_of_stream' if there are no more items to process. */
+    result_type __TBB_EXPORTED_METHOD try_process_item(); 
+
+    //! Wait until a data item becomes available, and invoke operator() on that item.
+    /** This interface is blocking.
+        Returns 'success' if an item was processed.
+        Returns 'end_of_stream' if there are no more items to process.
+        Never returns 'item_not_available', as it blocks until another return condition applies. */
+    result_type __TBB_EXPORTED_METHOD process_item();
+
+private:
+    //! Internal routine for item processing
+    result_type internal_process_item(bool is_blocking);
+};
+
+//! A processing pipeline that applies filters to items.
+/** @ingroup algorithms */
+class pipeline {
+public:
+    //! Construct empty pipeline.
+    __TBB_EXPORTED_METHOD pipeline();
+
+    /** Though the current implementation declares the destructor virtual, do not rely on this 
+        detail.  The virtualness is deprecated and may disappear in future versions of TBB. */
+    virtual __TBB_EXPORTED_METHOD ~pipeline();
+
+    //! Add filter to end of pipeline.
+    void __TBB_EXPORTED_METHOD add_filter( filter& filter_ );
+
+    //! Run the pipeline to completion.
+    void __TBB_EXPORTED_METHOD run( size_t max_number_of_live_tokens );
+
+#if __TBB_TASK_GROUP_CONTEXT
+    //! Run the pipeline to completion with user-supplied context.
+    void __TBB_EXPORTED_METHOD run( size_t max_number_of_live_tokens, tbb::task_group_context& context );
+#endif
+
+    //! Remove all filters from the pipeline.
+    void __TBB_EXPORTED_METHOD clear();
+
+private:
+    friend class internal::stage_task;
+    friend class internal::pipeline_root_task;
+    friend class filter;
+    friend class thread_bound_filter;
+    friend class internal::pipeline_cleaner;
+    friend class tbb::interface5::internal::pipeline_proxy;
+
+    //! Pointer to first filter in the pipeline.
+    filter* filter_list;
+
+    //! Pointer to location where address of next filter to be added should be stored.
+    filter* filter_end;
+
+    //! Task whose reference count is used to determine when all stages are done.
+    task* end_counter;
+
+    //! Number of idle tokens waiting for input stage.
+    atomic<internal::Token> input_tokens;
+
+    //! Global counter of tokens 
+    atomic<internal::Token> token_counter;
+
+    //! False until fetch_input returns NULL.
+    bool end_of_input;
+
+    //! True if the pipeline contains a thread-bound filter; false otherwise.
+    bool has_thread_bound_filters;
+
+    //! Remove filter from pipeline.
+    void remove_filter( filter& filter_ );
+
+    //! Not used, but retained to satisfy old export files.
+    void __TBB_EXPORTED_METHOD inject_token( task& self );
+
+#if __TBB_TASK_GROUP_CONTEXT
+    //! Performs cleanup if the pipeline is cancelled or an exception occurred
+    void clear_filters();
+#endif
+};
+
+//------------------------------------------------------------------------
+// Support for lambda-friendly parallel_pipeline interface
+//------------------------------------------------------------------------
+
+namespace interface5 {
+
+namespace internal {
+    template<typename T, typename U, typename Body> class concrete_filter;
+}
+
+//! input_filter control to signal end-of-input for parallel_pipeline
+class flow_control {
+    bool is_pipeline_stopped;
+    flow_control() { is_pipeline_stopped = false; }
+    template<typename T, typename U, typename Body> friend class internal::concrete_filter;
+public:
+    void stop() { is_pipeline_stopped = true; }
+};
+
+//! @cond INTERNAL
+namespace internal {
+
+template<typename T, typename U, typename Body>
+class concrete_filter: public tbb::filter {
+    const Body& my_body;
+
+    typedef typename tbb::tbb_allocator<U> u_allocator;
+    typedef typename tbb::tbb_allocator<T> t_allocator;
+
+    /*override*/ void* operator()(void* input) {
+        T* temp_input = (T*)input;
+        // Call user's operator()() here
+        U* output_u = u_allocator().allocate(1);
+        void* output = (void*) new (output_u) U(my_body(*temp_input)); 
+        t_allocator().destroy(temp_input);
+        t_allocator().deallocate(temp_input,1);
+        return output;
+    }
+
+public:
+    concrete_filter(tbb::filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {}
+};
+
+template<typename U, typename Body>
+class concrete_filter<void,U,Body>: public filter {
+    const Body& my_body;
+
+    typedef typename tbb::tbb_allocator<U> u_allocator;
+
+    /*override*/void* operator()(void*) {
+        flow_control control;
+        U* output_u = u_allocator().allocate(1);
+        (void) new (output_u) U(my_body(control));
+        if(control.is_pipeline_stopped) {
+            u_allocator().destroy(output_u);
+            u_allocator().deallocate(output_u,1);
+            output_u = NULL;
+        }
+        return (void*)output_u;
+    }
+public:
+    concrete_filter(tbb::filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {}
+};
+
+template<typename T, typename Body>
+class concrete_filter<T,void,Body>: public filter {
+    const Body& my_body;
+   
+    typedef typename tbb::tbb_allocator<T> t_allocator;
+
+    /*override*/ void* operator()(void* input) {
+        T* temp_input = (T*)input;
+        my_body(*temp_input);
+        t_allocator().destroy(temp_input);
+        t_allocator().deallocate(temp_input,1);
+        return NULL;
+    }
+public:
+    concrete_filter(tbb::filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {}
+};
+
+template<typename Body>
+class concrete_filter<void,void,Body>: public filter {
+    const Body& my_body;
+    
+    /** Override privately because it is always called virtually */
+    /*override*/ void* operator()(void*) {
+        flow_control control;
+        my_body(control);
+        void* output = control.is_pipeline_stopped ? NULL : (void*)(intptr_t)-1; 
+        return output;
+    }
+public:
+    concrete_filter(filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {}
+};
+
+//! The class that represents an object of the pipeline for parallel_pipeline().
+/** It primarily serves as an RAII class that deletes heap-allocated filter instances. */
+class pipeline_proxy {
+    tbb::pipeline my_pipe;
+public:
+    pipeline_proxy( const filter_t<void,void>& filter_chain );
+    ~pipeline_proxy() {
+        while( filter* f = my_pipe.filter_list ) 
+            delete f; // filter destructor removes it from the pipeline
+    }
+    tbb::pipeline* operator->() { return &my_pipe; }
+};
+
+//! Abstract base class that represents a node in a parse tree underlying a filter_t.
+/** These nodes are always heap-allocated and can be shared by filter_t objects. */
+class filter_node: tbb::internal::no_copy {
+    /** Count must be atomic because it is hidden state for the user, but might be shared by threads. */
+    tbb::atomic<intptr_t> ref_count;
+protected:
+    filter_node() {
+        ref_count = 0;
+#ifdef __TBB_TEST_FILTER_NODE_COUNT
+        ++(__TBB_TEST_FILTER_NODE_COUNT);
+#endif
+    }
+public:
+    //! Add concrete_filter to pipeline 
+    virtual void add_to( pipeline& ) = 0;
+    //! Increment reference count
+    void add_ref() {++ref_count;}
+    //! Decrement reference count and delete if it becomes zero.
+    void remove_ref() {
+        __TBB_ASSERT(ref_count>0,"ref_count underflow");
+        if( --ref_count==0 ) 
+            delete this;
+    }
+    virtual ~filter_node() {
+#ifdef __TBB_TEST_FILTER_NODE_COUNT
+        --(__TBB_TEST_FILTER_NODE_COUNT);
+#endif
+    }
+};
+
+//! Node in parse tree representing result of make_filter.
+template<typename T, typename U, typename Body>
+class filter_node_leaf: public filter_node  {
+    const tbb::filter::mode mode;
+    const Body body;
+    /*override*/void add_to( pipeline& p ) {
+        concrete_filter<T,U,Body>* f = new concrete_filter<T,U,Body>(mode,body);
+        p.add_filter( *f );
+    }
+public:
+    filter_node_leaf( tbb::filter::mode m, const Body& b ) : mode(m), body(b) {}
+};
+
+//! Node in parse tree representing join of two filters.
+class filter_node_join: public filter_node {
+    friend class filter_node; // to suppress GCC 3.2 warnings
+    filter_node& left;
+    filter_node& right;
+    /*override*/~filter_node_join() {
+       left.remove_ref();
+       right.remove_ref();
+    }
+    /*override*/void add_to( pipeline& p ) {
+        left.add_to(p);
+        right.add_to(p);
+    }
+public:
+    filter_node_join( filter_node& x, filter_node& y ) : left(x), right(y) {
+       left.add_ref();
+       right.add_ref();
+    }
+};
+
+} // namespace internal
+//! @endcond
+
+//! Create a filter to participate in parallel_pipeline
+template<typename T, typename U, typename Body>
+filter_t<T,U> make_filter(tbb::filter::mode mode, const Body& body) {
+    return new internal::filter_node_leaf<T,U,Body>(mode, body);
+}
+
+template<typename T, typename V, typename U>
+filter_t<T,U> operator& (const filter_t<T,V>& left, const filter_t<V,U>& right) {
+    __TBB_ASSERT(left.root,"cannot use default-constructed filter_t as left argument of '&'");
+    __TBB_ASSERT(right.root,"cannot use default-constructed filter_t as right argument of '&'");
+    return new internal::filter_node_join(*left.root,*right.root);
+}
+
+//! Class representing a chain of type-safe pipeline filters
+template<typename T, typename U>
+class filter_t {
+    typedef internal::filter_node filter_node;
+    filter_node* root;
+    filter_t( filter_node* root_ ) : root(root_) {
+        root->add_ref();
+    }
+    friend class internal::pipeline_proxy;
+    template<typename T_, typename U_, typename Body>
+    friend filter_t<T_,U_> make_filter(tbb::filter::mode, const Body& );
+    template<typename T_, typename V_, typename U_>
+    friend filter_t<T_,U_> operator& (const filter_t<T_,V_>& , const filter_t<V_,U_>& );
+public:
+    filter_t() : root(NULL) {}
+    filter_t( const filter_t<T,U>& rhs ) : root(rhs.root) {
+        if( root ) root->add_ref();
+    }
+    template<typename Body>
+    filter_t( tbb::filter::mode mode, const Body& body ) :
+        root( new internal::filter_node_leaf<T,U,Body>(mode, body) ) {
+        root->add_ref();
+    }
+
+    void operator=( const filter_t<T,U>& rhs ) {
+        // The order of operations below is carefully chosen so that reference counts remain correct
+        // in the unlikely event that remove_ref throws an exception.
+        filter_node* old = root;
+        root = rhs.root; 
+        if( root ) root->add_ref();
+        if( old ) old->remove_ref();
+    }
+    ~filter_t() {
+        if( root ) root->remove_ref();
+    }
+    void clear() {
+        // Like operator= with filter_t() on right side.
+        if( root ) {
+            filter_node* old = root;
+            root = NULL;
+            old->remove_ref();
+        }
+    }
+};
+
+inline internal::pipeline_proxy::pipeline_proxy( const filter_t<void,void>& filter_chain ) : my_pipe() {
+    __TBB_ASSERT( filter_chain.root, "cannot apply parallel_pipeline to default-constructed filter_t"  );
+    filter_chain.root->add_to(my_pipe);
+}
+
+inline void parallel_pipeline(size_t max_number_of_live_tokens, const filter_t<void,void>& filter_chain
+#if __TBB_TASK_GROUP_CONTEXT
+    , tbb::task_group_context& context
+#endif
+    ) {
+    internal::pipeline_proxy pipe(filter_chain);
+    // tbb::pipeline::run() is called via the proxy
+    pipe->run(max_number_of_live_tokens
+#if __TBB_TASK_GROUP_CONTEXT
+              , context
+#endif
+    );
+}
+
+#if __TBB_TASK_GROUP_CONTEXT
+inline void parallel_pipeline(size_t max_number_of_live_tokens, const filter_t<void,void>& filter_chain) {
+    tbb::task_group_context context;
+    parallel_pipeline(max_number_of_live_tokens, filter_chain, context);
+}
+#endif // __TBB_TASK_GROUP_CONTEXT
+
+} // interface5
+
+using interface5::flow_control;
+using interface5::filter_t;
+using interface5::make_filter;
+using interface5::parallel_pipeline;
+
+} // tbb
+
+#endif /* __TBB_pipeline_H */
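
A minimal sketch of the lambda-friendly interface declared above, written here with function objects: a three-stage pipeline that generates values, transforms them in parallel, and prints them in order. Source, Transform, Sink, and run_pipeline are illustrative names only.

#include "tbb/pipeline.h"
#include <cmath>
#include <cstdio>

// Stage 1 (serial, in order): produces the next value, or stops the pipeline.
struct Source {
    mutable int count;               // hypothetical input: the numbers 1..100
    Source() : count(0) {}
    double operator()( tbb::flow_control& fc ) const {
        if( count >= 100 ) { fc.stop(); return 0; }
        return double(++count);
    }
};

// Stage 2 (parallel): transforms each value independently.
struct Transform {
    double operator()( double x ) const { return std::sqrt(x); }
};

// Stage 3 (serial, in order): consumes the results.
struct Sink {
    void operator()( double x ) const { std::printf("%g\n", x); }
};

void run_pipeline() {
    tbb::parallel_pipeline( /*max_number_of_live_tokens=*/16,
        tbb::make_filter<void,double>( tbb::filter::serial_in_order, Source() ) &
        tbb::make_filter<double,double>( tbb::filter::parallel, Transform() ) &
        tbb::make_filter<double,void>( tbb::filter::serial_in_order, Sink() ) );
}
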
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/queuing_mutex.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/queuing_mutex.h
new file mode 100644 (file)
index 0000000..ec9832c
--- /dev/null
@@ -0,0 +1,131 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_queuing_mutex_H
+#define __TBB_queuing_mutex_H
+
+#include "tbb_config.h"
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <cstring>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+#include "atomic.h"
+#include "tbb_profiling.h"
+
+namespace tbb {
+
+//! Queuing lock with local-only spinning.
+/** @ingroup synchronization */
+class queuing_mutex {
+public:
+    //! Construct unacquired mutex.
+    queuing_mutex() {
+        q_tail = NULL;
+#if TBB_USE_THREADING_TOOLS
+        internal_construct();
+#endif
+    }
+
+    //! The scoped locking pattern
+    /** It helps to avoid the common problem of forgetting to release the lock.
+        It also nicely provides the "node" for queuing locks. */
+    class scoped_lock: internal::no_copy {
+        //! Initialize fields to mean "no lock held".
+        void initialize() {
+            mutex = NULL;
+#if TBB_USE_ASSERT
+            internal::poison_pointer(next);
+#endif /* TBB_USE_ASSERT */
+        }
+    public:
+        //! Construct lock that has not acquired a mutex.
+        /** Equivalent to zero-initialization of *this. */
+        scoped_lock() {initialize();}
+
+        //! Acquire lock on given mutex.
+        scoped_lock( queuing_mutex& m ) {
+            initialize();
+            acquire(m);
+        }
+
+        //! Release lock (if lock is held).
+        ~scoped_lock() {
+            if( mutex ) release();
+        }
+
+        //! Acquire lock on given mutex.
+        void __TBB_EXPORTED_METHOD acquire( queuing_mutex& m );
+
+        //! Acquire lock on given mutex if free (i.e. non-blocking)
+        bool __TBB_EXPORTED_METHOD try_acquire( queuing_mutex& m );
+
+        //! Release lock.
+        void __TBB_EXPORTED_METHOD release();
+
+    private:
+        //! The pointer to the mutex owned, or NULL if not holding a mutex.
+        queuing_mutex* mutex;
+
+        //! The pointer to the next competitor for a mutex
+        scoped_lock *next;
+
+        //! The local spin-wait variable
+        /** Inverted (0 - blocked, 1 - acquired the mutex) for the sake of 
+            zero-initialization.  Defining it as an entire word instead of
+            a byte seems to help performance slightly. */
+        uintptr_t going;
+    };
+
+    void __TBB_EXPORTED_METHOD internal_construct();
+
+    // Mutex traits
+    static const bool is_rw_mutex = false;
+    static const bool is_recursive_mutex = false;
+    static const bool is_fair_mutex = true;
+
+    friend class scoped_lock;
+private:
+    //! The last competitor requesting the lock
+    atomic<scoped_lock*> q_tail;
+
+};
+
+__TBB_DEFINE_PROFILING_SET_NAME(queuing_mutex)
+
+} // namespace tbb
+
+#endif /* __TBB_queuing_mutex_H */
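
A short sketch of the scoped-locking pattern described above; Counter is a hypothetical type introduced for illustration.

#include "tbb/queuing_mutex.h"

// Hypothetical shared counter protected by a queuing_mutex.
struct Counter {
    tbb::queuing_mutex mutex;
    long value;
    Counter() : value(0) {}

    void increment() {
        // The scoped_lock both acquires the mutex and serves as the queue node;
        // the lock is released automatically when the scope ends.
        tbb::queuing_mutex::scoped_lock lock(mutex);
        ++value;
    }

    bool try_increment() {
        tbb::queuing_mutex::scoped_lock lock;
        if( lock.try_acquire(mutex) ) {   // non-blocking attempt
            ++value;
            return true;                  // lock released by the destructor
        }
        return false;
    }
};
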
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/queuing_rw_mutex.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/queuing_rw_mutex.h
new file mode 100644 (file)
index 0000000..5e35478
--- /dev/null
@@ -0,0 +1,173 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_queuing_rw_mutex_H
+#define __TBB_queuing_rw_mutex_H
+
+#include "tbb_config.h"
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <cstring>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+#include "atomic.h"
+#include "tbb_profiling.h"
+
+namespace tbb {
+
+//! Reader-writer lock with local-only spinning.
+/** Adapted from Krieger, Stumm, et al. pseudocode at
+    http://www.eecg.toronto.edu/parallel/pubs_abs.html#Krieger_etal_ICPP93
+    @ingroup synchronization */
+class queuing_rw_mutex {
+public:
+    //! Construct unacquired mutex.
+    queuing_rw_mutex() {
+        q_tail = NULL;
+#if TBB_USE_THREADING_TOOLS
+        internal_construct();
+#endif
+    }
+
+    //! Destructor asserts if the mutex is acquired, i.e. q_tail is non-NULL
+    ~queuing_rw_mutex() {
+#if TBB_USE_ASSERT
+        __TBB_ASSERT( !q_tail, "destruction of an acquired mutex");
+#endif
+    }
+
+    class scoped_lock;
+    friend class scoped_lock;
+
+    //! The scoped locking pattern
+    /** It helps to avoid the common problem of forgetting to release the lock.
+        It also nicely provides the "node" for queuing locks. */
+    class scoped_lock: internal::no_copy {
+        //! Initialize fields
+        void initialize() {
+            mutex = NULL;
+#if TBB_USE_ASSERT
+            state = 0xFF; // Set to invalid state
+            internal::poison_pointer(next);
+            internal::poison_pointer(prev);
+#endif /* TBB_USE_ASSERT */
+        }
+    public:
+        //! Construct lock that has not acquired a mutex.
+        /** Equivalent to zero-initialization of *this. */
+        scoped_lock() {initialize();}
+
+        //! Acquire lock on given mutex.
+        scoped_lock( queuing_rw_mutex& m, bool write=true ) {
+            initialize();
+            acquire(m,write);
+        }
+
+        //! Release lock (if lock is held).
+        ~scoped_lock() {
+            if( mutex ) release();
+        }
+
+        //! Acquire lock on given mutex.
+        void acquire( queuing_rw_mutex& m, bool write=true );
+
+        //! Try acquire lock on given mutex.
+        bool try_acquire( queuing_rw_mutex& m, bool write=true );
+
+        //! Release lock.
+        void release();
+
+        //! Upgrade reader to become a writer.
+        /** Returns true if the upgrade happened without re-acquiring the lock, and false otherwise. */
+        bool upgrade_to_writer();
+
+        //! Downgrade writer to become a reader.
+        bool downgrade_to_reader();
+
+    private:
+        //! The pointer to the mutex owned, or NULL if not holding a mutex
+        queuing_rw_mutex* mutex;
+
+        //! The pointer to the previous and next competitors for a mutex
+        scoped_lock * prev, * next;
+
+        typedef unsigned char state_t;
+
+        //! State of the request: reader, writer, active reader, other service states
+        atomic<state_t> state;
+
+        //! The local spin-wait variable
+        /** Corresponds to "spin" in the pseudocode but inverted for the sake of zero-initialization */
+        unsigned char going;
+
+        //! A tiny internal lock
+        unsigned char internal_lock;
+
+        //! Acquire the internal lock
+        void acquire_internal_lock();
+
+        //! Try to acquire the internal lock
+        /** Returns true if lock was successfully acquired. */
+        bool try_acquire_internal_lock();
+
+        //! Release the internal lock
+        void release_internal_lock();
+
+        //! Wait for internal lock to be released
+        void wait_for_release_of_internal_lock();
+
+        //! A helper function
+        void unblock_or_wait_on_internal_lock( uintptr_t );
+    };
+
+    void __TBB_EXPORTED_METHOD internal_construct();
+
+    // Mutex traits
+    static const bool is_rw_mutex = true;
+    static const bool is_recursive_mutex = false;
+    static const bool is_fair_mutex = true;
+
+private:
+    //! The last competitor requesting the lock
+    atomic<scoped_lock*> q_tail;
+
+};
+
+__TBB_DEFINE_PROFILING_SET_NAME(queuing_rw_mutex)
+
+} // namespace tbb
+
+#endif /* __TBB_queuing_rw_mutex_H */
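
A short sketch of reader/writer usage, including the upgrade_to_writer re-check idiom; Table is a hypothetical type introduced for illustration.

#include "tbb/queuing_rw_mutex.h"

// Hypothetical shared table protected by a queuing_rw_mutex.
struct Table {
    tbb::queuing_rw_mutex mutex;
    int data[256];

    int read( int i ) {
        // Reader lock: write=false allows concurrent readers.
        tbb::queuing_rw_mutex::scoped_lock lock( mutex, /*write=*/false );
        return data[i];
    }

    void update_if_zero( int i, int v ) {
        tbb::queuing_rw_mutex::scoped_lock lock( mutex, /*write=*/false );
        if( data[i] == 0 ) {
            if( !lock.upgrade_to_writer() ) {
                // The lock was temporarily released during the upgrade,
                // so the protected condition must be re-checked.
                if( data[i] != 0 )
                    return;
            }
            data[i] = v;     // now held as a writer
        }
    }
};
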
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/reader_writer_lock.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/reader_writer_lock.h
new file mode 100644 (file)
index 0000000..3a63969
--- /dev/null
@@ -0,0 +1,240 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_reader_writer_lock_H
+#define __TBB_reader_writer_lock_H
+
+#include "tbb_thread.h"
+#include "tbb_allocator.h"
+#include "atomic.h"
+
+namespace tbb {
+namespace interface5 {
+//! Writer-preference reader-writer lock with local-only spinning on readers.
+/** Loosely adapted from Mellor-Crummey and Scott pseudocode at
+    http://www.cs.rochester.edu/research/synchronization/pseudocode/rw.html#s_wp
+    @ingroup synchronization */
+    class reader_writer_lock : tbb::internal::no_copy {
+ public:
+    friend class scoped_lock;
+    friend class scoped_lock_read;
+    //! Status type for nodes associated with lock instances
+    /** waiting_nonblocking: the wait state for nonblocking lock
+          instances; for writes, these transition straight to active
+          states; for reads, these are unused.
+
+        waiting: the start and spin state for all lock instances; these will
+          transition to active state when appropriate.  Non-blocking write locks
+          transition from this state to waiting_nonblocking immediately.
+
+        active: the active state means that the lock instance holds
+          the lock; it will transition to invalid state during node deletion
+
+        invalid: the end state for all nodes; this is set in the
+          destructor so if we encounter this state, we are looking at
+          memory that has already been freed
+        
+        The state diagrams below describe the status transitions.
+        Single arrows indicate that the thread that owns the node is
+        responsible for the transition; double arrows indicate that
+        any thread could make the transition.
+
+        State diagram for scoped_lock status:
+
+        waiting ----------> waiting_nonblocking
+          |     _____________/       |
+          V    V                     V
+        active -----------------> invalid
+  
+        State diagram for scoped_lock_read status:
+
+        waiting 
+          |                        
+          V                        
+        active ----------------->invalid
+
+    */
+    enum status_t { waiting_nonblocking, waiting, active, invalid };
+
+    //! Constructs a new reader_writer_lock
+    reader_writer_lock() {
+        internal_construct();
+    }
+
+    //! Destructs a reader_writer_lock object
+    ~reader_writer_lock() {
+        internal_destroy();
+    }
+
+    //! The scoped lock pattern for write locks
+    /** Scoped locks help avoid the common problem of forgetting to release the lock.
+        This type also serves as the node for queuing locks. */
+    class scoped_lock : tbb::internal::no_copy {
+    public:
+        friend class reader_writer_lock;
+        //! Construct with blocking attempt to acquire write lock on the passed-in lock 
+        scoped_lock(reader_writer_lock& lock) {
+            internal_construct(lock);
+        }
+        
+        //! Destructor, releases the write lock
+        ~scoped_lock() {
+            internal_destroy();
+        }
+
+        void* operator new(size_t s) {
+            return tbb::internal::allocate_via_handler_v3(s);
+        }
+        void operator delete(void* p) {
+            tbb::internal::deallocate_via_handler_v3(p);
+        }
+
+    private:
+        //! The pointer to the mutex to lock
+        reader_writer_lock *mutex;
+        //! The next queued competitor for the mutex
+        scoped_lock* next;
+        //! Status flag of the thread associated with this node
+        atomic<status_t> status;
+
+        //! Construct scoped_lock that is not holding lock
+        scoped_lock();
+
+        void __TBB_EXPORTED_METHOD internal_construct(reader_writer_lock&);
+        void __TBB_EXPORTED_METHOD internal_destroy();
+   };
+
+    //! The scoped lock pattern for read locks
+    class scoped_lock_read : tbb::internal::no_copy {
+    public:
+        friend class reader_writer_lock;
+
+        //! Construct with blocking attempt to acquire read lock on the passed-in lock 
+        scoped_lock_read(reader_writer_lock& lock) {
+            internal_construct(lock);
+        }
+
+        //! Destructor, releases the read lock
+        ~scoped_lock_read() { 
+            internal_destroy();
+        }
+        
+        void* operator new(size_t s) {
+            return tbb::internal::allocate_via_handler_v3(s);
+        }
+        void operator delete(void* p) {
+            tbb::internal::deallocate_via_handler_v3(p);
+        }
+
+    private:
+        //! The pointer to the mutex to lock
+        reader_writer_lock *mutex;
+        //! The next queued competitor for the mutex
+        scoped_lock_read *next;
+        //! Status flag of the thread associated with this node
+        atomic<status_t> status;
+
+        //! Construct scoped_lock_read that is not holding lock
+        scoped_lock_read();
+
+        void __TBB_EXPORTED_METHOD internal_construct(reader_writer_lock&);
+        void __TBB_EXPORTED_METHOD internal_destroy();
+    };
+    
+    //! Acquires the reader_writer_lock for write.  
+    /** If the lock is currently held in write mode by another
+        context, the writer will block by spinning on a local
+        variable.  Exceptions thrown: improper_lock if the context tries
+        to acquire a reader_writer_lock that it already has write
+        ownership of.*/
+    void __TBB_EXPORTED_METHOD lock();
+
+    //! Tries to acquire the reader_writer_lock for write.   
+    /** This function does not block.  Return Value: True or false,
+        depending on whether the lock is acquired or not.  If the lock
+        is already held by this acquiring context, try_lock() returns
+        false. */
+    bool __TBB_EXPORTED_METHOD try_lock();
+
+    //! Acquires the reader_writer_lock for read.    
+    /** If the lock is currently held by a writer, this reader will
+        block and wait until the writers are done.  Exceptions thrown:
+        improper_lock if the context tries to acquire a
+        reader_writer_lock that it already has write ownership of. */
+    void __TBB_EXPORTED_METHOD lock_read(); 
+
+    //! Tries to acquire the reader_writer_lock for read.  
+    /** This function does not block.  Return Value: True or false,
+        depending on whether the lock is acquired or not.  */
+    bool __TBB_EXPORTED_METHOD try_lock_read();
+
+    //! Releases the reader_writer_lock
+    void __TBB_EXPORTED_METHOD unlock();
+
+ private:
+    void __TBB_EXPORTED_METHOD internal_construct();
+    void __TBB_EXPORTED_METHOD internal_destroy();
+
+    //! Attempts to acquire write lock
+    /** If unavailable, spins in blocking case, returns false in non-blocking case. */
+    bool start_write(scoped_lock *);
+    //! Sets writer_head to w and attempts to unblock
+    void set_next_writer(scoped_lock *w);
+    //! Relinquishes write lock to next waiting writer or group of readers 
+    void end_write(scoped_lock *);
+    //! Checks if current thread holds write lock
+    bool is_current_writer();
+
+    //! Attempts to acquire read lock
+    /** If unavailable, spins in blocking case, returns false in non-blocking case. */
+    void start_read(scoped_lock_read *);
+    //! Unblocks pending readers
+    void unblock_readers();
+    //! Relinquishes read lock by decrementing counter; last reader wakes pending writer
+    void end_read();
+
+    //! The list of pending readers
+    atomic<scoped_lock_read*> reader_head;
+    //! The list of pending writers
+    atomic<scoped_lock*> writer_head;
+    //! The last node in the list of pending writers
+    atomic<scoped_lock*> writer_tail;
+    //! The writer that owns the mutex, or tbb_thread::id() if there is no writer.
+    tbb_thread::id my_current_writer;
+    //! Status of mutex
+    atomic<unsigned> rdr_count_and_flags;
+};
+
+} // namespace interface5
+
+using interface5::reader_writer_lock;
+
+} // namespace tbb
+
+#endif /* __TBB_reader_writer_lock_H */
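For readers of this new header, a minimal usage sketch of the scoped-lock pattern it declares (not part of the patch; the writer_body/reader_body functions and the shared counter are invented for illustration, and a linked TBB library is assumed):

    // Illustrative sketch only -- not part of this commit.
    #include "tbb/reader_writer_lock.h"
    #include "tbb/tbb_thread.h"
    #include <cstdio>

    static tbb::reader_writer_lock rw_lock;   // writer-preference lock declared above
    static int shared_value = 0;              // data protected by rw_lock

    void writer_body() {
        // scoped_lock acquires the write lock in its constructor and
        // releases it in its destructor, even on an exception path.
        tbb::reader_writer_lock::scoped_lock write_guard(rw_lock);
        ++shared_value;
    }

    void reader_body() {
        // scoped_lock_read admits multiple concurrent readers.
        tbb::reader_writer_lock::scoped_lock_read read_guard(rw_lock);
        std::printf("observed %d\n", shared_value);
    }

    int main() {
        tbb::tbb_thread w(writer_body), r(reader_body);
        w.join();
        r.join();
        return 0;
    }
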
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/recursive_mutex.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/recursive_mutex.h
new file mode 100644 (file)
index 0000000..f5ae5ed
--- /dev/null
@@ -0,0 +1,240 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_recursive_mutex_H
+#define __TBB_recursive_mutex_H
+
+#if _WIN32||_WIN64
+#include "machine/windows_api.h"
+#else
+#include <pthread.h>
+#endif /* _WIN32||_WIN64 */
+
+#include <new>
+#include "aligned_space.h"
+#include "tbb_stddef.h"
+#include "tbb_profiling.h"
+
+namespace tbb {
+//! Mutex that allows recursive mutex acquisition.
+/** Mutex that allows recursive mutex acquisition.
+    @ingroup synchronization */
+class recursive_mutex {
+public:
+    //! Construct unacquired recursive_mutex.
+    recursive_mutex() {
+#if TBB_USE_ASSERT || TBB_USE_THREADING_TOOLS
+        internal_construct();
+#else
+  #if _WIN32||_WIN64
+        InitializeCriticalSection(&impl);
+  #else
+        pthread_mutexattr_t mtx_attr;
+        int error_code = pthread_mutexattr_init( &mtx_attr );
+        if( error_code )
+            tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutexattr_init failed");
+
+        pthread_mutexattr_settype( &mtx_attr, PTHREAD_MUTEX_RECURSIVE );
+        error_code = pthread_mutex_init( &impl, &mtx_attr );
+        if( error_code )
+            tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutex_init failed");
+
+        pthread_mutexattr_destroy( &mtx_attr );
+  #endif /* _WIN32||_WIN64*/
+#endif /* TBB_USE_ASSERT */
+    };
+
+    ~recursive_mutex() {
+#if TBB_USE_ASSERT
+        internal_destroy();
+#else
+  #if _WIN32||_WIN64
+        DeleteCriticalSection(&impl);
+  #else
+        pthread_mutex_destroy(&impl); 
+
+  #endif /* _WIN32||_WIN64 */
+#endif /* TBB_USE_ASSERT */
+    };
+
+    class scoped_lock;
+    friend class scoped_lock;
+
+    //! The scoped locking pattern
+    /** It helps to avoid the common problem of forgetting to release the lock.
+        It also nicely provides the "node" for queuing locks. */
+    class scoped_lock: internal::no_copy {
+    public:
+        //! Construct lock that has not acquired a recursive_mutex. 
+        scoped_lock() : my_mutex(NULL) {};
+
+        //! Acquire lock on given mutex.
+        scoped_lock( recursive_mutex& mutex ) {
+#if TBB_USE_ASSERT
+            my_mutex = &mutex; 
+#endif /* TBB_USE_ASSERT */
+            acquire( mutex );
+        }
+
+        //! Release lock (if lock is held).
+        ~scoped_lock() {
+            if( my_mutex ) 
+                release();
+        }
+
+        //! Acquire lock on given mutex.
+        void acquire( recursive_mutex& mutex ) {
+#if TBB_USE_ASSERT
+            internal_acquire( mutex );
+#else
+            my_mutex = &mutex;
+            mutex.lock();
+#endif /* TBB_USE_ASSERT */
+        }
+
+        //! Try acquire lock on given recursive_mutex.
+        bool try_acquire( recursive_mutex& mutex ) {
+#if TBB_USE_ASSERT
+            return internal_try_acquire( mutex );
+#else
+            bool result = mutex.try_lock();
+            if( result )
+                my_mutex = &mutex;
+            return result;
+#endif /* TBB_USE_ASSERT */
+        }
+
+        //! Release lock
+        void release() {
+#if TBB_USE_ASSERT
+            internal_release();
+#else
+            my_mutex->unlock();
+            my_mutex = NULL;
+#endif /* TBB_USE_ASSERT */
+        }
+
+    private:
+        //! The pointer to the current recursive_mutex to work
+        recursive_mutex* my_mutex;
+
+        //! All checks from acquire using mutex.state were moved here
+        void __TBB_EXPORTED_METHOD internal_acquire( recursive_mutex& m );
+
+        //! All checks from try_acquire using mutex.state were moved here
+        bool __TBB_EXPORTED_METHOD internal_try_acquire( recursive_mutex& m );
+
+        //! All checks from release using mutex.state were moved here
+        void __TBB_EXPORTED_METHOD internal_release();
+
+        friend class recursive_mutex;
+    };
+
+    // Mutex traits
+    static const bool is_rw_mutex = false;
+    static const bool is_recursive_mutex = true;
+    static const bool is_fair_mutex = false;
+
+    // C++0x compatibility interface
+    
+    //! Acquire lock
+    void lock() {
+#if TBB_USE_ASSERT
+        aligned_space<scoped_lock,1> tmp;
+        new(tmp.begin()) scoped_lock(*this);
+#else
+  #if _WIN32||_WIN64
+        EnterCriticalSection(&impl);
+  #else
+        pthread_mutex_lock(&impl);
+  #endif /* _WIN32||_WIN64 */
+#endif /* TBB_USE_ASSERT */
+    }
+
+    //! Try acquiring lock (non-blocking)
+    /** Return true if lock acquired; false otherwise. */
+    bool try_lock() {
+#if TBB_USE_ASSERT
+        aligned_space<scoped_lock,1> tmp;
+        return (new(tmp.begin()) scoped_lock)->internal_try_acquire(*this);
+#else        
+  #if _WIN32||_WIN64
+        return TryEnterCriticalSection(&impl)!=0;
+  #else
+        return pthread_mutex_trylock(&impl)==0;
+  #endif /* _WIN32||_WIN64 */
+#endif /* TBB_USE_ASSERT */
+    }
+
+    //! Release lock
+    void unlock() {
+#if TBB_USE_ASSERT
+        aligned_space<scoped_lock,1> tmp;
+        scoped_lock& s = *tmp.begin();
+        s.my_mutex = this;
+        s.internal_release();
+#else
+  #if _WIN32||_WIN64
+        LeaveCriticalSection(&impl);
+  #else
+        pthread_mutex_unlock(&impl);
+  #endif /* _WIN32||_WIN64 */
+#endif /* TBB_USE_ASSERT */
+    }
+
+    //! Return native_handle
+  #if _WIN32||_WIN64
+    typedef LPCRITICAL_SECTION native_handle_type;
+  #else
+    typedef pthread_mutex_t* native_handle_type;
+  #endif
+    native_handle_type native_handle() { return (native_handle_type) &impl; }
+
+private:
+#if _WIN32||_WIN64
+    CRITICAL_SECTION impl;
+    enum state_t {
+        INITIALIZED=0x1234,
+        DESTROYED=0x789A,
+    } state;
+#else
+    pthread_mutex_t impl;
+#endif /* _WIN32||_WIN64 */
+
+    //! All checks from mutex constructor using mutex.state were moved here
+    void __TBB_EXPORTED_METHOD internal_construct();
+
+    //! All checks from mutex destructor using mutex.state were moved here
+    void __TBB_EXPORTED_METHOD internal_destroy();
+};
+
+__TBB_DEFINE_PROFILING_SET_NAME(recursive_mutex)
+
+} // namespace tbb 
+
+#endif /* __TBB_recursive_mutex_H */
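A small sketch of how the recursive_mutex added above differs from a plain mutex: the same thread may re-acquire it. The count_down helper and the single-threaded main are illustrative assumptions, not part of this commit:

    // Illustrative sketch only -- not part of this commit.
    #include "tbb/recursive_mutex.h"

    static tbb::recursive_mutex list_mutex;

    // Because the mutex is recursive, a function that holds it may call
    // itself (or another helper that locks the same mutex) without deadlocking.
    int count_down( int n ) {
        tbb::recursive_mutex::scoped_lock guard(list_mutex);
        if( n <= 0 ) return 0;
        return 1 + count_down(n - 1);    // re-enters list_mutex
    }

    int main() {
        // The C++0x-style interface declared above can be mixed in as well.
        list_mutex.lock();
        int total = count_down(3);
        list_mutex.unlock();
        return total == 3 ? 0 : 1;
    }
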
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/scalable_allocator.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/scalable_allocator.h
new file mode 100644 (file)
index 0000000..2293803
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_scalable_allocator_H
+#define __TBB_scalable_allocator_H
+/** @file */
+
+#include <stddef.h> /* Need ptrdiff_t and size_t from here. */
+
+#if !defined(__cplusplus) && __ICC==1100
+    #pragma warning (push)
+    #pragma warning (disable: 991)
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#if _MSC_VER >= 1400
+#define __TBB_EXPORTED_FUNC   __cdecl
+#else
+#define __TBB_EXPORTED_FUNC
+#endif
+
+/** The "malloc" analogue to allocate block of memory of size bytes.
+  * @ingroup memory_allocation */
+void * __TBB_EXPORTED_FUNC scalable_malloc (size_t size);
+
+/** The "free" analogue to discard a previously allocated piece of memory.
+    @ingroup memory_allocation */
+void   __TBB_EXPORTED_FUNC scalable_free (void* ptr);
+
+/** The "realloc" analogue complementing scalable_malloc.
+    @ingroup memory_allocation */
+void * __TBB_EXPORTED_FUNC scalable_realloc (void* ptr, size_t size);
+
+/** The "calloc" analogue complementing scalable_malloc.
+    @ingroup memory_allocation */
+void * __TBB_EXPORTED_FUNC scalable_calloc (size_t nobj, size_t size);
+
+/** The "posix_memalign" analogue.
+    @ingroup memory_allocation */
+int __TBB_EXPORTED_FUNC scalable_posix_memalign (void** memptr, size_t alignment, size_t size);
+
+/** The "_aligned_malloc" analogue.
+    @ingroup memory_allocation */
+void * __TBB_EXPORTED_FUNC scalable_aligned_malloc (size_t size, size_t alignment);
+
+/** The "_aligned_realloc" analogue.
+    @ingroup memory_allocation */
+void * __TBB_EXPORTED_FUNC scalable_aligned_realloc (void* ptr, size_t size, size_t alignment);
+
+/** The "_aligned_free" analogue.
+    @ingroup memory_allocation */
+void __TBB_EXPORTED_FUNC scalable_aligned_free (void* ptr);
+
+/** The analogue of _msize/malloc_size/malloc_usable_size.
+    Returns the usable size of a memory block previously allocated by scalable_*,
+    or 0 (zero) if ptr does not point to such a block.
+    @ingroup memory_allocation */
+size_t __TBB_EXPORTED_FUNC scalable_msize (void* ptr);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif /* __cplusplus */
+
+#ifdef __cplusplus
+
+#include <new>      /* To use new with the placement argument */
+
+/* Ensure that including this header does not cause implicit linkage with TBB */
+#ifndef __TBB_NO_IMPLICIT_LINKAGE
+    #define __TBB_NO_IMPLICIT_LINKAGE 1
+    #include "tbb_stddef.h"
+    #undef  __TBB_NO_IMPLICIT_LINKAGE
+#else
+    #include "tbb_stddef.h"
+#endif
+
+
+namespace tbb {
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // Workaround for erroneous "unreferenced parameter" warning in method destroy.
+    #pragma warning (push)
+    #pragma warning (disable: 4100)
+#endif
+
+//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5
+/** The members are ordered the same way they are in section 20.4.1
+    of the ISO C++ standard.
+    @ingroup memory_allocation */
+template<typename T>
+class scalable_allocator {
+public:
+    typedef typename internal::allocator_type<T>::value_type value_type;
+    typedef value_type* pointer;
+    typedef const value_type* const_pointer;
+    typedef value_type& reference;
+    typedef const value_type& const_reference;
+    typedef size_t size_type;
+    typedef ptrdiff_t difference_type;
+    template<class U> struct rebind {
+        typedef scalable_allocator<U> other;
+    };
+
+    scalable_allocator() throw() {}
+    scalable_allocator( const scalable_allocator& ) throw() {}
+    template<typename U> scalable_allocator(const scalable_allocator<U>&) throw() {}
+
+    pointer address(reference x) const {return &x;}
+    const_pointer address(const_reference x) const {return &x;}
+
+    //! Allocate space for n objects.
+    pointer allocate( size_type n, const void* /*hint*/ =0 ) {
+        return static_cast<pointer>( scalable_malloc( n * sizeof(value_type) ) );
+    }
+
+    //! Free previously allocated block of memory
+    void deallocate( pointer p, size_type ) {
+        scalable_free( p );
+    }
+
+    //! Largest value for which method allocate might succeed.
+    size_type max_size() const throw() {
+        size_type absolutemax = static_cast<size_type>(-1) / sizeof (value_type);
+        return (absolutemax > 0 ? absolutemax : 1);
+    }
+    void construct( pointer p, const value_type& value ) {::new((void*)(p)) value_type(value);}
+    void destroy( pointer p ) {p->~value_type();}
+};
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    #pragma warning (pop)
+#endif // warning 4100 is back
+
+//! Analogous to std::allocator<void>, as defined in ISO C++ Standard, Section 20.4.1
+/** @ingroup memory_allocation */
+template<>
+class scalable_allocator<void> {
+public:
+    typedef void* pointer;
+    typedef const void* const_pointer;
+    typedef void value_type;
+    template<class U> struct rebind {
+        typedef scalable_allocator<U> other;
+    };
+};
+
+template<typename T, typename U>
+inline bool operator==( const scalable_allocator<T>&, const scalable_allocator<U>& ) {return true;}
+
+template<typename T, typename U>
+inline bool operator!=( const scalable_allocator<T>&, const scalable_allocator<U>& ) {return false;}
+
+} // namespace tbb
+
+#if _MSC_VER
+    #if __TBB_BUILD && !defined(__TBBMALLOC_NO_IMPLICIT_LINKAGE)
+        #define __TBBMALLOC_NO_IMPLICIT_LINKAGE 1
+    #endif
+
+    #if !__TBBMALLOC_NO_IMPLICIT_LINKAGE
+        #ifdef _DEBUG
+            #pragma comment(lib, "tbbmalloc_debug.lib")
+        #else
+            #pragma comment(lib, "tbbmalloc.lib")
+        #endif
+    #endif
+
+
+#endif
+
+#endif /* __cplusplus */
+
+#if !defined(__cplusplus) && __ICC==1100
+    #pragma warning (pop)
+#endif // ICC 11.0 warning 991 is back
+
+#endif /* __TBB_scalable_allocator_H */
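To illustrate the two interfaces this header declares, a hedged snippet using the C entry points and the STL-style allocator (assumes linking against tbbmalloc; the block size and container contents are arbitrary):

    // Illustrative sketch only -- not part of this commit.
    #include "tbb/scalable_allocator.h"
    #include <vector>
    #include <cstdio>

    int main() {
        // C interface: drop-in replacements for malloc/free declared above.
        double* block = static_cast<double*>( scalable_malloc( 64 * sizeof(double) ) );
        if( !block ) return 1;
        std::printf( "usable size: %lu bytes\n", (unsigned long)scalable_msize(block) );
        scalable_free( block );

        // C++ interface: a standard-conforming allocator for containers.
        std::vector<int, tbb::scalable_allocator<int> > v;
        for( int i = 0; i < 1000; ++i )
            v.push_back( i );
        return 0;
    }
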
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/spin_mutex.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/spin_mutex.h
new file mode 100644 (file)
index 0000000..5d47513
--- /dev/null
@@ -0,0 +1,192 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_spin_mutex_H
+#define __TBB_spin_mutex_H
+
+#include <cstddef>
+#include <new>
+#include "aligned_space.h"
+#include "tbb_stddef.h"
+#include "tbb_machine.h"
+#include "tbb_profiling.h"
+
+namespace tbb {
+
+//! A lock that occupies a single byte.
+/** A spin_mutex is a spinning mutex that fits in a single byte.
+    It should be used only for locking short critical sections 
+    (typically less than 20 instructions) when fairness is not an issue.  
+    If zero-initialized, the mutex is considered unheld.
+    @ingroup synchronization */
+class spin_mutex {
+    //! 0 if lock is released, 1 if lock is acquired.
+    unsigned char flag;
+
+public:
+    //! Construct unacquired lock.
+    /** Equivalent to zero-initialization of *this. */
+    spin_mutex() : flag(0) {
+#if TBB_USE_THREADING_TOOLS
+        internal_construct();
+#endif
+    }
+
+    //! Represents acquisition of a mutex.
+    class scoped_lock : internal::no_copy {
+    private:
+        //! Points to currently held mutex, or NULL if no lock is held.
+        spin_mutex* my_mutex; 
+
+        //! Value to store into spin_mutex::flag to unlock the mutex.
+        uintptr_t my_unlock_value;
+
+        //! Like acquire, but with ITT instrumentation.
+        void __TBB_EXPORTED_METHOD internal_acquire( spin_mutex& m );
+
+        //! Like try_acquire, but with ITT instrumentation.
+        bool __TBB_EXPORTED_METHOD internal_try_acquire( spin_mutex& m );
+
+        //! Like release, but with ITT instrumentation.
+        void __TBB_EXPORTED_METHOD internal_release();
+
+        friend class spin_mutex;
+
+    public:
+        //! Construct without acquiring a mutex.
+        scoped_lock() : my_mutex(NULL), my_unlock_value(0) {}
+
+        //! Construct and acquire lock on a mutex.
+        scoped_lock( spin_mutex& m ) { 
+#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
+            my_mutex=NULL;
+            internal_acquire(m);
+#else
+            my_unlock_value = __TBB_LockByte(m.flag);
+            my_mutex=&m;
+#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/
+        }
+
+        //! Acquire lock.
+        void acquire( spin_mutex& m ) {
+#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
+            internal_acquire(m);
+#else
+            my_unlock_value = __TBB_LockByte(m.flag);
+            my_mutex = &m;
+#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/
+        }
+
+        //! Try acquiring lock (non-blocking)
+        /** Return true if lock acquired; false otherwise. */
+        bool try_acquire( spin_mutex& m ) {
+#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
+            return internal_try_acquire(m);
+#else
+            bool result = __TBB_TryLockByte(m.flag);
+            if( result ) {
+                my_unlock_value = 0;
+                my_mutex = &m;
+            }
+            return result;
+#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/
+        }
+
+        //! Release lock
+        void release() {
+#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
+            internal_release();
+#else
+            __TBB_store_with_release(my_mutex->flag, static_cast<unsigned char>(my_unlock_value));
+            my_mutex = NULL;
+#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
+        }
+
+        //! Destroy lock.  If holding a lock, releases the lock first.
+        ~scoped_lock() {
+            if( my_mutex ) {
+#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
+                internal_release();
+#else
+                __TBB_store_with_release(my_mutex->flag, static_cast<unsigned char>(my_unlock_value));
+#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
+            }
+        }
+    };
+
+    void __TBB_EXPORTED_METHOD internal_construct();
+
+    // Mutex traits
+    static const bool is_rw_mutex = false;
+    static const bool is_recursive_mutex = false;
+    static const bool is_fair_mutex = false;
+
+    // ISO C++0x compatibility methods
+
+    //! Acquire lock
+    void lock() {
+#if TBB_USE_THREADING_TOOLS
+        aligned_space<scoped_lock,1> tmp;
+        new(tmp.begin()) scoped_lock(*this);
+#else
+        __TBB_LockByte(flag);
+#endif /* TBB_USE_THREADING_TOOLS*/
+    }
+
+    //! Try acquiring lock (non-blocking)
+    /** Return true if lock acquired; false otherwise. */
+    bool try_lock() {
+#if TBB_USE_THREADING_TOOLS
+        aligned_space<scoped_lock,1> tmp;
+        return (new(tmp.begin()) scoped_lock)->internal_try_acquire(*this);
+#else
+        return __TBB_TryLockByte(flag);
+#endif /* TBB_USE_THREADING_TOOLS*/
+    }
+
+    //! Release lock
+    void unlock() {
+#if TBB_USE_THREADING_TOOLS
+        aligned_space<scoped_lock,1> tmp;
+        scoped_lock& s = *tmp.begin();
+        s.my_mutex = this;
+        s.my_unlock_value = 0;
+        s.internal_release();
+#else
+        __TBB_store_with_release(flag, 0);
+#endif /* TBB_USE_THREADING_TOOLS */
+    }
+
+    friend class scoped_lock;
+};
+
+__TBB_DEFINE_PROFILING_SET_NAME(spin_mutex)
+
+} // namespace tbb
+
+#endif /* __TBB_spin_mutex_H */
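A brief sketch of the intended use of spin_mutex for very short critical sections; the counter, loop bound, and bump() function are made up for the example, and a linked TBB library is assumed:

    // Illustrative sketch only -- not part of this commit.
    #include "tbb/spin_mutex.h"
    #include "tbb/tbb_thread.h"

    static tbb::spin_mutex counter_mutex;   // one byte; zero-initialized means unlocked
    static long counter = 0;

    void bump() {
        for( int i = 0; i < 100000; ++i ) {
            // Keep the critical section tiny -- exactly what spin_mutex is meant for.
            tbb::spin_mutex::scoped_lock guard(counter_mutex);
            ++counter;
        }
    }

    int main() {
        tbb::tbb_thread t1(bump), t2(bump);
        t1.join();
        t2.join();
        return counter == 200000 ? 0 : 1;
    }
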
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/spin_rw_mutex.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/spin_rw_mutex.h
new file mode 100644 (file)
index 0000000..38b3a1f
--- /dev/null
@@ -0,0 +1,228 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_spin_rw_mutex_H
+#define __TBB_spin_rw_mutex_H
+
+#include "tbb_stddef.h"
+#include "tbb_machine.h"
+#include "tbb_profiling.h"
+
+namespace tbb {
+
+class spin_rw_mutex_v3;
+typedef spin_rw_mutex_v3 spin_rw_mutex;
+
+//! Fast, unfair, spinning reader-writer lock with backoff and writer-preference
+/** @ingroup synchronization */
+class spin_rw_mutex_v3 {
+    //! @cond INTERNAL
+
+    //! Internal acquire write lock.
+    bool __TBB_EXPORTED_METHOD internal_acquire_writer();
+
+    //! Out of line code for releasing a write lock.  
+    /** This code has debug checking and instrumentation for Intel(R) Thread Checker and Intel(R) Thread Profiler. */
+    void __TBB_EXPORTED_METHOD internal_release_writer();
+
+    //! Internal acquire read lock.
+    void __TBB_EXPORTED_METHOD internal_acquire_reader();
+
+    //! Internal upgrade reader to become a writer.
+    bool __TBB_EXPORTED_METHOD internal_upgrade();
+
+    //! Out of line code for downgrading a writer to a reader.   
+    /** This code has debug checking and instrumentation for Intel(R) Thread Checker and Intel(R) Thread Profiler. */
+    void __TBB_EXPORTED_METHOD internal_downgrade();
+
+    //! Internal release read lock.
+    void __TBB_EXPORTED_METHOD internal_release_reader();
+
+    //! Internal try_acquire write lock.
+    bool __TBB_EXPORTED_METHOD internal_try_acquire_writer();
+
+    //! Internal try_acquire read lock.
+    bool __TBB_EXPORTED_METHOD internal_try_acquire_reader();
+
+    //! @endcond
+public:
+    //! Construct unacquired mutex.
+    spin_rw_mutex_v3() : state(0) {
+#if TBB_USE_THREADING_TOOLS
+        internal_construct();
+#endif
+    }
+
+#if TBB_USE_ASSERT
+    //! Destructor asserts if the mutex is acquired, i.e. state is not zero.
+    ~spin_rw_mutex_v3() {
+        __TBB_ASSERT( !state, "destruction of an acquired mutex");
+    };
+#endif /* TBB_USE_ASSERT */
+
+    //! The scoped locking pattern
+    /** It helps to avoid the common problem of forgetting to release the lock.
+        It also nicely provides the "node" for queuing locks. */
+    class scoped_lock : internal::no_copy {
+    public:
+        //! Construct lock that has not acquired a mutex.
+        /** Equivalent to zero-initialization of *this. */
+        scoped_lock() : mutex(NULL), is_writer(false) {}
+
+        //! Acquire lock on given mutex.
+        scoped_lock( spin_rw_mutex& m, bool write = true ) : mutex(NULL) {
+            acquire(m, write);
+        }
+
+        //! Release lock (if lock is held).
+        ~scoped_lock() {
+            if( mutex ) release();
+        }
+
+        //! Acquire lock on given mutex.
+        void acquire( spin_rw_mutex& m, bool write = true ) {
+            __TBB_ASSERT( !mutex, "holding mutex already" );
+            is_writer = write; 
+            mutex = &m;
+            if( write ) mutex->internal_acquire_writer();
+            else        mutex->internal_acquire_reader();
+        }
+
+        //! Upgrade reader to become a writer.
+        /** Returns true if the upgrade happened without re-acquiring the lock, and false otherwise. */
+        bool upgrade_to_writer() {
+            __TBB_ASSERT( mutex, "lock is not acquired" );
+            __TBB_ASSERT( !is_writer, "not a reader" );
+            is_writer = true; 
+            return mutex->internal_upgrade();
+        }
+
+        //! Release lock.
+        void release() {
+            __TBB_ASSERT( mutex, "lock is not acquired" );
+            spin_rw_mutex *m = mutex; 
+            mutex = NULL;
+#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
+            if( is_writer ) m->internal_release_writer();
+            else            m->internal_release_reader();
+#else
+            if( is_writer ) __TBB_AtomicAND( &m->state, READERS ); 
+            else            __TBB_FetchAndAddWrelease( &m->state, -(intptr_t)ONE_READER);
+#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
+        }
+
+        //! Downgrade writer to become a reader.
+        bool downgrade_to_reader() {
+#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
+            __TBB_ASSERT( mutex, "lock is not acquired" );
+            __TBB_ASSERT( is_writer, "not a writer" );
+            mutex->internal_downgrade();
+#else
+            __TBB_FetchAndAddW( &mutex->state, ((intptr_t)ONE_READER-WRITER));
+#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
+            is_writer = false;
+
+            return true;
+        }
+
+        //! Try acquire lock on given mutex.
+        bool try_acquire( spin_rw_mutex& m, bool write = true ) {
+            __TBB_ASSERT( !mutex, "holding mutex already" );
+            bool result;
+            is_writer = write; 
+            result = write? m.internal_try_acquire_writer()
+                          : m.internal_try_acquire_reader();
+            if( result ) 
+                mutex = &m;
+            return result;
+        }
+
+    private:
+        //! The pointer to the current mutex that is held, or NULL if no mutex is held.
+        spin_rw_mutex* mutex;
+
+        //! If mutex!=NULL, then is_writer is true if holding a writer lock, false if holding a reader lock.
+        /** Not defined if not holding a lock. */
+        bool is_writer;
+    };
+
+    // Mutex traits
+    static const bool is_rw_mutex = true;
+    static const bool is_recursive_mutex = false;
+    static const bool is_fair_mutex = false;
+
+    // ISO C++0x compatibility methods
+
+    //! Acquire writer lock
+    void lock() {internal_acquire_writer();}
+
+    //! Try acquiring writer lock (non-blocking)
+    /** Return true if lock acquired; false otherwise. */
+    bool try_lock() {return internal_try_acquire_writer();}
+
+    //! Release lock
+    void unlock() {
+#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
+        if( state&WRITER ) internal_release_writer();
+        else               internal_release_reader();
+#else
+        if( state&WRITER ) __TBB_AtomicAND( &state, READERS ); 
+        else               __TBB_FetchAndAddWrelease( &state, -(intptr_t)ONE_READER);
+#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
+    }
+
+    // Methods for reader locks that resemble ISO C++0x compatibility methods.
+
+    //! Acquire reader lock
+    void lock_read() {internal_acquire_reader();}
+
+    //! Try acquiring reader lock (non-blocking)
+    /** Return true if reader lock acquired; false otherwise. */
+    bool try_lock_read() {return internal_try_acquire_reader();}
+
+private:
+    typedef intptr_t state_t;
+    static const state_t WRITER = 1;
+    static const state_t WRITER_PENDING = 2;
+    static const state_t READERS = ~(WRITER | WRITER_PENDING);
+    static const state_t ONE_READER = 4;
+    static const state_t BUSY = WRITER | READERS;
+    //! State of lock
+    /** Bit 0 = writer is holding lock
+        Bit 1 = request by a writer to acquire lock (hint to readers to wait)
+        Bit 2..N = number of readers holding lock */
+    state_t state;
+
+    void __TBB_EXPORTED_METHOD internal_construct();
+};
+
+__TBB_DEFINE_PROFILING_SET_NAME(spin_rw_mutex)
+
+} // namespace tbb
+
+#endif /* __TBB_spin_rw_mutex_H */
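The upgrade_to_writer() idiom documented above is easy to get wrong, so here is a hedged sketch of the usual read-mostly cache pattern (the cache map, key, and lookup_or_insert function are invented for illustration; a linked TBB library is assumed):

    // Illustrative sketch only -- not part of this commit.
    #include "tbb/spin_rw_mutex.h"
    #include <map>
    #include <string>

    static tbb::spin_rw_mutex cache_mutex;
    static std::map<int, std::string> cache;

    std::string lookup_or_insert( int key ) {
        // Start as a reader, since most lookups are expected to hit.
        tbb::spin_rw_mutex::scoped_lock guard( cache_mutex, /*write=*/false );
        std::map<int, std::string>::iterator it = cache.find( key );
        if( it != cache.end() )
            return it->second;
        // Miss: upgrade to a writer.  If the upgrade had to release and
        // re-acquire the lock (return value false), another thread may have
        // inserted the key in the meantime, so look again before inserting.
        if( !guard.upgrade_to_writer() ) {
            it = cache.find( key );
            if( it != cache.end() )
                return it->second;
        }
        return cache[key] = "computed value";
    }

    int main() {
        return lookup_or_insert( 42 ) == "computed value" ? 0 : 1;
    }
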
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/task.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/task.h
new file mode 100644 (file)
index 0000000..0f876b0
--- /dev/null
@@ -0,0 +1,838 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_task_H
+#define __TBB_task_H
+
+#include "tbb_stddef.h"
+#include "tbb_machine.h"
+
+typedef struct ___itt_caller *__itt_caller;
+
+namespace tbb {
+
+class task;
+class task_list;
+
+#if __TBB_TASK_GROUP_CONTEXT
+class task_group_context;
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+// MSVC does not allow taking the address of a member that was defined 
+// privately in task_base and made public in class task via a using declaration.
+#if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3)
+#define __TBB_TASK_BASE_ACCESS public
+#else
+#define __TBB_TASK_BASE_ACCESS private
+#endif
+
+namespace internal {
+
+    class allocate_additional_child_of_proxy: no_assign {
+        //! No longer used, but retained for binary layout compatibility.  Always NULL.
+        task* self;
+        task& parent;
+    public:
+        explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) {}
+        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
+        void __TBB_EXPORTED_METHOD free( task& ) const;
+    };
+
+}
+
+namespace interface5 {
+    namespace internal {
+        //! Base class for methods that became static in TBB 3.0.
+        /** TBB's evolution caused the "this" argument for several methods to become obsolete.
+            However, for backwards binary compatibility, the new methods need distinct names,
+            otherwise the One Definition Rule would be broken.  Hence the new methods are 
+            defined in this private base class, and then exposed in class task via 
+            using declarations. */
+        class task_base: tbb::internal::no_copy {
+        __TBB_TASK_BASE_ACCESS:
+            friend class tbb::task;
+
+            //! Schedule task for execution when a worker becomes available.
+            static void spawn( task& t );
+            //! Spawn multiple tasks and clear list.
+            static void spawn( task_list& list );
+
+            //! Like allocate_child, except that task's parent becomes "t", not this.
+            /** Typically used in conjunction with schedule_to_reexecute to implement while loops.
+               Atomically increments the reference count of t.parent() */
+            static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) {
+                return tbb::internal::allocate_additional_child_of_proxy(t);
+            }
+
+            //! Destroy a task.
+            /** Usually, calling this method is unnecessary, because a task is
+                implicitly deleted after its execute() method runs.  However,
+                sometimes a task needs to be explicitly deallocated, such as
+                when a root task is used as the parent in spawn_and_wait_for_all. */
+            static void __TBB_EXPORTED_FUNC destroy( task& victim );
+        }; 
+    } // internal
+} // interface5
+
+//! @cond INTERNAL
+namespace internal {
+
+    class scheduler: no_copy {
+    public:
+        //! For internal use only
+        virtual void spawn( task& first, task*& next ) = 0;
+
+        //! For internal use only
+        virtual void wait_for_all( task& parent, task* child ) = 0;
+
+        //! For internal use only
+        virtual void spawn_root_and_wait( task& first, task*& next ) = 0;
+
+        //! Pure virtual destructor;
+        //  Have to have it just to shut up overzealous compilation warnings
+        virtual ~scheduler() = 0;
+#if __TBB_ARENA_PER_MASTER
+
+        //! For internal use only
+        virtual void enqueue( task& t, void* reserved ) = 0;
+#endif /* __TBB_ARENA_PER_MASTER */
+    };
+
+    //! A reference count
+    /** Should always be non-negative.  A signed type is used so that underflow can be detected. */
+    typedef intptr_t reference_count;
+
+    //! An id as used for specifying affinity.
+    typedef unsigned short affinity_id;
+
+#if __TBB_TASK_GROUP_CONTEXT
+    struct context_list_node_t {
+        context_list_node_t *my_prev,
+                            *my_next;
+    };
+
+    class allocate_root_with_context_proxy: no_assign {
+        task_group_context& my_context;
+    public:
+        allocate_root_with_context_proxy ( task_group_context& ctx ) : my_context(ctx) {}
+        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
+        void __TBB_EXPORTED_METHOD free( task& ) const;
+    };
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+    class allocate_root_proxy: no_assign {
+    public:
+        static task& __TBB_EXPORTED_FUNC allocate( size_t size );
+        static void __TBB_EXPORTED_FUNC free( task& );
+    };
+
+    class allocate_continuation_proxy: no_assign {
+    public:
+        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
+        void __TBB_EXPORTED_METHOD free( task& ) const;
+    };
+
+    class allocate_child_proxy: no_assign {
+    public:
+        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
+        void __TBB_EXPORTED_METHOD free( task& ) const;
+    };
+
+    //! Memory prefix to a task object.
+    /** This class is internal to the library.
+        Do not reference it directly, except within the library itself.
+        Fields are ordered in a way that preserves backwards compatibility and yields
+        good packing on typical 32-bit and 64-bit platforms.
+        @ingroup task_scheduling */
+    class task_prefix {
+    private:
+        friend class tbb::task;
+        friend class tbb::interface5::internal::task_base;
+        friend class tbb::task_list;
+        friend class internal::scheduler;
+        friend class internal::allocate_root_proxy;
+        friend class internal::allocate_child_proxy;
+        friend class internal::allocate_continuation_proxy;
+        friend class internal::allocate_additional_child_of_proxy;
+
+#if __TBB_TASK_GROUP_CONTEXT
+        //! Shared context that is used to communicate asynchronous state changes
+        /** Currently it is used to broadcast cancellation requests generated both 
+            by users and as the result of unhandled exceptions in the task::execute()
+            methods. */
+        task_group_context  *context;
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+        
+        //! The scheduler that allocated the task, or NULL if the task is big.
+        /** Small tasks are pooled by the scheduler that allocated the task.
+            If a scheduler needs to free a small task allocated by another scheduler,
+            it returns the task to that other scheduler.  This policy avoids
+            memory space blowup issues for memory allocators that allocate from 
+            thread-specific pools. */
+        scheduler* origin;
+
+        //! The scheduler that owns the task.
+        scheduler* owner;
+
+        //! The task whose reference count includes me.
+        /** In the "blocking style" of programming, this field points to the parent task.
+            In the "continuation-passing style" of programming, this field points to the
+            continuation of the parent. */
+        tbb::task* parent;
+
+        //! Reference count used for synchronization.
+        /** In the "continuation-passing style" of programming, this field is
+            the difference of the number of allocated children minus the
+            number of children that have completed.
+            In the "blocking style" of programming, this field is one more than the difference. */
+        reference_count ref_count;
+
+        //! Obsolete. Used to be scheduling depth before TBB 2.2
+        /** Retained only for the sake of backward binary compatibility. **/
+        int depth;
+
+        //! A task::state_type, stored as a byte for compactness.
+        /** This state is exposed to users via method task::state(). */
+        unsigned char state;
+
+        //! Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
+        /** 0x0 -> version 1.0 task
+            0x1 -> version >=2.1 task
+            0x20 -> task_proxy
+            0x40 -> task has live ref_count
+            0x80 -> a stolen task */
+        unsigned char extra_state;
+
+        affinity_id affinity;
+
+        //! "next" field for list of task
+        tbb::task* next;
+
+        //! The task corresponding to this task_prefix.
+        tbb::task& task() {return *reinterpret_cast<tbb::task*>(this+1);}
+    };
+
+} // namespace internal
+//! @endcond
+
+#if __TBB_TASK_GROUP_CONTEXT
+
+#if TBB_USE_CAPTURED_EXCEPTION
+    class tbb_exception;
+#else
+    namespace internal {
+        class tbb_exception_ptr;
+    }
+#endif /* !TBB_USE_CAPTURED_EXCEPTION */
+
+//! Used to form groups of tasks 
+/** @ingroup task_scheduling 
+    The context services explicit cancellation requests from user code, and unhandled 
+    exceptions intercepted during tasks execution. Intercepting an exception results 
+    in generating internal cancellation requests (which is processed in exactly the 
+    same way as external ones). 
+
+    The context is associated with one or more root tasks and defines the cancellation 
+    group that includes all the descendants of the corresponding root task(s). Association 
+    is established when a context object is passed as an argument to the task::allocate_root()
+    method. See task_group_context::task_group_context for more details.
+    
+    The context can be bound to another one, and other contexts can be bound to it,
+    forming a tree-like structure: parent -> this -> children. Arrows here designate
+    cancellation propagation direction. If a task in a cancellation group is canceled,
+    all the other tasks in this group and groups bound to it (as children) get canceled too.
+
+    IMPLEMENTATION NOTE: 
+    When adding new members to task_group_context or changing types of existing ones, 
+    update the size of both padding buffers (_leading_padding and _trailing_padding)
+    appropriately. See also VERSIONING NOTE at the constructor definition below. **/
+class task_group_context : internal::no_copy {
+private:
+#if TBB_USE_CAPTURED_EXCEPTION
+    typedef tbb_exception exception_container_type;
+#else
+    typedef internal::tbb_exception_ptr exception_container_type;
+#endif
+
+    enum version_traits_word_layout {
+        traits_offset = 16,
+        version_mask = 0xFFFF,
+        traits_mask = 0xFFFFul << traits_offset
+    };
+
+public:
+    enum kind_type {
+        isolated,
+        bound
+    };
+
+    enum traits_type {
+        exact_exception = 0x0001ul << traits_offset,
+        concurrent_wait = 0x0004ul << traits_offset,
+#if TBB_USE_CAPTURED_EXCEPTION
+        default_traits = 0
+#else
+        default_traits = exact_exception
+#endif /* !TBB_USE_CAPTURED_EXCEPTION */
+    };
+
+private:
+    union {
+        //! Flavor of this context: bound or isolated.
+        kind_type my_kind;
+        uintptr_t _my_kind_aligner;
+    };
+
+    //! Pointer to the context of the parent cancellation group. NULL for isolated contexts.
+    task_group_context *my_parent;
+
+    //! Used to form the thread specific list of contexts without additional memory allocation.
+    /** A context is included in the current thread's list when it is bound to
+        its parent. Any context can be present in the list of only one thread. **/
+    internal::context_list_node_t my_node;
+
+    //! Used to set and maintain stack stitching point for Intel Performance Tools.
+    __itt_caller itt_caller;
+
+    //! Leading padding protecting accesses to frequently used members from false sharing.
+    /** Read accesses to the field my_cancellation_requested are on the hot path inside
+        the scheduler. This padding ensures that this field never shares the same cache 
+        line with a local variable that is frequently written to. **/
+    char _leading_padding[internal::NFS_MaxLineSize - 
+                    2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t)
+                          - sizeof(__itt_caller)];
+    
+    //! Specifies whether cancellation was requested for this task group.
+    uintptr_t my_cancellation_requested;
+    
+    //! Version for run-time checks and behavioral traits of the context.
+    /** Version occupies low 16 bits, and traits (zero or more ORed enumerators
+        from the traits_type enumerations) take the next 16 bits.
+        Original (zeroth) version of the context did not support any traits. **/
+    uintptr_t  my_version_and_traits;
+
+    //! Pointer to the container storing exception being propagated across this task group.
+    exception_container_type *my_exception;
+
+    //! Scheduler that registered this context in its thread specific list.
+    /** This field is not strictly necessary, but it provides a small performance
+        benefit by avoiding the use of thread local storage. We do not care
+        about the extra memory it takes since this data structure is excessively padded anyway. **/
+    void *my_owner;
+
+    //! Trailing padding protecting accesses to frequently used members from false sharing
+    /** \sa _leading_padding **/
+    char _trailing_padding[internal::NFS_MaxLineSize - sizeof(intptr_t) - 2 * sizeof(void*)];
+
+public:
+    //! Default & binding constructor.
+    /** By default a bound context is created. That is, this context will be bound
+        (as a child) to the context of the task calling the task::allocate_root(this_context)
+        method. Cancellation requests passed to the parent context are propagated
+        to all the contexts bound to it.
+
+        If task_group_context::isolated is used as the argument, then the tasks associated
+        with this context will never be affected by events in any other context.
+        
+        Creating isolated contexts involves much less overhead, but they have limited
+        utility. Normally, when an exception occurs in an algorithm that has nested
+        ones running, it is desirable to have all the nested algorithms canceled
+        as well. Such behavior requires nested algorithms to use bound contexts.
+        
+        There is one good place where using an isolated context is beneficial: the
+        master thread. That is, if a particular algorithm is invoked directly from
+        the master thread (not from a TBB task), supplying it with an explicitly
+        created isolated context will result in a faster algorithm startup.
+        
+        VERSIONING NOTE: 
+        Implementation(s) of task_group_context constructor(s) cannot be made 
+        entirely out-of-line because the run-time version must be set by the user 
+        code. This will become critically important for binary compatibility, if 
+        we ever have to change the size of the context object.
+
+        Boosting the runtime version will also be necessary whenever new fields
+        are introduced in the currently unused padding areas or the meaning of 
+        the existing fields is changed or extended. **/
+    task_group_context ( kind_type relation_with_parent = bound,
+                         uintptr_t traits = default_traits )
+        : my_kind(relation_with_parent)
+        , my_version_and_traits(1 | traits)
+    {
+        init();
+    }
+
+    __TBB_EXPORTED_METHOD ~task_group_context ();
+
+    //! Forcefully reinitializes the context after the task tree it was associated with is completed.
+    /** Because the method assumes that all the tasks that used to be associated with 
+        this context have already finished, calling it while the context is still 
+        in use somewhere in the task hierarchy leads to undefined behavior.
+        
+        IMPORTANT: This method is not thread safe!
+
+        The method does not change the context's parent if it is set. **/ 
+    void __TBB_EXPORTED_METHOD reset ();
+
+    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
+    /** \return false if cancellation has already been requested, true otherwise. 
+
+        Note that canceling never fails. When false is returned, it just means that 
+        another thread (or this one) has already sent cancellation request to this
+        context or to one of its ancestors (if this context is bound). It is guaranteed
+        that when this method is concurrently called on the same not yet cancelled 
+        context, true will be returned by one and only one invocation. **/
+    bool __TBB_EXPORTED_METHOD cancel_group_execution ();
+
+    //! Returns true if the context received cancellation request.
+    bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const;
+
+    //! Records the pending exception, and cancels the task group.
+    /** May be called only from inside a catch-block. If the context is already 
+        canceled, does nothing. 
+        The method brings the task group associated with this context exactly into 
+        the state it would be in, if one of its tasks threw the currently pending 
+        exception during its execution. In other words, it emulates the actions 
+        of the scheduler's dispatch loop exception handler. **/
+    void __TBB_EXPORTED_METHOD register_pending_exception ();
+
+protected:
+    //! Out-of-line part of the constructor. 
+    /** Singled out to ensure backward binary compatibility of the future versions. **/
+    void __TBB_EXPORTED_METHOD init ();
+
+private:
+    friend class task;
+    friend class internal::allocate_root_with_context_proxy;
+
+    static const kind_type binding_required = bound;
+    static const kind_type binding_completed = kind_type(bound+1);
+    static const kind_type detached = kind_type(binding_completed+1);
+    static const kind_type dying = kind_type(detached+1);
+
+    //! Checks if any of the ancestors has a cancellation request outstanding, 
+    //! and propagates it back to descendants.
+    void propagate_cancellation_from_ancestors ();
+
+}; // class task_group_context
+
+#endif /* __TBB_TASK_GROUP_CONTEXT */
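As a rough illustration of the cancellation interface declared in task_group_context above (not part of the patch), a minimal program that issues and observes a cancellation request; task_scheduler_init is assumed to be available from the same TBB release, and real code would pass the context to task::allocate_root() or to a parallel algorithm:

    // Illustrative sketch only -- not part of this commit.
    #include "tbb/task.h"
    #include "tbb/task_scheduler_init.h"   // assumed available from the same TBB release

    int main() {
        tbb::task_scheduler_init init;          // make sure a scheduler exists
        tbb::task_group_context ctx;            // bound context by default
        bool first  = ctx.cancel_group_execution();   // true: this call issued the request
        bool second = ctx.cancel_group_execution();   // false: already cancelled
        return ( first && !second && ctx.is_group_execution_cancelled() ) ? 0 : 1;
    }
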
+
+//! Base class for user-defined tasks.
+/** @ingroup task_scheduling */
+class task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base {
+
+    //! Set reference count
+    void __TBB_EXPORTED_METHOD internal_set_ref_count( int count );
+
+    //! Decrement reference count and return its new value.
+    internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count();
+
+protected:
+    //! Default constructor.
+    task() {prefix().extra_state=1;}
+
+public:
+    //! Destructor.
+    virtual ~task() {}
+
+    //! Should be overridden by derived classes.
+    virtual task* execute() = 0;
+
+    //! Enumeration of task states that the scheduler considers.
+    enum state_type {
+        //! task is running, and will be destroyed after method execute() completes.
+        executing,
+        //! task to be rescheduled.
+        reexecute,
+        //! task is in ready pool, or is going to be put there, or was just taken off.
+        ready,
+        //! task object is freshly allocated or recycled.
+        allocated,
+        //! task object is on free list, or is going to be put there, or was just taken off.
+        freed,
+        //! task to be recycled as continuation
+        recycle 
+    };
+
+    //------------------------------------------------------------------------
+    // Allocating tasks
+    //------------------------------------------------------------------------
+
+    //! Returns proxy for overloaded new that allocates a root task.
+    static internal::allocate_root_proxy allocate_root() {
+        return internal::allocate_root_proxy();
+    }
+
+#if __TBB_TASK_GROUP_CONTEXT
+    //! Returns proxy for overloaded new that allocates a root task associated with user supplied context.
+    static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) {
+        return internal::allocate_root_with_context_proxy(ctx);
+    }
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+    //! Returns proxy for overloaded new that allocates a continuation task of *this.
+    /** The continuation's parent becomes the parent of *this. */
+    internal::allocate_continuation_proxy& allocate_continuation() {
+        return *reinterpret_cast<internal::allocate_continuation_proxy*>(this);
+    }
+
+    //! Returns proxy for overloaded new that allocates a child task of *this.
+    internal::allocate_child_proxy& allocate_child() {
+        return *reinterpret_cast<internal::allocate_child_proxy*>(this);
+    }
+
+    //! Define recommended static form via import from base class.
+    using task_base::allocate_additional_child_of;
+
+#if __TBB_DEPRECATED_TASK_INTERFACE
+    //! Destroy a task.
+    /** Usually, calling this method is unnecessary, because a task is
+        implicitly deleted after its execute() method runs.  However,
+        sometimes a task needs to be explicitly deallocated, such as
+        when a root task is used as the parent in spawn_and_wait_for_all. */
+    void __TBB_EXPORTED_METHOD destroy( task& t );
+#else /* !__TBB_DEPRECATED_TASK_INTERFACE */
+    //! Define recommended static form via import from base class.
+    using task_base::destroy;
+#endif /* !__TBB_DEPRECATED_TASK_INTERFACE */
+
+    //------------------------------------------------------------------------
+    // Recycling of tasks
+    //------------------------------------------------------------------------
+
+    //! Change this to be a continuation of its former self.
+    /** The caller must guarantee that the task's refcount does not become zero until
+        after the method execute() returns.  Typically, this is done by having
+        method execute() return a pointer to a child of the task.  If the guarantee
+        cannot be made, use method recycle_as_safe_continuation instead. 
+       
+        Because of the hazard, this method may be deprecated in the future. */
+    void recycle_as_continuation() {
+        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
+        prefix().state = allocated;
+    }
+
+    //! Recommended, safe variant of recycle_as_continuation.
+    /** For safety, it requires an additional increment of ref_count.
+        With no descendants and a ref_count of 1, it has the semantics of recycle_to_reexecute. */
+    void recycle_as_safe_continuation() {
+        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
+        prefix().state = recycle;
+    }
+
+    //! Change this to be a child of new_parent.
+    void recycle_as_child_of( task& new_parent ) {
+        internal::task_prefix& p = prefix();
+        __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" );
+        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" );
+        __TBB_ASSERT( p.parent==NULL, "parent must be null" );
+        __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" );
+        __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" );
+        p.state = allocated;
+        p.parent = &new_parent;
+#if __TBB_TASK_GROUP_CONTEXT
+        p.context = new_parent.prefix().context;
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+    }
+
+    //! Schedule this for reexecution after current execute() returns.
+    /** Made obsolete by recycle_as_safe_continuation; may become deprecated. */
+    void recycle_to_reexecute() {
+        __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
+        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" );
+        prefix().state = reexecute;
+    }
+
+    // All depth-related methods are obsolete, and are retained for the sake 
+    // of backward source compatibility only
+    intptr_t depth() const {return 0;}
+    void set_depth( intptr_t ) {}
+    void add_to_depth( int ) {}
+
+
+    //------------------------------------------------------------------------
+    // Spawning and blocking
+    //------------------------------------------------------------------------
+
+    //! Set reference count
+    void set_ref_count( int count ) {
+#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
+        internal_set_ref_count(count);
+#else
+        prefix().ref_count = count;
+#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
+    }
+
+    //! Atomically increment reference count.
+    /** Has acquire semantics */  
+    void increment_ref_count() {
+        __TBB_FetchAndIncrementWacquire( &prefix().ref_count );
+    }
+
+    //! Atomically decrement reference count.  
+    /** Has release semantics. */  
+    int decrement_ref_count() {
+#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
+        return int(internal_decrement_ref_count());
+#else
+        return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1;
+#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
+    }
+
+    //! Define recommended static forms via import from base class.
+    using task_base::spawn;
+
+    //! Similar to spawn followed by wait_for_all, but more efficient.
+    void spawn_and_wait_for_all( task& child ) {
+        prefix().owner->wait_for_all( *this, &child );
+    }
+
+    //! Similar to spawn followed by wait_for_all, but more efficient.
+    void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list );
+
+    //! Spawn task allocated by allocate_root, wait for it to complete, and deallocate it.
+    static void spawn_root_and_wait( task& root ) {
+        root.prefix().owner->spawn_root_and_wait( root, root.prefix().next );
+    }
+
+    //! Spawn root tasks on list and wait for all of them to finish.
+    /** If there are more tasks than worker threads, the tasks are spawned in
+        order of front to back. */
+    static void spawn_root_and_wait( task_list& root_list );
+
+    //! Wait for reference count to become one, and set reference count to zero.
+    /** Works on tasks while waiting. */
+    void wait_for_all() {
+        prefix().owner->wait_for_all( *this, NULL );
+    }
+
+#if __TBB_ARENA_PER_MASTER
+    //! Enqueue task for starvation-resistant execution.
+    static void enqueue( task& t ) {
+        t.prefix().owner->enqueue( t, NULL );
+    }
+
+#endif /* __TBB_ARENA_PER_MASTER */
+    //! The innermost task being executed or destroyed by the current thread at the moment.
+    static task& __TBB_EXPORTED_FUNC self();
+
+    //! task on whose behalf this task is working, or NULL if this is a root.
+    task* parent() const {return prefix().parent;}
+
+#if __TBB_TASK_GROUP_CONTEXT
+    //! Shared context that is used to communicate asynchronous state changes
+    task_group_context* context() {return prefix().context;}
+#endif /* __TBB_TASK_GROUP_CONTEXT */   
+
+    //! True if task was stolen from the task pool of another thread.
+    bool is_stolen_task() const {
+        return (prefix().extra_state & 0x80)!=0;
+    }
+
+    //------------------------------------------------------------------------
+    // Debugging
+    //------------------------------------------------------------------------
+
+    //! Current execution state
+    state_type state() const {return state_type(prefix().state);}
+
+    //! The internal reference count.
+    int ref_count() const {
+#if TBB_USE_ASSERT
+        internal::reference_count ref_count_ = prefix().ref_count;
+        __TBB_ASSERT( ref_count_==int(ref_count_), "integer overflow error");
+#endif
+        return int(prefix().ref_count);
+    }
+
+    //! Obsolete, and only retained for the sake of backward compatibility. Always returns true.
+    bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const;
+
+    //------------------------------------------------------------------------
+    // Affinity
+    //------------------------------------------------------------------------
+    //! An id as used for specifying affinity.
+    /** Guaranteed to be an integral type.  A value of 0 means no affinity. */
+    typedef internal::affinity_id affinity_id;
+
+    //! Set affinity for this task.
+    void set_affinity( affinity_id id ) {prefix().affinity = id;}
+
+    //! Current affinity of this task
+    affinity_id affinity() const {return prefix().affinity;}
+
+    //! Invoked by scheduler to notify task that it ran on unexpected thread.
+    /** Invoked before method execute() runs, if the task is stolen or has 
+        affinity but will be executed on another thread. 
+
+        The default action does nothing. */
+    virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id );
+
+#if __TBB_TASK_GROUP_CONTEXT
+    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
+    /** \return false if cancellation has already been requested, true otherwise. **/
+    bool cancel_group_execution () { return prefix().context->cancel_group_execution(); }
+
+    //! Returns true if the context received cancellation request.
+    bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); }
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+private:
+    friend class interface5::internal::task_base;
+    friend class task_list;
+    friend class internal::scheduler;
+    friend class internal::allocate_root_proxy;
+#if __TBB_TASK_GROUP_CONTEXT
+    friend class internal::allocate_root_with_context_proxy;
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+    friend class internal::allocate_continuation_proxy;
+    friend class internal::allocate_child_proxy;
+    friend class internal::allocate_additional_child_of_proxy;
+    
+    //! Get reference to corresponding task_prefix.
+    /** Version tag prevents loader on Linux from using the wrong symbol in debug builds. **/
+    internal::task_prefix& prefix( internal::version_tag* = NULL ) const {
+        return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(this))[-1];
+    }
+}; // class task
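+
+/*  [Editorial usage sketch -- not part of the upstream TBB 3.0 header.]
+    The blocking style documented above in a nutshell: a parent sets its
+    reference count to the number of children plus one for the wait, allocates
+    children with allocate_child(), and blocks in spawn_and_wait_for_all().
+
+        #include "tbb/task.h"
+
+        struct child_task : public tbb::task {
+            tbb::task* execute() {
+                // ... do a piece of work ...
+                return NULL;
+            }
+        };
+
+        struct parent_task : public tbb::task {
+            tbb::task* execute() {
+                set_ref_count( 2 );                       // 1 child + 1 for the wait
+                child_task& c = *new( allocate_child() ) child_task;
+                spawn_and_wait_for_all( c );
+                return NULL;
+            }
+        };
+
+        void run_blocking_style() {
+            parent_task& root = *new( tbb::task::allocate_root() ) parent_task;
+            tbb::task::spawn_root_and_wait( root );       // root is freed automatically
+        }
+*/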
+
+//! task that does nothing.  Useful for synchronization.
+/** @ingroup task_scheduling */
+class empty_task: public task {
+    /*override*/ task* execute() {
+        return NULL;
+    }
+};
+
+//! A list of children.
+/** Used with methods that spawn or wait on several tasks at once, such as task::spawn(task_list&)
+    @ingroup task_scheduling */
+class task_list: internal::no_copy {
+private:
+    task* first;
+    task** next_ptr;
+    friend class task;
+    friend class interface5::internal::task_base;
+public:
+    //! Construct empty list
+    task_list() : first(NULL), next_ptr(&first) {}
+
+    //! Destroys the list, but does not destroy the task objects.
+    ~task_list() {}
+
+    //! True if list is empty; false otherwise.
+    bool empty() const {return !first;}
+
+    //! Push task onto back of list.
+    void push_back( task& task ) {
+        task.prefix().next = NULL;
+        *next_ptr = &task;
+        next_ptr = &task.prefix().next;
+    }
+
+    //! Pop the front task from the list.
+    task& pop_front() {
+        __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" );
+        task* result = first;
+        first = result->prefix().next;
+        if( !first ) next_ptr = &first;
+        return *result;
+    }
+
+    //! Clear the list
+    void clear() {
+        first=NULL;
+        next_ptr=&first;
+    }
+};
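+
+/*  [Editorial usage sketch -- not part of the upstream TBB 3.0 header.]
+    Building a task_list of root tasks and running them with
+    task::spawn_root_and_wait(), which also clears the list.
+
+        #include "tbb/task.h"
+        #include <cstdio>
+
+        struct hello_task : public tbb::task {
+            tbb::task* execute() {
+                std::printf( "hello from a root task\n" );
+                return NULL;
+            }
+        };
+
+        void spawn_three_roots() {
+            tbb::task_list list;
+            for( int i = 0; i < 3; ++i )
+                list.push_back( *new( tbb::task::allocate_root() ) hello_task );
+            tbb::task::spawn_root_and_wait( list );
+        }
+*/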
+
+inline void interface5::internal::task_base::spawn( task& t ) {
+    t.prefix().owner->spawn( t, t.prefix().next );
+}
+
+inline void interface5::internal::task_base::spawn( task_list& list ) {
+    if( task* t = list.first ) {
+        t->prefix().owner->spawn( *t, *list.next_ptr );
+        list.clear();
+    }
+}
+
+inline void task::spawn_root_and_wait( task_list& root_list ) {
+    if( task* t = root_list.first ) {
+        t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr );
+        root_list.clear();
+    }
+}
+
+} // namespace tbb
+
+inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) {
+    return &tbb::internal::allocate_root_proxy::allocate(bytes);
+}
+
+inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) {
+    tbb::internal::allocate_root_proxy::free( *static_cast<tbb::task*>(task) );
+}
+
+#if __TBB_TASK_GROUP_CONTEXT
+inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) {
+    return &p.allocate(bytes);
+}
+
+inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) {
+    p.free( *static_cast<tbb::task*>(task) );
+}
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) {
+    return &p.allocate(bytes);
+}
+
+inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) {
+    p.free( *static_cast<tbb::task*>(task) );
+}
+
+inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) {
+    return &p.allocate(bytes);
+}
+
+inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) {
+    p.free( *static_cast<tbb::task*>(task) );
+}
+
+inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) {
+    return &p.allocate(bytes);
+}
+
+inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) {
+    p.free( *static_cast<tbb::task*>(task) );
+}
+
+#endif /* __TBB_task_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/task_group.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/task_group.h
new file mode 100644 (file)
index 0000000..a74ccc7
--- /dev/null
@@ -0,0 +1,248 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_task_group_H
+#define __TBB_task_group_H
+
+#include "task.h"
+#include "tbb_exception.h"
+
+namespace tbb {
+
+namespace internal {
+    template<typename F> class task_handle_task;
+}
+
+template<typename F>
+class task_handle : internal::no_assign {
+    template<typename _F> friend class internal::task_handle_task;
+
+    static const intptr_t scheduled = 0x1;
+
+    F my_func;
+    intptr_t my_state;
+
+    void mark_scheduled () {
+        // The check here is intentionally lax to avoid the impact of an interlocked operation
+        if ( my_state & scheduled )
+            internal::throw_exception( internal::eid_invalid_multiple_scheduling );
+        my_state |= scheduled;
+    }
+public:
+    task_handle( const F& f ) : my_func(f), my_state(0) {}
+
+    void operator() () const { my_func(); }
+};
+
+enum task_group_status {
+    not_complete,
+    complete,
+    canceled
+};
+
+namespace internal {
+
+// Suppress gratuitous warnings from icc 11.0 when lambda expressions are used in instances of function_task.
+//#pragma warning(disable: 588)
+
+template<typename F>
+class function_task : public task {
+    F my_func;
+    /*override*/ task* execute() {
+        my_func();
+        return NULL;
+    }
+public:
+    function_task( const F& f ) : my_func(f) {}
+};
+
+template<typename F>
+class task_handle_task : public task {
+    task_handle<F>& my_handle;
+    /*override*/ task* execute() {
+        my_handle();
+        return NULL;
+    }
+public:
+    task_handle_task( task_handle<F>& h ) : my_handle(h) { h.mark_scheduled(); }
+};
+
+class task_group_base : internal::no_copy {
+protected:
+    empty_task* my_root;
+    task_group_context my_context;
+
+    task& owner () { return *my_root; }
+
+    template<typename F>
+    task_group_status internal_run_and_wait( F& f ) {
+        __TBB_TRY {
+            if ( !my_context.is_group_execution_cancelled() )
+                f();
+        } __TBB_CATCH( ... ) {
+            my_context.register_pending_exception();
+        }
+        return wait();
+    }
+
+    template<typename F, typename Task>
+    void internal_run( F& f ) {
+        owner().spawn( *new( owner().allocate_additional_child_of(*my_root) ) Task(f) );
+    }
+
+public:
+    task_group_base( uintptr_t traits = 0 )
+        : my_context(task_group_context::bound, task_group_context::default_traits | traits)
+    {
+        my_root = new( task::allocate_root(my_context) ) empty_task;
+        my_root->set_ref_count(1);
+    }
+
+    ~task_group_base() {
+        if( my_root->ref_count() > 1 ) {
+            bool stack_unwinding_in_progress = std::uncaught_exception();
+            // Always attempt to do proper cleanup to avoid inevitable memory corruption 
+            // in case of missing wait (for the sake of better testability & debuggability)
+            if ( !is_canceling() )
+                cancel();
+            __TBB_TRY {
+                my_root->wait_for_all();
+            } __TBB_CATCH (...) {
+                task::destroy(*my_root);
+                __TBB_RETHROW();
+            }
+            task::destroy(*my_root);
+            if ( !stack_unwinding_in_progress )
+                internal::throw_exception( internal::eid_missing_wait );
+        }
+        else {
+            task::destroy(*my_root);
+        }
+    }
+
+    template<typename F>
+    void run( task_handle<F>& h ) {
+        internal_run< task_handle<F>, internal::task_handle_task<F> >( h );
+    }
+
+    task_group_status wait() {
+        __TBB_TRY {
+            my_root->wait_for_all();
+        } __TBB_CATCH( ... ) {
+            my_context.reset();
+            __TBB_RETHROW();
+        }
+        if ( my_context.is_group_execution_cancelled() ) {
+            my_context.reset();
+            return canceled;
+        }
+        return complete;
+    }
+
+    bool is_canceling() {
+        return my_context.is_group_execution_cancelled();
+    }
+
+    void cancel() {
+        my_context.cancel_group_execution();
+    }
+}; // class task_group_base
+
+} // namespace internal
+
+class task_group : public internal::task_group_base {
+public:
+    task_group () : task_group_base( task_group_context::concurrent_wait ) {}
+
+#if TBB_DEPRECATED
+    ~task_group() __TBB_TRY {
+        __TBB_ASSERT( my_root->ref_count() != 0, NULL );
+        if( my_root->ref_count() > 1 )
+            my_root->wait_for_all();
+    }
+#if TBB_USE_EXCEPTIONS
+    catch (...) {
+        // Have to destroy my_root here as the base class destructor won't be called
+        task::destroy(*my_root);
+        throw;
+    }
+#endif /* TBB_USE_EXCEPTIONS */
+#endif /* TBB_DEPRECATED */
+
+#if __SUNPRO_CC
+    template<typename F>
+    void run( task_handle<F>& h ) {
+        internal_run< task_handle<F>, internal::task_handle_task<F> >( h );
+    }
+#else
+    using task_group_base::run;
+#endif
+
+    template<typename F>
+    void run( const F& f ) {
+        internal_run< const F, internal::function_task<F> >( f );
+    }
+
+    template<typename F>
+    task_group_status run_and_wait( const F& f ) {
+        return internal_run_and_wait<const F>( f );
+    }
+
+    template<typename F>
+    task_group_status run_and_wait( task_handle<F>& h ) {
+      return internal_run_and_wait< task_handle<F> >( h );
+    }
+}; // class task_group
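+
+/*  [Editorial usage sketch -- not part of the upstream TBB 3.0 header.]
+    Running two independent pieces of work through a task_group with a plain
+    function object (a C++0x lambda would work the same way where available).
+
+        #include "tbb/task_group.h"
+
+        struct square_functor {
+            int x;
+            int* out;
+            square_functor( int x_, int* out_ ) : x(x_), out(out_) {}
+            void operator()() const { *out = x * x; }
+        };
+
+        int squares_in_parallel( int a, int b ) {
+            int ra = 0, rb = 0;
+            tbb::task_group g;
+            g.run( square_functor( a, &ra ) );   // copied into a task and spawned
+            g.run( square_functor( b, &rb ) );
+            if( g.wait() == tbb::canceled )      // group was cancelled before finishing
+                return 0;
+            return ra + rb;
+        }
+*/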
+
+class structured_task_group : public internal::task_group_base {
+public:
+    template<typename F>
+    task_group_status run_and_wait ( task_handle<F>& h ) {
+        return internal_run_and_wait< task_handle<F> >( h );
+    }
+
+    task_group_status wait() {
+        task_group_status res = task_group_base::wait();
+        my_root->set_ref_count(1);
+        return res;
+    }
+}; // class structured_task_group
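+
+/*  [Editorial usage sketch -- not part of the upstream TBB 3.0 header.]
+    structured_task_group works with explicit task_handle objects; each handle
+    may be scheduled only once and must stay alive until the group has waited.
+
+        #include "tbb/task_group.h"
+
+        struct work {
+            void operator()() const {}            // ... do something useful ...
+        };
+
+        void structured_example() {
+            work w1, w2;
+            tbb::task_handle<work> h1( w1 ), h2( w2 );
+            tbb::structured_task_group g;
+            g.run( h1 );                          // spawned asynchronously
+            g.run_and_wait( h2 );                 // runs h2 here, then waits for the group
+        }
+*/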
+
+inline 
+bool is_current_task_group_canceling() {
+    return task::self().is_cancelled();
+}
+
+template<class F>
+task_handle<F> make_task( const F& f ) {
+    return task_handle<F>( f );
+}
+
+} // namespace tbb
+
+#endif /* __TBB_task_group_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/task_scheduler_init.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/task_scheduler_init.h
new file mode 100644 (file)
index 0000000..458afb2
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_task_scheduler_init_H
+#define __TBB_task_scheduler_init_H
+
+#include "tbb_stddef.h"
+
+namespace tbb {
+
+typedef std::size_t stack_size_type;
+
+//! @cond INTERNAL
+namespace internal {
+    //! Internal to library. Should not be used by clients.
+    /** @ingroup task_scheduling */
+    class scheduler;
+} // namespace internal
+//! @endcond
+
+//! Class representing reference to tbb scheduler.
+/** A thread must construct a task_scheduler_init, and keep it alive,
+    during the time that it uses the services of class task.
+    @ingroup task_scheduling */
+class task_scheduler_init: internal::no_copy {
+    /** NULL if not currently initialized. */
+    internal::scheduler* my_scheduler;
+public:
+
+    //! Typedef for number of threads that is automatic.
+    static const int automatic = -1;
+
+    //! Argument to initialize() or constructor that causes initialization to be deferred.
+    static const int deferred = -2;
+
+    //! Ensure that scheduler exists for this thread
+    /** A value of -1 lets tbb decide on the number of threads, which is typically 
+        the number of hardware threads. For production code, the default value of -1 
+        should be used, particularly if the client code is mixed with third party clients 
+        that might also use tbb.
+
+        The number_of_threads is ignored if any other task_scheduler_inits 
+        currently exist.  A thread may construct multiple task_scheduler_inits.  
+        Doing so does no harm because the underlying scheduler is reference counted. */
+    void __TBB_EXPORTED_METHOD initialize( int number_of_threads=automatic );
+
+    //! The overloaded method with stack size parameter
+    /** Overloading is necessary to preserve ABI compatibility */
+    void __TBB_EXPORTED_METHOD initialize( int number_of_threads, stack_size_type thread_stack_size );
+
+    //! Inverse of method initialize.
+    void __TBB_EXPORTED_METHOD terminate();
+
+    //! Shorthand for default constructor followed by a call to initialize(number_of_threads).
+    task_scheduler_init( int number_of_threads=automatic, stack_size_type thread_stack_size=0 ) : my_scheduler(NULL)  {
+        initialize( number_of_threads, thread_stack_size );
+    }
+
+    //! Destroy scheduler for this thread if thread has no other live task_scheduler_inits.
+    ~task_scheduler_init() {
+        if( my_scheduler ) 
+            terminate();
+        internal::poison_pointer( my_scheduler );
+    }
+    //! Returns the number of threads tbb scheduler would create if initialized by default.
+    /** The result returned by this method does not depend on whether the scheduler 
+        has already been initialized.
+        
+        Because tbb 2.0 does not support blocking tasks yet, you may use this method
+        to boost the number of threads in tbb's internal pool if your tasks are 
+        doing I/O operations. The optimal number of additional threads depends on how
+        much time your tasks spend in the blocked state. */
+    static int __TBB_EXPORTED_FUNC default_num_threads ();
+
+    //! Returns true if scheduler is active (initialized); false otherwise
+    bool is_active() const { return my_scheduler != NULL; }
+};
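+
+/*  [Editorial usage sketch -- not part of the upstream TBB 3.0 header.]
+    Typical lifetime management: construct one task_scheduler_init in the thread
+    that uses TBB and keep it alive for as long as TBB services are needed.
+
+        #include "tbb/task_scheduler_init.h"
+
+        int main() {
+            tbb::task_scheduler_init init;   // automatic number of threads
+
+            // For I/O-heavy workloads one might oversubscribe instead, e.g.:
+            //   tbb::task_scheduler_init init( tbb::task_scheduler_init::default_num_threads() + 4 );
+
+            // ... run TBB algorithms and tasks while 'init' is alive ...
+            return 0;                        // scheduler reference released on destruction
+        }
+*/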
+
+} // namespace tbb
+
+#endif /* __TBB_task_scheduler_init_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/task_scheduler_observer.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/task_scheduler_observer.h
new file mode 100644 (file)
index 0000000..61003e5
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_task_scheduler_observer_H
+#define __TBB_task_scheduler_observer_H
+
+#include "atomic.h"
+
+#if __TBB_SCHEDULER_OBSERVER
+
+namespace tbb {
+
+namespace internal {
+
+class observer_proxy;
+
+class task_scheduler_observer_v3 {
+    friend class observer_proxy;
+    observer_proxy* my_proxy;
+    atomic<intptr_t> my_busy_count;
+public:
+    //! Enable or disable observation
+    void __TBB_EXPORTED_METHOD observe( bool state=true );
+
+    //! True if observation is enabled; false otherwise.
+    bool is_observing() const {return my_proxy!=NULL;}
+
+    //! Construct observer with observation disabled.
+    task_scheduler_observer_v3() : my_proxy(NULL) {my_busy_count=0;}
+
+    //! Called by thread before first steal since observation became enabled
+    virtual void on_scheduler_entry( bool /*is_worker*/ ) {} 
+
+    //! Called by thread when it no longer takes part in task stealing.
+    virtual void on_scheduler_exit( bool /*is_worker*/ ) {}
+
+    //! Destructor
+    virtual ~task_scheduler_observer_v3() {observe(false);}
+};
+
+} // namespace internal
+
+typedef internal::task_scheduler_observer_v3 task_scheduler_observer;
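+
+/*  [Editorial usage sketch -- not part of the upstream TBB 3.0 header.]
+    A derived observer usually enables observation in its constructor; the
+    callbacks then fire for every thread that joins or leaves the scheduler.
+
+        #include "tbb/task_scheduler_observer.h"
+        #include <cstdio>
+
+        class entry_logger : public tbb::task_scheduler_observer {
+        public:
+            entry_logger() { observe( true ); }
+            void on_scheduler_entry( bool is_worker ) {
+                std::printf( "thread entered the scheduler, worker=%d\n", int( is_worker ) );
+            }
+            void on_scheduler_exit( bool is_worker ) {
+                std::printf( "thread left the scheduler, worker=%d\n", int( is_worker ) );
+            }
+        };
+*/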
+
+} // namespace tbb
+
+#endif /* __TBB_SCHEDULER_OBSERVER */
+
+#endif /* __TBB_task_scheduler_observer_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb.h
new file mode 100644 (file)
index 0000000..9c5ac0f
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_tbb_H
+#define __TBB_tbb_H
+
+/** 
+    This header bulk-includes declarations or definitions of all the functionality 
+    provided by TBB (save for malloc dependent headers). 
+
+    If you use only a few TBB constructs, consider including specific headers only.
+    Any header listed below can be included independently of others.
+**/
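+
+/*  [Editorial note -- not part of the upstream TBB 3.0 header.] For instance, a
+    program that only needs a parallel loop can include the specific headers
+    directly instead of this umbrella header:
+
+        #include "tbb/blocked_range.h"
+        #include "tbb/parallel_for.h"
+*/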
+
+#include "aligned_space.h"
+#include "atomic.h"
+#include "blocked_range.h"
+#include "blocked_range2d.h"
+#include "blocked_range3d.h"
+#include "cache_aligned_allocator.h"
+#include "combinable.h"
+#include "concurrent_unordered_map.h"
+#include "concurrent_hash_map.h"
+#include "concurrent_queue.h"
+#include "concurrent_vector.h"
+#include "critical_section.h"
+#include "enumerable_thread_specific.h"
+#include "mutex.h"
+#include "null_mutex.h"
+#include "null_rw_mutex.h"
+#include "parallel_do.h"
+#include "parallel_for.h"
+#include "parallel_for_each.h"
+#include "parallel_invoke.h"
+#include "parallel_reduce.h"
+#include "parallel_scan.h"
+#include "parallel_sort.h"
+#include "partitioner.h"
+#include "pipeline.h"
+#include "queuing_mutex.h"
+#include "queuing_rw_mutex.h"
+#include "reader_writer_lock.h"
+#include "recursive_mutex.h"
+#include "spin_mutex.h"
+#include "spin_rw_mutex.h"
+#include "task.h"
+#include "task_group.h"
+#include "task_scheduler_init.h"
+#include "task_scheduler_observer.h"
+#include "tbb_allocator.h"
+#include "tbb_exception.h"
+#include "tbb_thread.h"
+#include "tick_count.h"
+
+#endif /* __TBB_tbb_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb_allocator.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb_allocator.h
new file mode 100644 (file)
index 0000000..008422d
--- /dev/null
@@ -0,0 +1,214 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_tbb_allocator_H
+#define __TBB_tbb_allocator_H
+
+#include "tbb_stddef.h"
+#include <new>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <cstring>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+namespace tbb {
+
+//! @cond INTERNAL
+namespace internal {
+
+    //! Deallocates memory using FreeHandler
+    /** The function uses scalable_free if the scalable allocator is available, and free if not. */
+    void __TBB_EXPORTED_FUNC deallocate_via_handler_v3( void *p );
+
+    //! Allocates memory using MallocHandler
+    /** The function uses scalable_malloc if the scalable allocator is available, and malloc if not. */
+    void* __TBB_EXPORTED_FUNC allocate_via_handler_v3( size_t n );
+
+    //! Returns true if standard malloc/free are used to work with memory.
+    bool __TBB_EXPORTED_FUNC is_malloc_used_v3();
+}
+//! @endcond
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // Workaround for erroneous "unreferenced parameter" warning in method destroy.
+    #pragma warning (push)
+    #pragma warning (disable: 4100)
+#endif
+
+//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5
+/** The class selects the best memory allocation mechanism available 
+    from scalable_malloc and standard malloc.
+    The members are ordered the same way they are in section 20.4.1
+    of the ISO C++ standard.
+    @ingroup memory_allocation */
+template<typename T>
+class tbb_allocator {
+public:
+    typedef typename internal::allocator_type<T>::value_type value_type;
+    typedef value_type* pointer;
+    typedef const value_type* const_pointer;
+    typedef value_type& reference;
+    typedef const value_type& const_reference;
+    typedef size_t size_type;
+    typedef ptrdiff_t difference_type;
+    template<typename U> struct rebind {
+        typedef tbb_allocator<U> other;
+    };
+
+    //! Specifies current allocator
+    enum malloc_type {
+        scalable, 
+        standard
+    };
+
+    tbb_allocator() throw() {}
+    tbb_allocator( const tbb_allocator& ) throw() {}
+    template<typename U> tbb_allocator(const tbb_allocator<U>&) throw() {}
+
+    pointer address(reference x) const {return &x;}
+    const_pointer address(const_reference x) const {return &x;}
+    
+    //! Allocate space for n objects.
+    pointer allocate( size_type n, const void* /*hint*/ = 0) {
+        return pointer(internal::allocate_via_handler_v3( n * sizeof(value_type) ));
+    }
+
+    //! Free previously allocated block of memory.
+    void deallocate( pointer p, size_type ) {
+        internal::deallocate_via_handler_v3(p);        
+    }
+
+    //! Largest value for which method allocate might succeed.
+    size_type max_size() const throw() {
+        size_type max = static_cast<size_type>(-1) / sizeof (value_type);
+        return (max > 0 ? max : 1);
+    }
+    
+    //! Copy-construct value at location pointed to by p.
+    void construct( pointer p, const value_type& value ) {::new((void*)(p)) value_type(value);}
+
+    //! Destroy value at location pointed to by p.
+    void destroy( pointer p ) {p->~value_type();}
+
+    //! Returns current allocator
+    static malloc_type allocator_type() {
+        return internal::is_malloc_used_v3() ? standard : scalable;
+    }
+};
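+
+/*  [Editorial usage sketch -- not part of the upstream TBB 3.0 header.]
+    tbb_allocator plugs into any standard container; the actual backing routine
+    (scalable_malloc or plain malloc) is chosen at run time as described above.
+
+        #include "tbb/tbb_allocator.h"
+        #include <vector>
+
+        typedef std::vector<int, tbb::tbb_allocator<int> > int_vector;
+
+        void fill_hundred( int_vector& v ) {
+            for( int i = 0; i < 100; ++i )
+                v.push_back( i );
+        }
+*/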
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    #pragma warning (pop)
+#endif // warning 4100 is back
+
+//! Analogous to std::allocator<void>, as defined in ISO C++ Standard, Section 20.4.1
+/** @ingroup memory_allocation */
+template<> 
+class tbb_allocator<void> {
+public:
+    typedef void* pointer;
+    typedef const void* const_pointer;
+    typedef void value_type;
+    template<typename U> struct rebind {
+        typedef tbb_allocator<U> other;
+    };
+};
+
+template<typename T, typename U>
+inline bool operator==( const tbb_allocator<T>&, const tbb_allocator<U>& ) {return true;}
+
+template<typename T, typename U>
+inline bool operator!=( const tbb_allocator<T>&, const tbb_allocator<U>& ) {return false;}
+
+//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5
+/** The class is an adapter over an actual allocator that zero-fills allocations
+    using the memset function.
+    The members are ordered the same way they are in section 20.4.1
+    of the ISO C++ standard.
+    @ingroup memory_allocation */
+template <typename T, template<typename X> class Allocator = tbb_allocator>
+class zero_allocator : public Allocator<T>
+{
+public:
+    typedef Allocator<T> base_allocator_type;
+    typedef typename base_allocator_type::value_type value_type;
+    typedef typename base_allocator_type::pointer pointer;
+    typedef typename base_allocator_type::const_pointer const_pointer;
+    typedef typename base_allocator_type::reference reference;
+    typedef typename base_allocator_type::const_reference const_reference;
+    typedef typename base_allocator_type::size_type size_type;
+    typedef typename base_allocator_type::difference_type difference_type;
+    template<typename U> struct rebind {
+        typedef zero_allocator<U, Allocator> other;
+    };
+
+    zero_allocator() throw() { }
+    zero_allocator(const zero_allocator &a) throw() : base_allocator_type( a ) { }
+    template<typename U>
+    zero_allocator(const zero_allocator<U> &a) throw() : base_allocator_type( Allocator<U>( a ) ) { }
+
+    pointer allocate(const size_type n, const void *hint = 0 ) {
+        pointer ptr = base_allocator_type::allocate( n, hint );
+        std::memset( ptr, 0, n * sizeof(value_type) );
+        return ptr;
+    }
+};
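+
+/*  [Editorial usage sketch -- not part of the upstream TBB 3.0 header.]
+    A common use of zero_allocator is with containers of atomic counters, so that
+    newly grown elements start at zero without a separate initialization pass.
+    The sketch assumes the tbb::atomic and tbb::concurrent_vector headers shipped
+    alongside this one.
+
+        #include "tbb/atomic.h"
+        #include "tbb/concurrent_vector.h"
+        #include "tbb/tbb_allocator.h"
+
+        typedef tbb::concurrent_vector< tbb::atomic<int>,
+                tbb::zero_allocator< tbb::atomic<int> > > counter_vector;
+*/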
+
+//! Analogous to std::allocator<void>, as defined in ISO C++ Standard, Section 20.4.1
+/** @ingroup memory_allocation */
+template<template<typename T> class Allocator> 
+class zero_allocator<void, Allocator> : public Allocator<void> {
+public:
+    typedef Allocator<void> base_allocator_type;
+    typedef typename base_allocator_type::value_type value_type;
+    typedef typename base_allocator_type::pointer pointer;
+    typedef typename base_allocator_type::const_pointer const_pointer;
+    template<typename U> struct rebind {
+        typedef zero_allocator<U, Allocator> other;
+    };
+};
+
+template<typename T1, template<typename X1> class B1, typename T2, template<typename X2> class B2>
+inline bool operator==( const zero_allocator<T1,B1> &a, const zero_allocator<T2,B2> &b) {
+    return static_cast< B1<T1> >(a) == static_cast< B2<T2> >(b);
+}
+template<typename T1, template<typename X1> class B1, typename T2, template<typename X2> class B2>
+inline bool operator!=( const zero_allocator<T1,B1> &a, const zero_allocator<T2,B2> &b) {
+    return static_cast< B1<T1> >(a) != static_cast< B2<T2> >(b);
+}
+
+} // namespace tbb 
+
+#endif /* __TBB_tbb_allocator_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb_config.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb_config.h
new file mode 100644 (file)
index 0000000..a65fd15
--- /dev/null
@@ -0,0 +1,208 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_tbb_config_H
+#define __TBB_tbb_config_H
+
+/** This header is supposed to contain macro definitions and C style comments only.
+    The macros defined here are intended to control such aspects of TBB build as 
+    - compilation modes
+    - feature sets
+    - workarounds presence 
+**/
+
+/** Compilation modes **/
+
+#ifndef TBB_USE_DEBUG
+#ifdef TBB_DO_ASSERT
+#define TBB_USE_DEBUG TBB_DO_ASSERT
+#else
+#define TBB_USE_DEBUG 0
+#endif /* TBB_DO_ASSERT */
+#else
+#define TBB_DO_ASSERT TBB_USE_DEBUG
+#endif /* TBB_USE_DEBUG */
+
+#ifndef TBB_USE_ASSERT
+#ifdef TBB_DO_ASSERT
+#define TBB_USE_ASSERT TBB_DO_ASSERT
+#else 
+#define TBB_USE_ASSERT TBB_USE_DEBUG
+#endif /* TBB_DO_ASSERT */
+#endif /* TBB_USE_ASSERT */
+
+#ifndef TBB_USE_THREADING_TOOLS
+#ifdef TBB_DO_THREADING_TOOLS
+#define TBB_USE_THREADING_TOOLS TBB_DO_THREADING_TOOLS
+#else 
+#define TBB_USE_THREADING_TOOLS TBB_USE_DEBUG
+#endif /* TBB_DO_THREADING_TOOLS */
+#endif /* TBB_USE_THREADING_TOOLS */
+
+#ifndef TBB_USE_PERFORMANCE_WARNINGS
+#ifdef TBB_PERFORMANCE_WARNINGS
+#define TBB_USE_PERFORMANCE_WARNINGS TBB_PERFORMANCE_WARNINGS
+#else 
+#define TBB_USE_PERFORMANCE_WARNINGS TBB_USE_DEBUG
+#endif /* TBB_PERFORMANCE_WARNINGS */
+#endif /* TBB_USE_PERFORMANCE_WARNINGS */
+
+#if !defined(__EXCEPTIONS) && !defined(_CPPUNWIND) && !defined(__SUNPRO_CC) || defined(_XBOX)
+    #if TBB_USE_EXCEPTIONS
+        #error Compilation settings do not support exception handling. Please do not set TBB_USE_EXCEPTIONS macro or set it to 0.
+    #elif !defined(TBB_USE_EXCEPTIONS)
+        #define TBB_USE_EXCEPTIONS 0
+    #endif
+#elif !defined(TBB_USE_EXCEPTIONS)
+    #define TBB_USE_EXCEPTIONS 1
+#endif
+
+#ifndef TBB_IMPLEMENT_CPP0X
+    /** By default, use C++0x classes if available **/
+    #if __GNUC__==4 && __GNUC_MINOR__>=4 && __GXX_EXPERIMENTAL_CXX0X__
+        #define TBB_IMPLEMENT_CPP0X 0
+    #else
+        #define TBB_IMPLEMENT_CPP0X 1
+    #endif
+#endif /* TBB_IMPLEMENT_CPP0X */
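+
+/*  [Editorial note -- not part of the upstream TBB 3.0 header.] The compilation-mode
+    macros above are intended to be set by the user, either on the compiler command
+    line (e.g. -DTBB_USE_DEBUG=1) or before including any TBB header:
+
+        #define TBB_USE_ASSERT 1        // turn on TBB's internal assertions
+        #include "tbb/tbb.h"
+*/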
+
+/** Feature sets **/
+
+#ifndef __TBB_COUNT_TASK_NODES
+    #define __TBB_COUNT_TASK_NODES TBB_USE_ASSERT
+#endif
+
+#ifndef __TBB_TASK_GROUP_CONTEXT
+    #define __TBB_TASK_GROUP_CONTEXT 1
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+#ifndef __TBB_SCHEDULER_OBSERVER
+    #define __TBB_SCHEDULER_OBSERVER 1
+#endif /* __TBB_SCHEDULER_OBSERVER */
+
+#ifndef __TBB_ARENA_PER_MASTER
+    #define __TBB_ARENA_PER_MASTER 1
+#endif /* __TBB_ARENA_PER_MASTER */
+
+#if !defined(__TBB_SURVIVE_THREAD_SWITCH) && (_WIN32 || _WIN64 || __linux__)
+    #define __TBB_SURVIVE_THREAD_SWITCH 1
+#endif /* __TBB_SURVIVE_THREAD_SWITCH */
+
+
+/* TODO: The following condition should be extended as soon as new compilers/runtimes 
+         with std::exception_ptr support appear. */
+#define __TBB_EXCEPTION_PTR_PRESENT  (_MSC_VER >= 1600 || __GXX_EXPERIMENTAL_CXX0X__ && (__GNUC__==4 && __GNUC_MINOR__>=4))
+
+
+#ifndef TBB_USE_CAPTURED_EXCEPTION
+    #if __TBB_EXCEPTION_PTR_PRESENT
+        #define TBB_USE_CAPTURED_EXCEPTION 0
+    #else
+        #define TBB_USE_CAPTURED_EXCEPTION 1
+    #endif
+#else /* defined TBB_USE_CAPTURED_EXCEPTION */
+    #if !TBB_USE_CAPTURED_EXCEPTION && !__TBB_EXCEPTION_PTR_PRESENT
+        #error Current runtime does not support std::exception_ptr. Set TBB_USE_CAPTURED_EXCEPTION and make sure that your code is ready to catch tbb::captured_exception.
+    #endif
+#endif /* defined TBB_USE_CAPTURED_EXCEPTION */
+
+
+#ifndef __TBB_DEFAULT_PARTITIONER
+#if TBB_DEPRECATED
+/** Default partitioner for parallel loop templates in TBB 1.0-2.1 */
+#define __TBB_DEFAULT_PARTITIONER tbb::simple_partitioner
+#else
+/** Default partitioner for parallel loop templates in TBB 2.2 */
+#define __TBB_DEFAULT_PARTITIONER tbb::auto_partitioner
+#endif /* TBB_DEPRECATED */
+#endif /* !defined(__TBB_DEFAULT_PARTITIONER) */
+
+/** Workarounds presence **/
+
+#if __GNUC__==4 && __GNUC_MINOR__>=4 && !defined(__INTEL_COMPILER)
+    #define __TBB_GCC_WARNING_SUPPRESSION_ENABLED 1
+#endif
+
+/** Macros of the form __TBB_XXX_BROKEN denote known issues that are caused by
+    the bugs in compilers, standard or OS specific libraries. They should be 
+    removed as soon as the corresponding bugs are fixed or the buggy OS/compiler
+    versions go out of the support list. 
+**/
+
+#if _MSC_VER && __INTEL_COMPILER && (__INTEL_COMPILER<1110 || __INTEL_COMPILER==1110 && __INTEL_COMPILER_BUILD_DATE < 20091012)
+    /** Necessary to avoid ICL error (or warning in non-strict mode): 
+        "exception specification for implicitly declared virtual destructor is 
+        incompatible with that of overridden one". **/
+    #define __TBB_DEFAULT_DTOR_THROW_SPEC_BROKEN 1
+#endif
+
+#if defined(_MSC_VER) && _MSC_VER < 1500 && !defined(__INTEL_COMPILER)
+    /** VS2005 and earlier do not allow declaring template class as a friend 
+        of classes defined in other namespaces. **/
+    #define __TBB_TEMPLATE_FRIENDS_BROKEN 1
+#endif
+
+#if __GLIBC__==2 && __GLIBC_MINOR__==3 || __MINGW32__
+    //! Macro controlling EH usages in TBB tests
+    /** Some older versions of glibc crash when exception handling happens concurrently. **/
+    #define __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN 1
+#endif
+
+#if (_WIN32||_WIN64) && __INTEL_COMPILER == 1110
+    /** That's a bug in Intel compiler 11.1.044/IA-32/Windows, that leads to a worker thread crash on the thread's startup. **/
+    #define __TBB_ICL_11_1_CODE_GEN_BROKEN 1
+#endif
+
+#if __GNUC__==3 && __GNUC_MINOR__==3 && !defined(__INTEL_COMPILER)
+    /** A bug in GCC 3.3 with access to nested classes declared in protected area */
+    #define __TBB_GCC_3_3_PROTECTED_BROKEN 1
+#endif
+
+#if __MINGW32__ && (__GNUC__<4 || __GNUC__==4 && __GNUC_MINOR__<2)
+    /** MinGW has a bug with stack alignment for routines invoked from MS RTLs.
+        Since GCC 4.2, the bug can be worked around via a special attribute. **/
+    #define __TBB_SSE_STACK_ALIGNMENT_BROKEN 1
+#endif
+
+#if __FreeBSD__
+    /** A bug in FreeBSD 8.0 results in kernel panic when there is contention 
+        on a mutex created with this attribute. **/
+    #define __TBB_PRIO_INHERIT_BROKEN 1
+
+    /** A bug in FreeBSD 8.0 results in test hanging when an exception occurs 
+        during (concurrent?) object construction by means of placement new operator. **/
+    #define __TBB_PLACEMENT_NEW_EXCEPTION_SAFETY_BROKEN 1
+#endif /* __FreeBSD__ */
+
+#if (__linux__ || __APPLE__) && __i386__ && defined(__INTEL_COMPILER)
+    /** The Intel compiler for IA-32 (Linux|Mac OS X) crashes or generates 
+        incorrect code when __asm__ arguments have a cast to volatile. **/
+    #define __TBB_ICC_ASM_VOLATILE_BROKEN 1
+#endif
+
+#endif /* __TBB_tbb_config_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb_exception.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb_exception.h
new file mode 100644 (file)
index 0000000..d8ae898
--- /dev/null
@@ -0,0 +1,362 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_exception_H
+#define __TBB_exception_H
+
+#include "tbb_stddef.h"
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <stdexcept>
+#include <string> // required to construct std exception classes
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+namespace tbb {
+
+//! Exception for concurrent containers
+class bad_last_alloc : public std::bad_alloc {
+public:
+    /*override*/ const char* what() const throw();
+#if __TBB_DEFAULT_DTOR_THROW_SPEC_BROKEN
+    /*override*/ ~bad_last_alloc() throw() {}
+#endif
+};
+
+//! Exception for PPL locks
+class improper_lock : public std::exception {
+public:
+    /*override*/ const char* what() const throw();
+};
+
+//! Exception for missing wait on structured_task_group
+class missing_wait : public std::exception {
+public:
+    /*override*/ const char* what() const throw();
+};
+
+//! Exception for repeated scheduling of the same task_handle 
+class invalid_multiple_scheduling : public std::exception {
+public:
+    /*override*/ const char* what() const throw();
+};
+
+namespace internal {
+//! Obsolete
+void __TBB_EXPORTED_FUNC throw_bad_last_alloc_exception_v4();
+
+enum exception_id {
+    eid_bad_alloc = 1,
+    eid_bad_last_alloc,
+    eid_nonpositive_step,
+    eid_out_of_range,
+    eid_segment_range_error,
+    eid_index_range_error,
+    eid_missing_wait,
+    eid_invalid_multiple_scheduling,
+    eid_improper_lock,
+    eid_possible_deadlock,
+    eid_operation_not_permitted,
+    eid_condvar_wait_failed,
+    eid_invalid_load_factor,
+    eid_reserved, // free slot for backward compatibility, can be reused.
+    eid_invalid_swap,
+    eid_reservation_length_error,
+    eid_invalid_key,
+    //! The last enumerator tracks the number of defined IDs. It must remain the last one.
+    /** When adding new IDs, place them immediately _before_ this comment (that is,
+        _after_ all the existing IDs). NEVER insert new IDs between the existing ones. **/
+    eid_max
+};
+
+//! Gathers all throw operators in one place.
+/** Its purpose is to minimize code bloat that can be caused by throw operators 
+    scattered in multiple places, especially in templates. **/
+void __TBB_EXPORTED_FUNC throw_exception_v4 ( exception_id );
+
+//! Versionless convenience wrapper for throw_exception_v4()
+inline void throw_exception ( exception_id eid ) { throw_exception_v4(eid); }
+
+} // namespace internal
+} // namespace tbb
+
+#if __TBB_TASK_GROUP_CONTEXT
+#include "tbb_allocator.h"
+#include <exception>
+#include <typeinfo>
+#include <new>
+
+namespace tbb {
+
+//! Interface to be implemented by all exceptions TBB recognizes and propagates across the threads.
+/** If an unhandled exception of the type derived from tbb::tbb_exception is intercepted
+    by the TBB scheduler in one of the worker threads, it is delivered to and re-thrown in
+    the root thread. The root thread is the thread that has started the outermost algorithm 
+    or root task sharing the same task_group_context with the guilty algorithm/task (the one
+    that threw the exception first).
+    
+    Note: when documentation mentions workers with respect to exception handling, 
+    masters are implied as well, because they are completely equivalent in this context.
+    Consequently, a root thread can be either a master or a worker thread. 
+
+    NOTE: In case of nested algorithms or complex task hierarchies when the nested 
+    levels share (explicitly or by means of implicit inheritance) the task group 
+    context of the outermost level, the exception may be (re-)thrown multiple times 
+    (ultimately - in each worker on each nesting level) before reaching the root 
+    thread at the outermost level. IMPORTANT: if you intercept an exception derived 
+    from this class on a nested level, you must re-throw it in the catch block by means
+    of the "throw;" operator. 
+    
+    TBB provides two implementations of this interface: tbb::captured_exception and 
+    template class tbb::movable_exception. See their declarations for more info. **/
+class tbb_exception : public std::exception
+{
+    /** No operator new is provided because the TBB usage model assumes dynamic 
+        creation of the TBB exception objects only by means of applying move()
+        operation on an exception thrown out of TBB scheduler. **/
+    void* operator new ( size_t );
+
+public:
+    //! Creates and returns pointer to the deep copy of this exception object. 
+    /** Move semantics is allowed. **/
+    virtual tbb_exception* move () throw() = 0;
+    
+    //! Destroys objects created by the move() method.
+    /** Frees memory and calls destructor for this exception object. 
+        Can and must be used only on objects created by the move method. **/
+    virtual void destroy () throw() = 0;
+
+    //! Throws this exception object.
+    /** Make sure that if you have several levels of derivation from this interface
+        you implement or override this method on the most derived level. The implementation 
+        is as simple as "throw *this;". Failure to do this will result in an exception
+        of a base class type being thrown. **/
+    virtual void throw_self () = 0;
+
+    //! Returns RTTI name of the originally intercepted exception
+    virtual const char* name() const throw() = 0;
+
+    //! Returns the result of originally intercepted exception's what() method.
+    virtual const char* what() const throw() = 0;
+
+    /** Operator delete is provided only to allow using existing smart pointers
+        with TBB exception objects obtained as the result of applying move()
+        operation on an exception thrown out of TBB scheduler. 
+        
+        When overriding method move() make sure to override operator delete as well
+        if memory is allocated not by TBB's scalable allocator. **/
+    void operator delete ( void* p ) {
+        internal::deallocate_via_handler_v3(p);
+    }
+};
+
+//! This class is used by TBB to propagate information about unhandled exceptions into the root thread.
+/** Exception of this type is thrown by TBB in the root thread (thread that started a parallel 
+    algorithm) if an unhandled exception was intercepted during the algorithm execution in one 
+    of the workers.
+    \sa tbb::tbb_exception **/
+class captured_exception : public tbb_exception
+{
+public:
+    captured_exception ( const captured_exception& src )
+        : tbb_exception(src), my_dynamic(false)
+    {
+        set(src.my_exception_name, src.my_exception_info);
+    }
+
+    captured_exception ( const char* name_, const char* info )
+        : my_dynamic(false)
+    {
+        set(name_, info);
+    }
+
+    __TBB_EXPORTED_METHOD ~captured_exception () throw() {
+        clear();
+    }
+
+    captured_exception& operator= ( const captured_exception& src ) {
+        if ( this != &src ) {
+            clear();
+            set(src.my_exception_name, src.my_exception_info);
+        }
+        return *this;
+    }
+
+    /*override*/ 
+    captured_exception* __TBB_EXPORTED_METHOD move () throw();
+
+    /*override*/ 
+    void __TBB_EXPORTED_METHOD destroy () throw();
+
+    /*override*/ 
+    void throw_self () { __TBB_THROW(*this); }
+
+    /*override*/ 
+    const char* __TBB_EXPORTED_METHOD name() const throw();
+
+    /*override*/ 
+    const char* __TBB_EXPORTED_METHOD what() const throw();
+
+    void __TBB_EXPORTED_METHOD set ( const char* name, const char* info ) throw();
+    void __TBB_EXPORTED_METHOD clear () throw();
+
+private:
+    //! Used only by method move().
+    captured_exception() {}
+
+    //! Functionally equivalent to {captured_exception e(name,info); return e.move();}
+    static captured_exception* allocate ( const char* name, const char* info );
+
+    bool my_dynamic;
+    const char* my_exception_name;
+    const char* my_exception_info;
+};
+
+//! Template that can be used to implement an exception that transfers arbitrary ExceptionData to the root thread
+/** Code using TBB can instantiate this template with an arbitrary ExceptionData type 
+    and throw this exception object. Such exceptions are intercepted by the TBB scheduler
+    and delivered to the root thread. 
+    \sa tbb::tbb_exception **/
+template<typename ExceptionData>
+class movable_exception : public tbb_exception
+{
+    typedef movable_exception<ExceptionData> self_type;
+
+public:
+    movable_exception ( const ExceptionData& data_ ) 
+        : my_exception_data(data_)
+        , my_dynamic(false)
+        , my_exception_name(
+#if TBB_USE_EXCEPTIONS
+        typeid(self_type).name()
+#else /* !TBB_USE_EXCEPTIONS */
+        "movable_exception"
+#endif /* !TBB_USE_EXCEPTIONS */
+        )
+    {}
+
+    movable_exception ( const movable_exception& src ) throw () 
+        : tbb_exception(src)
+        , my_exception_data(src.my_exception_data)
+        , my_dynamic(false)
+        , my_exception_name(src.my_exception_name)
+    {}
+
+    ~movable_exception () throw() {}
+
+    const movable_exception& operator= ( const movable_exception& src ) {
+        if ( this != &src ) {
+            my_exception_data = src.my_exception_data;
+            my_exception_name = src.my_exception_name;
+        }
+        return *this;
+    }
+
+    ExceptionData& data () throw() { return my_exception_data; }
+
+    const ExceptionData& data () const throw() { return my_exception_data; }
+
+    /*override*/ const char* name () const throw() { return my_exception_name; }
+
+    /*override*/ const char* what () const throw() { return "tbb::movable_exception"; }
+
+    /*override*/ 
+    movable_exception* move () throw() {
+        void* e = internal::allocate_via_handler_v3(sizeof(movable_exception));
+        if ( e ) {
+            ::new (e) movable_exception(*this);
+            ((movable_exception*)e)->my_dynamic = true;
+        }
+        return (movable_exception*)e;
+    }
+    /*override*/ 
+    void destroy () throw() {
+        __TBB_ASSERT ( my_dynamic, "Method destroy can be called only on dynamically allocated movable_exceptions" );
+        if ( my_dynamic ) {
+            this->~movable_exception();
+            internal::deallocate_via_handler_v3(this);
+        }
+    }
+    /*override*/ 
+    void throw_self () { __TBB_THROW( *this ); }
+
+protected:
+    //! User data
+    ExceptionData  my_exception_data;
+
+private:
+    //! Flag specifying whether this object has been dynamically allocated (by the move method)
+    bool my_dynamic;
+
+    //! RTTI name of this class
+    /** We rely on the fact that RTTI names are static string constants. **/
+    const char* my_exception_name;
+};
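+// Usage sketch: user code can wrap arbitrary data in a movable_exception so that the
+// scheduler transfers it to the root thread. The payload type and the surrounding
+// parallel code are hypothetical.
+//
+//     struct my_error_data { int bad_index; };
+//     // inside a parallel body:
+//     throw tbb::movable_exception<my_error_data>( my_error_data() );
+//     // in the thread that started the algorithm:
+//     // catch ( tbb::movable_exception<my_error_data>& e ) { report(e.data().bad_index); }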
+
+#if !TBB_USE_CAPTURED_EXCEPTION
+namespace internal {
+
+//! Exception container that preserves the exact copy of the original exception
+/** This class can be used only when the appropriate runtime support (mandated 
+    by C++0x) is present **/
+class tbb_exception_ptr {
+    std::exception_ptr  my_ptr;
+
+public:
+    static tbb_exception_ptr* allocate ();
+    static tbb_exception_ptr* allocate ( const tbb_exception& tag );
+    //! This overload uses move semantics (i.e. it empties src)
+    static tbb_exception_ptr* allocate ( captured_exception& src );
+    
+    //! Destroys this object
+    /** Note that objects of this type can be created only by the allocate() method. **/
+    void destroy () throw();
+
+    //! Throws the contained exception.
+    void throw_self () { std::rethrow_exception(my_ptr); }
+
+private:
+    tbb_exception_ptr ( const std::exception_ptr& src ) : my_ptr(src) {}
+    tbb_exception_ptr ( const captured_exception& src ) : my_ptr(std::copy_exception(src)) {}
+}; // class tbb::internal::tbb_exception_ptr
+
+} // namespace internal
+#endif /* !TBB_USE_CAPTURED_EXCEPTION */
+
+} // namespace tbb
+
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+#endif /* __TBB_exception_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb_machine.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb_machine.h
new file mode 100644 (file)
index 0000000..0f22d0b
--- /dev/null
@@ -0,0 +1,691 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_machine_H
+#define __TBB_machine_H
+
+#include "tbb_stddef.h"
+
+#if _WIN32||_WIN64
+
+#ifdef _MANAGED
+#pragma managed(push, off)
+#endif
+
+#if __MINGW64__
+#include "machine/linux_intel64.h"
+extern "C" __declspec(dllimport) int __stdcall SwitchToThread( void );
+#define __TBB_Yield()  SwitchToThread()
+#elif __MINGW32__
+#include "machine/linux_ia32.h"
+extern "C" __declspec(dllimport) int __stdcall SwitchToThread( void );
+#define __TBB_Yield()  SwitchToThread()
+#elif defined(_M_IX86)
+#include "machine/windows_ia32.h"
+#elif defined(_M_AMD64) 
+#include "machine/windows_intel64.h"
+#elif _XBOX 
+#include "machine/xbox360_ppc.h"
+#endif
+
+#ifdef _MANAGED
+#pragma managed(pop)
+#endif
+
+#elif __linux__ || __FreeBSD__
+
+#if __i386__
+#include "machine/linux_ia32.h"
+#elif __x86_64__
+#include "machine/linux_intel64.h"
+#elif __ia64__
+#include "machine/linux_ia64.h"
+#elif __powerpc__
+#include "machine/mac_ppc.h"
+#endif
+#include "machine/linux_common.h"
+
+#elif __APPLE__
+
+#if __i386__
+#include "machine/linux_ia32.h"
+#elif __x86_64__
+#include "machine/linux_intel64.h"
+#elif __POWERPC__
+#include "machine/mac_ppc.h"
+#endif
+#include "machine/macos_common.h"
+
+#elif _AIX
+
+#include "machine/ibm_aix51.h"
+
+#elif __sun || __SUNPRO_CC
+
+#define __asm__ asm 
+#define __volatile__ volatile
+#if __i386  || __i386__
+#include "machine/linux_ia32.h"
+#elif __x86_64__
+#include "machine/linux_intel64.h"
+#elif __sparc
+#include "machine/sunos_sparc.h"
+#endif
+#include <sched.h>
+#define __TBB_Yield() sched_yield()
+
+#endif
+
+//! Prerequisites for each architecture port
+/** There is no generic implementation for these macros, so they have to be implemented
+    in each machine architecture specific header.
+
+    __TBB_full_memory_fence must prevent all memory operations from being reordered 
+    across the fence. All such fences must be totally ordered (or sequentially 
+    consistent). These fences must affect both the compiler and the hardware.
+    
+    __TBB_release_consistency_helper is used to enforce guarantees of acquire or 
+    release semantics in generic implementations of __TBB_load_with_acquire and 
+    __TBB_store_with_release below. Depending on the particular combination of
+    architecture+compiler it can be a hardware fence, a compiler fence, both or
+    nothing. **/
+#if    !defined(__TBB_CompareAndSwap4) \
+    || !defined(__TBB_CompareAndSwap8) \
+    || !defined(__TBB_Yield)           \
+    || !defined(__TBB_full_memory_fence)    \
+    || !defined(__TBB_release_consistency_helper)
+#error Minimal requirements for tbb_machine.h not satisfied; platform is not supported.
+#endif
+
+#ifndef __TBB_Pause
+    inline void __TBB_Pause(int32_t) {
+        __TBB_Yield();
+    }
+#endif
+
+namespace tbb {
+namespace internal {
+
+//! Class that implements exponential backoff.
+/** See implementation of spin_wait_while_eq for an example. */
+class atomic_backoff : no_copy {
+    //! Time delay, in units of "pause" instructions. 
+    /** Should be equal to approximately the number of "pause" instructions
+        that take the same time as a context switch. */
+    static const int32_t LOOPS_BEFORE_YIELD = 16;
+    int32_t count;
+public:
+    atomic_backoff() : count(1) {}
+
+    //! Pause for a while.
+    void pause() {
+        if( count<=LOOPS_BEFORE_YIELD ) {
+            __TBB_Pause(count);
+            // Pause twice as long the next time.
+            count*=2;
+        } else {
+            // Pause is so long that we might as well yield CPU to scheduler.
+            __TBB_Yield();
+        }
+    }
+
+    //! Pause for a bounded number of calls; once the limit is exceeded, return false without pausing.
+    bool bounded_pause() {
+        if( count<=LOOPS_BEFORE_YIELD ) {
+            __TBB_Pause(count);
+            // Pause twice as long the next time.
+            count*=2;
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    void reset() {
+        count = 1;
+    }
+};
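+// Usage sketch: spin on a condition with exponential backoff; this is exactly the
+// pattern wrapped by spin_wait_until_eq() below (my_flag is a hypothetical variable
+// written by another thread).
+//
+//     atomic_backoff backoff;
+//     while( my_flag==0 )
+//         backoff.pause();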
+
+//! Spin WHILE the value of the variable is equal to a given value
+/** T and U should be comparable types. */
+template<typename T, typename U>
+void spin_wait_while_eq( const volatile T& location, U value ) {
+    atomic_backoff backoff;
+    while( location==value ) backoff.pause();
+}
+
+//! Spin UNTIL the value of the variable is equal to a given value
+/** T and U should be comparable types. */
+template<typename T, typename U>
+void spin_wait_until_eq( const volatile T& location, const U value ) {
+    atomic_backoff backoff;
+    while( location!=value ) backoff.pause();
+}
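+// Example: block until a producer thread publishes a non-zero value
+// (my_ready is a hypothetical shared flag):
+//
+//     spin_wait_until_eq( my_ready, 1u );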
+
+// T should be unsigned, otherwise sign propagation will break correctness of bit manipulations.
+// S should be either 1 or 2, for the mask calculation to work correctly.
+// Together, these rules limit applicability of Masked CAS to unsigned char and unsigned short.
+template<size_t S, typename T>
+inline T __TBB_MaskedCompareAndSwap (volatile T *ptr, T value, T comparand ) {
+    volatile uint32_t * base = (uint32_t*)( (uintptr_t)ptr & ~(uintptr_t)0x3 );
+#if __TBB_BIG_ENDIAN
+    const uint8_t bitoffset = uint8_t( 8*( 4-S - (uintptr_t(ptr) & 0x3) ) );
+#else
+    const uint8_t bitoffset = uint8_t( 8*((uintptr_t)ptr & 0x3) );
+#endif
+    const uint32_t mask = ( (1<<(S*8)) - 1 )<<bitoffset;
+    atomic_backoff b;
+    uint32_t result;
+    for(;;) {
+        result = *base; // reload the base value which might change during the pause
+        uint32_t old_value = ( result & ~mask ) | ( comparand << bitoffset );
+        uint32_t new_value = ( result & ~mask ) | ( value << bitoffset );
+        // __TBB_CompareAndSwap4 presumed to have full fence. 
+        result = __TBB_CompareAndSwap4( base, new_value, old_value );
+        if(  result==old_value               // CAS succeeded
+          || ((result^old_value)&mask)!=0 )  // CAS failed and the bits of interest have changed
+            break;
+        else                                 // CAS failed but the bits of interest left unchanged
+            b.pause();
+    }
+    return T((result & mask) >> bitoffset);
+}
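+// Worked example of the mask arithmetic above (little-endian, S==1): for a byte at an
+// address with (uintptr_t)ptr & 0x3 == 2, bitoffset = 8*2 = 16 and
+// mask = ((1<<8)-1)<<16 = 0x00FF0000, so only the third byte of the aligned 32-bit
+// word participates in the compare-and-swap; the other three bytes are carried over
+// unchanged from the freshly reloaded base value.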
+
+template<size_t S, typename T>
+inline T __TBB_CompareAndSwapGeneric (volatile void *ptr, T value, T comparand ) { 
+    return __TBB_CompareAndSwapW((T *)ptr,value,comparand);
+}
+
+template<>
+inline uint8_t __TBB_CompareAndSwapGeneric <1,uint8_t> (volatile void *ptr, uint8_t value, uint8_t comparand ) {
+#ifdef __TBB_CompareAndSwap1
+    return __TBB_CompareAndSwap1(ptr,value,comparand);
+#else
+    return __TBB_MaskedCompareAndSwap<1,uint8_t>((volatile uint8_t *)ptr,value,comparand);
+#endif
+}
+
+template<>
+inline uint16_t __TBB_CompareAndSwapGeneric <2,uint16_t> (volatile void *ptr, uint16_t value, uint16_t comparand ) {
+#ifdef __TBB_CompareAndSwap2
+    return __TBB_CompareAndSwap2(ptr,value,comparand);
+#else
+    return __TBB_MaskedCompareAndSwap<2,uint16_t>((volatile uint16_t *)ptr,value,comparand);
+#endif
+}
+
+template<>
+inline uint32_t __TBB_CompareAndSwapGeneric <4,uint32_t> (volatile void *ptr, uint32_t value, uint32_t comparand ) { 
+    return __TBB_CompareAndSwap4(ptr,value,comparand);
+}
+
+template<>
+inline uint64_t __TBB_CompareAndSwapGeneric <8,uint64_t> (volatile void *ptr, uint64_t value, uint64_t comparand ) { 
+    return __TBB_CompareAndSwap8(ptr,value,comparand);
+}
+
+template<size_t S, typename T>
+inline T __TBB_FetchAndAddGeneric (volatile void *ptr, T addend) {
+    atomic_backoff b;
+    T result;
+    for(;;) {
+        result = *reinterpret_cast<volatile T *>(ptr);
+        // __TBB_CompareAndSwapGeneric presumed to have full fence. 
+        if( __TBB_CompareAndSwapGeneric<S,T> ( ptr, result+addend, result )==result ) 
+            break;
+        b.pause();
+    }
+    return result;
+}
+
+template<size_t S, typename T>
+inline T __TBB_FetchAndStoreGeneric (volatile void *ptr, T value) {
+    atomic_backoff b;
+    T result;
+    for(;;) {
+        result = *reinterpret_cast<volatile T *>(ptr);
+        // __TBB_CompareAndSwapGeneric presumed to have full fence.
+        if( __TBB_CompareAndSwapGeneric<S,T> ( ptr, value, result )==result ) 
+            break;
+        b.pause();
+    }
+    return result;
+}
+
+// Macro __TBB_TypeWithAlignmentAtLeastAsStrict(T) should expand to a type with alignment at least as 
+// strict as that of type T.  The type should have a trivial default constructor and destructor, so that
+// arrays of that type can be declared without initializers.  
+// It is correct (but perhaps a waste of space) if __TBB_TypeWithAlignmentAtLeastAsStrict(T) expands
+// to a type bigger than T.
+// The default definition here works on machines where integers are naturally aligned and the
+// strictest alignment is 16.
+#ifndef __TBB_TypeWithAlignmentAtLeastAsStrict
+
+#if __GNUC__ || __SUNPRO_CC
+struct __TBB_machine_type_with_strictest_alignment {
+    int member[4];
+} __attribute__((aligned(16)));
+#elif _MSC_VER
+__declspec(align(16)) struct __TBB_machine_type_with_strictest_alignment {
+    int member[4];
+};
+#else
+#error Must define __TBB_TypeWithAlignmentAtLeastAsStrict(T) or __TBB_machine_type_with_strictest_alignment
+#endif
+
+template<size_t N> struct type_with_alignment {__TBB_machine_type_with_strictest_alignment member;};
+template<> struct type_with_alignment<1> { char member; };
+template<> struct type_with_alignment<2> { uint16_t member; };
+template<> struct type_with_alignment<4> { uint32_t member; };
+template<> struct type_with_alignment<8> { uint64_t member; };
+
+#if _MSC_VER||defined(__GNUC__)&&__GNUC__==3 && __GNUC_MINOR__<=2  
+//! Work-around for a bug in GNU 3.2 and MSVC compilers.
+/** The bug is that the compiler sometimes returns 0 for __alignof(T) when T has not yet been instantiated.
+    The work-around forces instantiation by forcing computation of sizeof(T) before __alignof(T). */
+template<size_t Size, typename T> 
+struct work_around_alignment_bug {
+#if _MSC_VER
+    static const size_t alignment = __alignof(T);
+#else
+    static const size_t alignment = __alignof__(T);
+#endif
+};
+#define __TBB_TypeWithAlignmentAtLeastAsStrict(T) tbb::internal::type_with_alignment<tbb::internal::work_around_alignment_bug<sizeof(T),T>::alignment>
+#elif __GNUC__ || __SUNPRO_CC
+#define __TBB_TypeWithAlignmentAtLeastAsStrict(T) tbb::internal::type_with_alignment<__alignof__(T)>
+#else
+#define __TBB_TypeWithAlignmentAtLeastAsStrict(T) __TBB_machine_type_with_strictest_alignment
+#endif
+#endif  /* __TBB_TypeWithAlignmentAtLeastAsStrict */
+
+// The template class here exists to avoid instantiation of the static data in modules that don't use it
+template<typename T>
+struct reverse {
+    static const T byte_table[256];
+};
+// An efficient implementation of the reverse function utilizes a 2^8 lookup table holding the bit-reversed
+// values of [0..2^8 - 1]. Those values can also be computed on the fly at a slightly higher cost.
+template<typename T>
+const T reverse<T>::byte_table[256] = {
+    0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0,
+    0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8,
+    0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,
+    0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC,
+    0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2,
+    0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,
+    0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6,
+    0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE,
+    0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,
+    0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9,
+    0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5,
+    0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,
+    0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3,
+    0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB,
+    0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,
+    0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF
+};
+
+} // namespace internal
+} // namespace tbb
+
+#ifndef __TBB_CompareAndSwap1
+#define __TBB_CompareAndSwap1 tbb::internal::__TBB_CompareAndSwapGeneric<1,uint8_t>
+#endif
+
+#ifndef __TBB_CompareAndSwap2 
+#define __TBB_CompareAndSwap2 tbb::internal::__TBB_CompareAndSwapGeneric<2,uint16_t>
+#endif
+
+#ifndef __TBB_CompareAndSwapW
+#define __TBB_CompareAndSwapW tbb::internal::__TBB_CompareAndSwapGeneric<sizeof(ptrdiff_t),ptrdiff_t>
+#endif
+
+#ifndef __TBB_FetchAndAdd1
+#define __TBB_FetchAndAdd1 tbb::internal::__TBB_FetchAndAddGeneric<1,uint8_t>
+#endif
+
+#ifndef __TBB_FetchAndAdd2
+#define __TBB_FetchAndAdd2 tbb::internal::__TBB_FetchAndAddGeneric<2,uint16_t>
+#endif
+
+#ifndef __TBB_FetchAndAdd4
+#define __TBB_FetchAndAdd4 tbb::internal::__TBB_FetchAndAddGeneric<4,uint32_t>
+#endif
+
+#ifndef __TBB_FetchAndAdd8
+#define __TBB_FetchAndAdd8 tbb::internal::__TBB_FetchAndAddGeneric<8,uint64_t>
+#endif
+
+#ifndef __TBB_FetchAndAddW
+#define __TBB_FetchAndAddW tbb::internal::__TBB_FetchAndAddGeneric<sizeof(ptrdiff_t),ptrdiff_t>
+#endif
+
+#ifndef __TBB_FetchAndStore1
+#define __TBB_FetchAndStore1 tbb::internal::__TBB_FetchAndStoreGeneric<1,uint8_t>
+#endif
+
+#ifndef __TBB_FetchAndStore2
+#define __TBB_FetchAndStore2 tbb::internal::__TBB_FetchAndStoreGeneric<2,uint16_t>
+#endif
+
+#ifndef __TBB_FetchAndStore4
+#define __TBB_FetchAndStore4 tbb::internal::__TBB_FetchAndStoreGeneric<4,uint32_t>
+#endif
+
+#ifndef __TBB_FetchAndStore8
+#define __TBB_FetchAndStore8 tbb::internal::__TBB_FetchAndStoreGeneric<8,uint64_t>
+#endif
+
+#ifndef __TBB_FetchAndStoreW
+#define __TBB_FetchAndStoreW tbb::internal::__TBB_FetchAndStoreGeneric<sizeof(ptrdiff_t),ptrdiff_t>
+#endif
+
+#if __TBB_DECL_FENCED_ATOMICS
+
+#ifndef __TBB_CompareAndSwap1__TBB_full_fence
+#define __TBB_CompareAndSwap1__TBB_full_fence __TBB_CompareAndSwap1
+#endif 
+#ifndef __TBB_CompareAndSwap1acquire
+#define __TBB_CompareAndSwap1acquire __TBB_CompareAndSwap1__TBB_full_fence
+#endif 
+#ifndef __TBB_CompareAndSwap1release
+#define __TBB_CompareAndSwap1release __TBB_CompareAndSwap1__TBB_full_fence
+#endif 
+
+#ifndef __TBB_CompareAndSwap2__TBB_full_fence
+#define __TBB_CompareAndSwap2__TBB_full_fence __TBB_CompareAndSwap2
+#endif
+#ifndef __TBB_CompareAndSwap2acquire
+#define __TBB_CompareAndSwap2acquire __TBB_CompareAndSwap2__TBB_full_fence
+#endif
+#ifndef __TBB_CompareAndSwap2release
+#define __TBB_CompareAndSwap2release __TBB_CompareAndSwap2__TBB_full_fence
+#endif
+
+#ifndef __TBB_CompareAndSwap4__TBB_full_fence
+#define __TBB_CompareAndSwap4__TBB_full_fence __TBB_CompareAndSwap4
+#endif 
+#ifndef __TBB_CompareAndSwap4acquire
+#define __TBB_CompareAndSwap4acquire __TBB_CompareAndSwap4__TBB_full_fence
+#endif 
+#ifndef __TBB_CompareAndSwap4release
+#define __TBB_CompareAndSwap4release __TBB_CompareAndSwap4__TBB_full_fence
+#endif 
+
+#ifndef __TBB_CompareAndSwap8__TBB_full_fence
+#define __TBB_CompareAndSwap8__TBB_full_fence __TBB_CompareAndSwap8
+#endif
+#ifndef __TBB_CompareAndSwap8acquire
+#define __TBB_CompareAndSwap8acquire __TBB_CompareAndSwap8__TBB_full_fence
+#endif
+#ifndef __TBB_CompareAndSwap8release
+#define __TBB_CompareAndSwap8release __TBB_CompareAndSwap8__TBB_full_fence
+#endif
+
+#ifndef __TBB_FetchAndAdd1__TBB_full_fence
+#define __TBB_FetchAndAdd1__TBB_full_fence __TBB_FetchAndAdd1
+#endif
+#ifndef __TBB_FetchAndAdd1acquire
+#define __TBB_FetchAndAdd1acquire __TBB_FetchAndAdd1__TBB_full_fence
+#endif
+#ifndef __TBB_FetchAndAdd1release
+#define __TBB_FetchAndAdd1release __TBB_FetchAndAdd1__TBB_full_fence
+#endif
+
+#ifndef __TBB_FetchAndAdd2__TBB_full_fence
+#define __TBB_FetchAndAdd2__TBB_full_fence __TBB_FetchAndAdd2
+#endif
+#ifndef __TBB_FetchAndAdd2acquire
+#define __TBB_FetchAndAdd2acquire __TBB_FetchAndAdd2__TBB_full_fence
+#endif
+#ifndef __TBB_FetchAndAdd2release
+#define __TBB_FetchAndAdd2release __TBB_FetchAndAdd2__TBB_full_fence
+#endif
+
+#ifndef __TBB_FetchAndAdd4__TBB_full_fence
+#define __TBB_FetchAndAdd4__TBB_full_fence __TBB_FetchAndAdd4
+#endif
+#ifndef __TBB_FetchAndAdd4acquire
+#define __TBB_FetchAndAdd4acquire __TBB_FetchAndAdd4__TBB_full_fence
+#endif
+#ifndef __TBB_FetchAndAdd4release
+#define __TBB_FetchAndAdd4release __TBB_FetchAndAdd4__TBB_full_fence
+#endif
+
+#ifndef __TBB_FetchAndAdd8__TBB_full_fence
+#define __TBB_FetchAndAdd8__TBB_full_fence __TBB_FetchAndAdd8
+#endif
+#ifndef __TBB_FetchAndAdd8acquire
+#define __TBB_FetchAndAdd8acquire __TBB_FetchAndAdd8__TBB_full_fence
+#endif
+#ifndef __TBB_FetchAndAdd8release
+#define __TBB_FetchAndAdd8release __TBB_FetchAndAdd8__TBB_full_fence
+#endif
+
+#ifndef __TBB_FetchAndStore1__TBB_full_fence
+#define __TBB_FetchAndStore1__TBB_full_fence __TBB_FetchAndStore1
+#endif
+#ifndef __TBB_FetchAndStore1acquire
+#define __TBB_FetchAndStore1acquire __TBB_FetchAndStore1__TBB_full_fence
+#endif
+#ifndef __TBB_FetchAndStore1release
+#define __TBB_FetchAndStore1release __TBB_FetchAndStore1__TBB_full_fence
+#endif
+
+#ifndef __TBB_FetchAndStore2__TBB_full_fence
+#define __TBB_FetchAndStore2__TBB_full_fence __TBB_FetchAndStore2
+#endif
+#ifndef __TBB_FetchAndStore2acquire
+#define __TBB_FetchAndStore2acquire __TBB_FetchAndStore2__TBB_full_fence
+#endif
+#ifndef __TBB_FetchAndStore2release
+#define __TBB_FetchAndStore2release __TBB_FetchAndStore2__TBB_full_fence
+#endif
+
+#ifndef __TBB_FetchAndStore4__TBB_full_fence
+#define __TBB_FetchAndStore4__TBB_full_fence __TBB_FetchAndStore4
+#endif
+#ifndef __TBB_FetchAndStore4acquire
+#define __TBB_FetchAndStore4acquire __TBB_FetchAndStore4__TBB_full_fence
+#endif
+#ifndef __TBB_FetchAndStore4release
+#define __TBB_FetchAndStore4release __TBB_FetchAndStore4__TBB_full_fence
+#endif
+
+#ifndef __TBB_FetchAndStore8__TBB_full_fence
+#define __TBB_FetchAndStore8__TBB_full_fence __TBB_FetchAndStore8
+#endif
+#ifndef __TBB_FetchAndStore8acquire
+#define __TBB_FetchAndStore8acquire __TBB_FetchAndStore8__TBB_full_fence
+#endif
+#ifndef __TBB_FetchAndStore8release
+#define __TBB_FetchAndStore8release __TBB_FetchAndStore8__TBB_full_fence
+#endif
+
+#endif // __TBB_DECL_FENCED_ATOMICS
+
+// Special atomic functions
+#ifndef __TBB_FetchAndAddWrelease
+#define __TBB_FetchAndAddWrelease __TBB_FetchAndAddW
+#endif
+
+#ifndef __TBB_FetchAndIncrementWacquire
+#define __TBB_FetchAndIncrementWacquire(P) __TBB_FetchAndAddW(P,1)
+#endif
+
+#ifndef __TBB_FetchAndDecrementWrelease
+#define __TBB_FetchAndDecrementWrelease(P) __TBB_FetchAndAddW(P,(-1))
+#endif
+
+template <typename T, size_t S>
+struct __TBB_machine_load_store {
+    static inline T load_with_acquire(const volatile T& location) {
+        T to_return = location;
+        __TBB_release_consistency_helper();
+        return to_return;
+    }
+
+    static inline void store_with_release(volatile T &location, T value) {
+        __TBB_release_consistency_helper();
+        location = value;
+    }
+};
+
+#if __TBB_WORDSIZE==4
+#if _MSC_VER
+using tbb::internal::int64_t;
+#endif
+// On 32-bit platforms, there should be definitions of __TBB_Store8 and __TBB_Load8
+#ifndef __TBB_Store8
+inline void __TBB_Store8 (volatile void *ptr, int64_t value) {
+    for(;;) {
+        int64_t result = *(int64_t *)ptr;
+        if( __TBB_CompareAndSwap8(ptr,value,result)==result ) break;
+    }
+}
+#endif
+
+#ifndef __TBB_Load8
+inline int64_t __TBB_Load8 (const volatile void *ptr) {
+    const int64_t anyvalue = 3264; // Could be anything, just the same for comparand and new value
+    return __TBB_CompareAndSwap8(const_cast<volatile void *>(ptr),anyvalue,anyvalue);
+}
+#endif
+
+template <typename T>
+struct __TBB_machine_load_store<T,8> {
+    static inline T load_with_acquire(const volatile T& location) {
+        T to_return = (T)__TBB_Load8((const volatile void*)&location);
+        __TBB_release_consistency_helper();
+        return to_return;
+    }
+
+    static inline void store_with_release(volatile T& location, T value) {
+        __TBB_release_consistency_helper();
+        __TBB_Store8((volatile void *)&location,(int64_t)value);
+    }
+};
+#endif /* __TBB_WORDSIZE==4 */
+
+#ifndef __TBB_load_with_acquire
+template<typename T>
+inline T __TBB_load_with_acquire(const volatile T &location) {
+    return __TBB_machine_load_store<T,sizeof(T)>::load_with_acquire(location);
+}
+#endif
+
+#ifndef __TBB_store_with_release
+template<typename T, typename V>
+inline void __TBB_store_with_release(volatile T& location, V value) {
+    __TBB_machine_load_store<T,sizeof(T)>::store_with_release(location,T(value));
+}
+//! Overload that exists solely to avoid /Wp64 warnings.
+inline void __TBB_store_with_release(volatile size_t& location, size_t value) {
+    __TBB_machine_load_store<size_t,sizeof(size_t)>::store_with_release(location,value);
+}
+#endif
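+// Usage sketch: the two helpers above are intended to be used as a pair; my_data and
+// my_ready are hypothetical variables shared between two threads.
+//
+//     // producer thread                        // consumer thread
+//     my_data = 42;                             while( !__TBB_load_with_acquire(my_ready) ) {}
+//     __TBB_store_with_release(my_ready, 1);    int x = my_data;   // guaranteed to observe 42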
+
+#ifndef __TBB_Log2
+inline intptr_t __TBB_Log2( uintptr_t x ) {
+    if( x==0 ) return -1;
+    intptr_t result = 0;
+    uintptr_t tmp;
+#if __TBB_WORDSIZE>=8
+    if( (tmp = x>>32) ) { x=tmp; result += 32; }
+#endif
+    if( (tmp = x>>16) ) { x=tmp; result += 16; }
+    if( (tmp = x>>8) )  { x=tmp; result += 8; }
+    if( (tmp = x>>4) )  { x=tmp; result += 4; }
+    if( (tmp = x>>2) )  { x=tmp; result += 2; }
+    return (x&2)? result+1: result;
+}
+#endif
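+// Worked example: __TBB_Log2(40) takes the x>>4 branch (x becomes 2, result 4), skips
+// the x>>2 branch, and since x&2 is set returns 5, i.e. floor(log2(40));
+// __TBB_Log2(0) returns -1 via the early exit.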
+
+#ifndef __TBB_AtomicOR
+inline void __TBB_AtomicOR( volatile void *operand, uintptr_t addend ) {
+    tbb::internal::atomic_backoff b;
+    for(;;) {
+        uintptr_t tmp = *(volatile uintptr_t *)operand;
+        uintptr_t result = __TBB_CompareAndSwapW(operand, tmp|addend, tmp);
+        if( result==tmp ) break;
+        b.pause();
+    }
+}
+#endif
+
+#ifndef __TBB_AtomicAND
+inline void __TBB_AtomicAND( volatile void *operand, uintptr_t addend ) {
+    tbb::internal::atomic_backoff b;
+    for(;;) {
+        uintptr_t tmp = *(volatile uintptr_t *)operand;
+        uintptr_t result = __TBB_CompareAndSwapW(operand, tmp&addend, tmp);
+        if( result==tmp ) break;
+        b.pause();
+    }
+}
+#endif
+
+#ifndef __TBB_TryLockByte
+inline bool __TBB_TryLockByte( unsigned char &flag ) {
+    return __TBB_CompareAndSwap1(&flag,1,0)==0;
+}
+#endif
+
+#ifndef __TBB_LockByte
+inline uintptr_t __TBB_LockByte( unsigned char& flag ) {
+    if ( !__TBB_TryLockByte(flag) ) {
+        tbb::internal::atomic_backoff b;
+        do {
+            b.pause();
+        } while ( !__TBB_TryLockByte(flag) );
+    }
+    return 0;
+}
+#endif
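+// Usage sketch: a minimal byte lock built from the helpers above; releasing with a
+// release store pairs with the full-fence CAS used to acquire. my_flag is a
+// hypothetical lock byte initialized to 0.
+//
+//     __TBB_LockByte( my_flag );                                 // acquire
+//     /* ... critical section ... */
+//     __TBB_store_with_release( my_flag, (unsigned char)0 );     // release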
+
+#ifndef __TBB_ReverseByte
+inline unsigned char __TBB_ReverseByte(unsigned char src) {
+    return tbb::internal::reverse<unsigned char>::byte_table[src];
+}
+#endif
+
+template<typename T>
+T __TBB_ReverseBits(T src)
+{
+    T dst;
+    unsigned char *original = (unsigned char *) &src;
+    unsigned char *reversed = (unsigned char *) &dst;
+
+    for( int i = sizeof(T)-1; i >= 0; i-- )
+        reversed[i] = __TBB_ReverseByte( original[sizeof(T)-i-1] );
+
+    return dst;
+}
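+// Example: for a 16-bit value, __TBB_ReverseBits(uint16_t(0x0001)) returns 0x8000:
+// each byte is bit-reversed via the lookup table and the byte order is swapped, so
+// bit 0 of the input becomes bit 15 of the result.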
+
+#endif /* __TBB_machine_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb_profiling.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb_profiling.h
new file mode 100644 (file)
index 0000000..c3bbb51
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_profiling_H
+#define __TBB_profiling_H
+
+// Check if the tools support is enabled
+#if (_WIN32||_WIN64||__linux__) && !__MINGW32__ && TBB_USE_THREADING_TOOLS
+
+#if _WIN32||_WIN64
+#include <stdlib.h>  /* mbstowcs_s */
+#endif
+#include "tbb_stddef.h"
+
+namespace tbb {
+    namespace internal {
+#if _WIN32||_WIN64
+        void __TBB_EXPORTED_FUNC itt_set_sync_name_v3( void *obj, const wchar_t* name ); 
+        inline size_t multibyte_to_widechar( wchar_t* wcs, const char* mbs, size_t bufsize) {
+#if _MSC_VER>=1400
+            size_t len;
+            mbstowcs_s( &len, wcs, bufsize, mbs, _TRUNCATE );
+            return len;   // mbstowcs_s counts null terminator
+#else
+            size_t len = mbstowcs( wcs, mbs, bufsize );
+            if(wcs && len!=size_t(-1) )
+                wcs[len<bufsize-1? len: bufsize-1] = wchar_t('\0');
+            return len+1; // mbstowcs does not count null terminator
+#endif
+        }
+#else
+        void __TBB_EXPORTED_FUNC itt_set_sync_name_v3( void *obj, const char* name ); 
+#endif
+    } // namespace internal
+} // namespace tbb
+
+//! Macro __TBB_DEFINE_PROFILING_SET_NAME(T) defines "set_name" methods for sync objects of type T
+/** Should be used in the "tbb" namespace only. 
+    Don't place semicolon after it to avoid compiler warnings. **/
+#if _WIN32||_WIN64
+    #define __TBB_DEFINE_PROFILING_SET_NAME(sync_object_type)    \
+        namespace profiling {                                                       \
+            inline void set_name( sync_object_type& obj, const wchar_t* name ) {    \
+                tbb::internal::itt_set_sync_name_v3( &obj, name );                  \
+            }                                                                       \
+            inline void set_name( sync_object_type& obj, const char* name ) {       \
+                size_t len = tbb::internal::multibyte_to_widechar(NULL, name, 0);   \
+                wchar_t *wname = new wchar_t[len];                                  \
+                tbb::internal::multibyte_to_widechar(wname, name, len);             \
+                set_name( obj, wname );                                             \
+                delete[] wname;                                                     \
+            }                                                                       \
+        }
+#else /* !WIN */
+    #define __TBB_DEFINE_PROFILING_SET_NAME(sync_object_type)    \
+        namespace profiling {                                                       \
+            inline void set_name( sync_object_type& obj, const char* name ) {       \
+                tbb::internal::itt_set_sync_name_v3( &obj, name );                  \
+            }                                                                       \
+        }
+#endif /* !WIN */
+
+#else /* no tools support */
+
+#if _WIN32||_WIN64
+    #define __TBB_DEFINE_PROFILING_SET_NAME(sync_object_type)    \
+        namespace profiling {                                               \
+            inline void set_name( sync_object_type&, const wchar_t* ) {}    \
+            inline void set_name( sync_object_type&, const char* ) {}       \
+        }
+#else /* !WIN */
+    #define __TBB_DEFINE_PROFILING_SET_NAME(sync_object_type)    \
+        namespace profiling {                                               \
+            inline void set_name( sync_object_type&, const char* ) {}       \
+        }
+#endif /* !WIN */
+
+#endif /* no tools support */
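+// Usage sketch: a synchronization class defined in namespace tbb opts into the tools
+// support by instantiating the macro once at namespace scope (no trailing semicolon),
+// after which clients may call tbb::profiling::set_name(obj, "name"). my_mutex is a
+// hypothetical type.
+//
+//     __TBB_DEFINE_PROFILING_SET_NAME(my_mutex)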
+
+#endif /* __TBB_profiling_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb_stddef.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb_stddef.h
new file mode 100644 (file)
index 0000000..0016c6d
--- /dev/null
@@ -0,0 +1,334 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_tbb_stddef_H
+#define __TBB_tbb_stddef_H
+
+// Marketing-driven product version
+#define TBB_VERSION_MAJOR 3
+#define TBB_VERSION_MINOR 0
+
+// Engineering-focused interface version
+#define TBB_INTERFACE_VERSION 5003
+#define TBB_INTERFACE_VERSION_MAJOR TBB_INTERFACE_VERSION/1000
+
+// The oldest major interface version still supported
+// To be used in SONAME, manifests, etc.
+#define TBB_COMPATIBLE_INTERFACE_VERSION 2
+
+#define __TBB_STRING_AUX(x) #x
+#define __TBB_STRING(x) __TBB_STRING_AUX(x)
+
+// We do not need defines below for resource processing on windows
+#if !defined RC_INVOKED
+
+// Define groups for Doxygen documentation
+/**
+ * @defgroup algorithms         Algorithms
+ * @defgroup containers         Containers
+ * @defgroup memory_allocation  Memory Allocation
+ * @defgroup synchronization    Synchronization
+ * @defgroup timing             Timing
+ * @defgroup task_scheduling    Task Scheduling
+ */
+
+// Simple text that is displayed on the main page of Doxygen documentation.
+/**
+ * \mainpage Main Page
+ *
+ * Click the tabs above for information about the
+ * - <a href="./modules.html">Modules</a> (groups of functionality) implemented by the library 
+ * - <a href="./annotated.html">Classes</a> provided by the library
+ * - <a href="./files.html">Files</a> constituting the library.
+ * .
+ * Please note that significant part of TBB functionality is implemented in the form of
+ * template functions, descriptions of which are not accessible on the <a href="./annotated.html">Classes</a>
+ * tab. Use <a href="./modules.html">Modules</a> or <a href="./namespacemembers.html">Namespace/Namespace Members</a>
+ * tabs to find them.
+ *
+ * Additional pieces of information can be found here
+ * - \subpage concepts
+ * .
+ */
+
+/** \page concepts TBB concepts
+    
+    A concept is a set of requirements to a type, which are necessary and sufficient
+    for the type to model a particular behavior or a set of behaviors. Some concepts 
+    are specific to a particular algorithm (e.g. algorithm body), while other ones 
+    are common to several algorithms (e.g. range concept). 
+
+    All TBB algorithms make use of different classes implementing various concepts.
+    Implementation classes are supplied by the user as type arguments of template 
+    parameters and/or as objects passed as function call arguments. The library 
+    provides predefined  implementations of some concepts (e.g. several kinds of 
+    \ref range_req "ranges"), while other ones must always be implemented by the user. 
+    
+    TBB defines a set of minimal requirements each concept must conform to. Here is 
+    the list of different concepts hyperlinked to the corresponding requirements specifications:
+    - \subpage range_req
+    - \subpage parallel_do_body_req
+    - \subpage parallel_for_body_req
+    - \subpage parallel_reduce_body_req
+    - \subpage parallel_scan_body_req
+    - \subpage parallel_sort_iter_req
+**/
+
+// Define preprocessor symbols used to determine architecture
+#if _WIN32||_WIN64
+#   if defined(_M_AMD64)
+#       define __TBB_x86_64 1
+#   elif defined(_M_IA64)
+#       define __TBB_ipf 1
+#   elif defined(_M_IX86)||defined(__i386__) // the latter for MinGW support
+#       define __TBB_x86_32 1
+#   endif
+#else /* Assume generic Unix */
+#   if !__linux__ && !__APPLE__
+#       define __TBB_generic_os 1
+#   endif
+#   if __x86_64__
+#       define __TBB_x86_64 1
+#   elif __ia64__
+#       define __TBB_ipf 1
+#   elif __i386__||__i386  // __i386 is for Sun OS
+#       define __TBB_x86_32 1
+#   else
+#       define __TBB_generic_arch 1
+#   endif
+#endif
+
+#if _MSC_VER
+// define the parts of stdint.h that are needed, but put them inside tbb::internal
+namespace tbb {
+namespace internal {
+    typedef __int8 int8_t;
+    typedef __int16 int16_t;
+    typedef __int32 int32_t;
+    typedef __int64 int64_t;
+    typedef unsigned __int8 uint8_t;
+    typedef unsigned __int16 uint16_t;
+    typedef unsigned __int32 uint32_t;
+    typedef unsigned __int64 uint64_t;
+} // namespace internal
+} // namespace tbb
+#else
+#include <stdint.h>
+#endif /* _MSC_VER */
+
+#if _MSC_VER >=1400
+#define __TBB_EXPORTED_FUNC   __cdecl
+#define __TBB_EXPORTED_METHOD __thiscall
+#else
+#define __TBB_EXPORTED_FUNC
+#define __TBB_EXPORTED_METHOD
+#endif
+
+#include <cstddef>      /* Need size_t and ptrdiff_t */
+
+#if _MSC_VER
+#define __TBB_tbb_windef_H
+#include "_tbb_windef.h"
+#undef __TBB_tbb_windef_H
+#endif
+
+#include "tbb_config.h"
+
+//! The namespace tbb contains all components of the library.
+namespace tbb {
+
+using std::size_t; using std::ptrdiff_t;
+
+    //! Type for an assertion handler
+    typedef void(*assertion_handler_type)( const char* filename, int line, const char* expression, const char * comment );
+
+#if TBB_USE_ASSERT
+
+//! Assert that x is true.
+/** If x is false, print assertion failure message.  
+    If the comment argument is not NULL, it is printed as part of the failure message.  
+    The comment argument has no other effect. */
+#define __TBB_ASSERT(predicate,message) ((predicate)?((void)0):tbb::assertion_failure(__FILE__,__LINE__,#predicate,message))
+#define __TBB_ASSERT_EX __TBB_ASSERT
+
+    //! Set assertion handler and return previous value of it.
+    assertion_handler_type __TBB_EXPORTED_FUNC set_assertion_handler( assertion_handler_type new_handler );
+
+    //! Process an assertion failure.
+    /** Normally called from __TBB_ASSERT macro.
+        If assertion handler is null, print message for assertion failure and abort.
+        Otherwise call the assertion handler. */
+    void __TBB_EXPORTED_FUNC assertion_failure( const char* filename, int line, const char* expression, const char* comment );
+
+#else
+
+//! No-op version of __TBB_ASSERT.
+#define __TBB_ASSERT(predicate,comment) ((void)0)
+//! "Extended" version is useful to suppress warnings if a variable is only used with an assert
+#define __TBB_ASSERT_EX(predicate,comment) ((void)(1 && (predicate)))
+
+#endif /* TBB_USE_ASSERT */
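+// Example: __TBB_ASSERT( index<size, "index out of range" ) checks the predicate when
+// TBB_USE_ASSERT is enabled and compiles to ((void)0) otherwise; __TBB_ASSERT_EX keeps
+// the predicate formally "used" so that variables referenced only in assertions do not
+// trigger unused-variable warnings in release builds.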
+
+//! The function returns the interface version of the TBB shared library being used.
+/**
+ * The version it returns is determined at runtime, not at compile/link time.
+ * So it can be different than the value of TBB_INTERFACE_VERSION obtained at compile time.
+ */
+extern "C" int __TBB_EXPORTED_FUNC TBB_runtime_interface_version();
+
+//! Dummy type that distinguishes splitting constructor from copy constructor.
+/**
+ * See description of parallel_for and parallel_reduce for example usages.
+ * @ingroup algorithms
+ */
+class split {
+};
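+// Usage sketch: a user-defined range distinguishes its splitting constructor from its
+// copy constructor by taking a split tag (my_range is hypothetical):
+//
+//     my_range( my_range& r, tbb::split ) { /* take half of r's iteration space */ }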
+
+/**
+ * @cond INTERNAL
+ * @brief Identifiers declared inside namespace internal should never be used directly by client code.
+ */
+namespace internal {
+
+//! Compile-time constant that is upper bound on cache line/sector size.
+/** It should be used only in situations where having a compile-time upper 
+    bound is more useful than a run-time exact answer.
+    @ingroup memory_allocation */
+const size_t NFS_MaxLineSize = 128;
+
+template<class T, int S>
+struct padded_base : T {
+    char pad[NFS_MaxLineSize - sizeof(T) % NFS_MaxLineSize];
+};
+template<class T> struct padded_base<T, 0> : T {};
+
+//! Pads type T to fill out to a multiple of cache line size.
+template<class T>
+struct padded : padded_base<T, sizeof(T) % NFS_MaxLineSize> {};
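+// Example: for a 24-byte type T, padded<T> appends a 104-byte pad so that
+// sizeof(padded<T>) is 128, one full NFS_MaxLineSize; types whose size is already a
+// multiple of the line size are meant to select the padded_base<T,0> specialization
+// and receive no pad.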
+
+//! Extended variant of the standard offsetof macro
+/** The standard offsetof macro is not sufficient for TBB as it can be used for
+    POD-types only. The constant 0x1000 (not NULL) is necessary to appease GCC. **/
+#define __TBB_offsetof(class_name, member_name) \
+    ((ptrdiff_t)&(reinterpret_cast<class_name*>(0x1000)->member_name) - 0x1000)
+
+//! Returns address of the object containing a member with the given name and address
+#define __TBB_get_object_ref(class_name, member_name, member_addr) \
+    (*reinterpret_cast<class_name*>((char*)member_addr - __TBB_offsetof(class_name, member_name)))
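+// Usage sketch: recover a reference to the enclosing object from the address of one of
+// its members (my_node and its member my_hook are hypothetical names):
+//
+//     my_node& owner = __TBB_get_object_ref( my_node, my_hook, hook_address );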
+
+//! Throws std::runtime_error with what() returning error_code description prefixed with aux_info
+void __TBB_EXPORTED_FUNC handle_perror( int error_code, const char* aux_info );
+
+#if TBB_USE_EXCEPTIONS
+    #define __TBB_TRY try
+    #define __TBB_CATCH(e) catch(e)
+    #define __TBB_THROW(e) throw e
+    #define __TBB_RETHROW() throw
+#else /* !TBB_USE_EXCEPTIONS */
+    inline bool __TBB_false() { return false; }
+    #define __TBB_TRY
+    #define __TBB_CATCH(e) if ( tbb::internal::__TBB_false() )
+    #define __TBB_THROW(e) ((void)0)
+    #define __TBB_RETHROW() ((void)0)
+#endif /* !TBB_USE_EXCEPTIONS */
+
+//! Report a runtime warning.
+void __TBB_EXPORTED_FUNC runtime_warning( const char* format, ... );
+
+#if TBB_USE_ASSERT
+static void* const poisoned_ptr = reinterpret_cast<void*>(-1);
+
+//! Set p to invalid pointer value.
+template<typename T>
+inline void poison_pointer( T*& p ) { p = reinterpret_cast<T*>(poisoned_ptr); }
+
+/** Expected to be used in assertions only, thus no empty form is defined. **/
+template<typename T>
+inline bool is_poisoned( T* p ) { return p == reinterpret_cast<T*>(poisoned_ptr); }
+#else
+template<typename T>
+inline void poison_pointer( T* ) {/*do nothing*/}
+#endif /* !TBB_USE_ASSERT */
+
+//! Cast pointer from U* to T.
+/** This method should be used sparingly as a last resort for dealing with 
+    situations that inherently break strict ISO C++ aliasing rules. */
+template<typename T, typename U> 
+inline T punned_cast( U* ptr ) {
+    uintptr_t x = reinterpret_cast<uintptr_t>(ptr);
+    return reinterpret_cast<T>(x);
+}
+
+//! Base class for types that should not be assigned.
+class no_assign {
+    // Deny assignment
+    void operator=( const no_assign& );
+public:
+#if __GNUC__
+    //! Explicitly define default construction, because otherwise gcc issues gratuitous warning.
+    no_assign() {}
+#endif /* __GNUC__ */
+};
+
+//! Base class for types that should not be copied or assigned.
+class no_copy: no_assign {
+    //! Deny copy construction
+    no_copy( const no_copy& );
+public:
+    //! Allow default construction
+    no_copy() {}
+};
+
+//! Class for determining type of std::allocator<T>::value_type.
+template<typename T>
+struct allocator_type {
+    typedef T value_type;
+};
+
+#if _MSC_VER
+//! Microsoft std::allocator has non-standard extension that strips const from a type. 
+template<typename T>
+struct allocator_type<const T> {
+    typedef T value_type;
+};
+#endif
+
+// Struct to be used as a version tag for inline functions.
+/** A version tag can be necessary to prevent the loader on Linux from using the wrong 
+    symbol in debug builds (when inline functions are compiled as out-of-line). **/
+struct version_tag_v3 {};
+
+typedef version_tag_v3 version_tag;
+
+} // internal
+//! @endcond
+
+} // tbb
+
+#endif /* RC_INVOKED */
+#endif /* __TBB_tbb_stddef_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb_thread.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbb_thread.h
new file mode 100644 (file)
index 0000000..14ee61e
--- /dev/null
@@ -0,0 +1,293 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_tbb_thread_H
+#define __TBB_tbb_thread_H
+
+#if _WIN32||_WIN64
+#include "machine/windows_api.h"
+#define __TBB_NATIVE_THREAD_ROUTINE unsigned WINAPI
+#define __TBB_NATIVE_THREAD_ROUTINE_PTR(r) unsigned (WINAPI* r)( void* )
+#else
+#define __TBB_NATIVE_THREAD_ROUTINE void*
+#define __TBB_NATIVE_THREAD_ROUTINE_PTR(r) void* (*r)( void* )
+#include <pthread.h>
+#endif // _WIN32||_WIN64
+
+#include "tbb_stddef.h"
+#include "tick_count.h"
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <iosfwd>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+namespace tbb {
+
+//! @cond INTERNAL
+namespace internal {
+    
+    class tbb_thread_v3;
+
+} // namespace internal
+
+void swap( internal::tbb_thread_v3& t1, internal::tbb_thread_v3& t2 ); 
+
+namespace internal {
+
+    //! Allocate a closure
+    void* __TBB_EXPORTED_FUNC allocate_closure_v3( size_t size );
+    //! Free a closure allocated by allocate_closure_v3
+    void __TBB_EXPORTED_FUNC free_closure_v3( void* );
+   
+    struct thread_closure_base {
+        void* operator new( size_t size ) {return allocate_closure_v3(size);}
+        void operator delete( void* ptr ) {free_closure_v3(ptr);}
+    };
+
+    template<class F> struct thread_closure_0: thread_closure_base {
+        F function;
+
+        static __TBB_NATIVE_THREAD_ROUTINE start_routine( void* c ) {
+            thread_closure_0 *self = static_cast<thread_closure_0*>(c);
+            self->function();
+            delete self;
+            return 0;
+        }
+        thread_closure_0( const F& f ) : function(f) {}
+    };
+    //! Structure used to pass user function with 1 argument to thread.  
+    template<class F, class X> struct thread_closure_1: thread_closure_base {
+        F function;
+        X arg1;
+        //! Routine passed to Windows's _beginthreadex by thread::internal_start() inside tbb.dll
+        static __TBB_NATIVE_THREAD_ROUTINE start_routine( void* c ) {
+            thread_closure_1 *self = static_cast<thread_closure_1*>(c);
+            self->function(self->arg1);
+            delete self;
+            return 0;
+        }
+        thread_closure_1( const F& f, const X& x ) : function(f), arg1(x) {}
+    };
+    template<class F, class X, class Y> struct thread_closure_2: thread_closure_base {
+        F function;
+        X arg1;
+        Y arg2;
+        //! Routine passed to Windows's _beginthreadex by thread::internal_start() inside tbb.dll
+        static __TBB_NATIVE_THREAD_ROUTINE start_routine( void* c ) {
+            thread_closure_2 *self = static_cast<thread_closure_2*>(c);
+            self->function(self->arg1, self->arg2);
+            delete self;
+            return 0;
+        }
+        thread_closure_2( const F& f, const X& x, const Y& y ) : function(f), arg1(x), arg2(y) {}
+    };
+
+    //! Versioned thread class.
+    class tbb_thread_v3 {
+        tbb_thread_v3(const tbb_thread_v3&); // = delete;   // Deny access
+    public:
+#if _WIN32||_WIN64
+        typedef HANDLE native_handle_type; 
+#else
+        typedef pthread_t native_handle_type; 
+#endif // _WIN32||_WIN64
+
+        class id;
+        //! Constructs a thread object that does not represent a thread of execution. 
+        tbb_thread_v3() : my_handle(0)
+#if _WIN32||_WIN64
+            , my_thread_id(0)
+#endif // _WIN32||_WIN64
+        {}
+        
+        //! Constructs an object and executes f() in a new thread
+        template <class F> explicit tbb_thread_v3(F f) {
+            typedef internal::thread_closure_0<F> closure_type;
+            internal_start(closure_type::start_routine, new closure_type(f));
+        }
+        //! Constructs an object and executes f(x) in a new thread
+        template <class F, class X> tbb_thread_v3(F f, X x) {
+            typedef internal::thread_closure_1<F,X> closure_type;
+            internal_start(closure_type::start_routine, new closure_type(f,x));
+        }
+        //! Constructs an object and executes f(x,y) in a new thread
+        template <class F, class X, class Y> tbb_thread_v3(F f, X x, Y y) {
+            typedef internal::thread_closure_2<F,X,Y> closure_type;
+            internal_start(closure_type::start_routine, new closure_type(f,x,y));
+        }
+
+        tbb_thread_v3& operator=(tbb_thread_v3& x) {
+            if (joinable()) detach();
+            my_handle = x.my_handle;
+            x.my_handle = 0;
+#if _WIN32||_WIN64
+            my_thread_id = x.my_thread_id;
+            x.my_thread_id = 0;
+#endif // _WIN32||_WIN64
+            return *this;
+        }
+        void swap( tbb_thread_v3& t ) {tbb::swap( *this, t );}
+        bool joinable() const {return my_handle!=0; }
+        //! The completion of the thread represented by *this happens before join() returns.
+        void __TBB_EXPORTED_METHOD join();
+        //! When detach() returns, *this no longer represents the possibly continuing thread of execution.
+        void __TBB_EXPORTED_METHOD detach();
+        ~tbb_thread_v3() {if( joinable() ) detach();}
+        inline id get_id() const;
+        native_handle_type native_handle() { return my_handle; }
+    
+        //! The number of hardware thread contexts.
+        static unsigned __TBB_EXPORTED_FUNC hardware_concurrency();
+    private:
+        native_handle_type my_handle; 
+#if _WIN32||_WIN64
+        DWORD my_thread_id;
+#endif // _WIN32||_WIN64
+
+        /** Runs start_routine(closure) on another thread and sets my_handle to the handle of the created thread. */
+        void __TBB_EXPORTED_METHOD internal_start( __TBB_NATIVE_THREAD_ROUTINE_PTR(start_routine), 
+                             void* closure );
+        friend void __TBB_EXPORTED_FUNC move_v3( tbb_thread_v3& t1, tbb_thread_v3& t2 );
+        friend void tbb::swap( tbb_thread_v3& t1, tbb_thread_v3& t2 ); 
+    };
+        
+    class tbb_thread_v3::id { 
+#if _WIN32||_WIN64
+        DWORD my_id;
+        id( DWORD id_ ) : my_id(id_) {}
+#else
+        pthread_t my_id;
+        id( pthread_t id_ ) : my_id(id_) {}
+#endif // _WIN32||_WIN64
+        friend class tbb_thread_v3;
+    public:
+        id() : my_id(0) {}
+
+        friend bool operator==( tbb_thread_v3::id x, tbb_thread_v3::id y );
+        friend bool operator!=( tbb_thread_v3::id x, tbb_thread_v3::id y );
+        friend bool operator<( tbb_thread_v3::id x, tbb_thread_v3::id y );
+        friend bool operator<=( tbb_thread_v3::id x, tbb_thread_v3::id y );
+        friend bool operator>( tbb_thread_v3::id x, tbb_thread_v3::id y );
+        friend bool operator>=( tbb_thread_v3::id x, tbb_thread_v3::id y );
+        
+        template<class charT, class traits>
+        friend std::basic_ostream<charT, traits>&
+        operator<< (std::basic_ostream<charT, traits> &out, 
+                    tbb_thread_v3::id id)
+        {
+            out << id.my_id;
+            return out;
+        }
+        friend tbb_thread_v3::id __TBB_EXPORTED_FUNC thread_get_id_v3();
+    }; // tbb_thread_v3::id
+
+    tbb_thread_v3::id tbb_thread_v3::get_id() const {
+#if _WIN32||_WIN64
+        return id(my_thread_id);
+#else
+        return id(my_handle);
+#endif // _WIN32||_WIN64
+    }
+    void __TBB_EXPORTED_FUNC move_v3( tbb_thread_v3& t1, tbb_thread_v3& t2 );
+    tbb_thread_v3::id __TBB_EXPORTED_FUNC thread_get_id_v3();
+    void __TBB_EXPORTED_FUNC thread_yield_v3();
+    void __TBB_EXPORTED_FUNC thread_sleep_v3(const tick_count::interval_t &i);
+
+    inline bool operator==(tbb_thread_v3::id x, tbb_thread_v3::id y)
+    {
+        return x.my_id == y.my_id;
+    }
+    inline bool operator!=(tbb_thread_v3::id x, tbb_thread_v3::id y)
+    {
+        return x.my_id != y.my_id;
+    }
+    inline bool operator<(tbb_thread_v3::id x, tbb_thread_v3::id y)
+    {
+        return x.my_id < y.my_id;
+    }
+    inline bool operator<=(tbb_thread_v3::id x, tbb_thread_v3::id y)
+    {
+        return x.my_id <= y.my_id;
+    }
+    inline bool operator>(tbb_thread_v3::id x, tbb_thread_v3::id y)
+    {
+        return x.my_id > y.my_id;
+    }
+    inline bool operator>=(tbb_thread_v3::id x, tbb_thread_v3::id y)
+    {
+        return x.my_id >= y.my_id;
+    }
+
+} // namespace internal;
+
+//! Users reference the thread class by the name tbb_thread
+typedef internal::tbb_thread_v3 tbb_thread;
+
+using internal::operator==;
+using internal::operator!=;
+using internal::operator<;
+using internal::operator>;
+using internal::operator<=;
+using internal::operator>=;
+
+inline void move( tbb_thread& t1, tbb_thread& t2 ) {
+    internal::move_v3(t1, t2);
+}
+
+inline void swap( internal::tbb_thread_v3& t1, internal::tbb_thread_v3& t2 ) {
+    tbb::tbb_thread::native_handle_type h = t1.my_handle;
+    t1.my_handle = t2.my_handle;
+    t2.my_handle = h;
+#if _WIN32||_WIN64
+    DWORD i = t1.my_thread_id;
+    t1.my_thread_id = t2.my_thread_id;
+    t2.my_thread_id = i;
+#endif /* _WIN32||_WIN64 */
+}
+
+namespace this_tbb_thread {
+    inline tbb_thread::id get_id() { return internal::thread_get_id_v3(); }
+    //! Offers the operating system the opportunity to schedule another thread.
+    inline void yield() { internal::thread_yield_v3(); }
+    //! The current thread blocks at least until the time specified.
+    inline void sleep(const tick_count::interval_t &i) { 
+        internal::thread_sleep_v3(i);  
+    }
+}  // namespace this_tbb_thread
+
+} // namespace tbb
+
+#endif /* __TBB_tbb_thread_H */
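Editorial note: as a usage sketch for the header above (not part of the commit; the file name and the worker function are illustrative), assuming TBB's include directory and library are on the compiler and linker paths:

// tbb_thread_example.cpp -- illustrative sketch, not part of the imported sources
#include "tbb/tbb_thread.h"
#include <iostream>

void worker( int id ) {
    std::cout << "worker " << id << " running" << std::endl;
}

int main() {
    // The constructor copies the callable and its argument into a heap-allocated
    // closure (thread_closure_1) and starts a native thread running worker(7).
    tbb::tbb_thread t( worker, 7 );
    std::cout << "hardware threads: "
              << tbb::tbb_thread::hardware_concurrency() << std::endl;
    t.join();   // the thread's completion happens before join() returns
    return 0;
}

As with std::thread in C++0x drafts, a joinable tbb_thread should be joined or detached; the destructor above detaches automatically, but the explicit join() ensures the worker finishes before main() returns.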
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbbmalloc_proxy.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/tbbmalloc_proxy.h
new file mode 100644 (file)
index 0000000..f15ca12
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+/*
+Replaces the standard memory allocation routines of the Microsoft* C/C++ RTL
+(malloc/free, global new/delete, etc.) with the TBB memory allocator.
+
+Include the following header in a source file of any binary that is loaded during
+application startup:
+
+#include "tbb/tbbmalloc_proxy.h"
+
+or add the following parameters to the linker options of the binary that is
+loaded during application startup. It can be either an exe file or a dll.
+
+For win32:
+tbbmalloc_proxy.lib /INCLUDE:"___TBB_malloc_proxy"
+For win64:
+tbbmalloc_proxy.lib /INCLUDE:"__TBB_malloc_proxy"
+*/
+
+#ifndef __TBB_tbbmalloc_proxy_H
+#define __TBB_tbbmalloc_proxy_H
+
+#if _MSC_VER
+
+#ifdef _DEBUG
+    #pragma comment(lib, "tbbmalloc_proxy_debug.lib")
+#else
+    #pragma comment(lib, "tbbmalloc_proxy.lib")
+#endif
+
+#if defined(_WIN64)
+    #pragma comment(linker, "/include:__TBB_malloc_proxy")
+#else
+    #pragma comment(linker, "/include:___TBB_malloc_proxy")
+#endif
+
+#else
+/* Primarily to support MinGW */
+
+extern "C" void __TBB_malloc_proxy();
+struct __TBB_malloc_proxy_caller {
+    __TBB_malloc_proxy_caller() { __TBB_malloc_proxy(); }
+} volatile __TBB_malloc_proxy_helper_object;
+
+#endif // _MSC_VER
+
+#endif //__TBB_tbbmalloc_proxy_H
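Editorial note: for reference (not part of the commit; the file name is illustrative), a minimal sketch of how the proxy header is meant to be used on Windows. The pragmas above already pull in tbbmalloc_proxy.lib and force the __TBB_malloc_proxy symbol, so including the header in one translation unit of the startup binary is enough:

// malloc_proxy_example.cpp -- illustrative sketch, Windows build assumed
#include "tbb/tbbmalloc_proxy.h"   // one translation unit of the startup binary is enough
#include <cstdlib>

int main() {
    void* raw = std::malloc( 64 );   // routed to the TBB scalable allocator by the proxy
    std::free( raw );
    int* data = new int[16];         // global new/delete are replaced as well
    delete[] data;
    return 0;
}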
diff --git a/deal.II/contrib/tbb/tbb30_104oss/include/tbb/tick_count.h b/deal.II/contrib/tbb/tbb30_104oss/include/tbb/tick_count.h
new file mode 100644 (file)
index 0000000..de6b3c2
--- /dev/null
@@ -0,0 +1,155 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_tick_count_H
+#define __TBB_tick_count_H
+
+#include "tbb_stddef.h"
+
+#if _WIN32||_WIN64
+#include "machine/windows_api.h"
+#elif __linux__
+#include <ctime>
+#else /* generic Unix */
+#include <sys/time.h>
+#endif /* (choice of OS) */
+
+namespace tbb {
+
+//! Absolute timestamp
+/** @ingroup timing */
+class tick_count {
+public:
+    //! Relative time interval.
+    class interval_t {
+        long long value;
+        explicit interval_t( long long value_ ) : value(value_) {}
+    public:
+        //! Construct a time interval representing zero time duration
+        interval_t() : value(0) {};
+
+        //! Construct a time interval representing a duration of sec seconds
+        explicit interval_t( double sec );
+
+        //! Return the length of a time interval in seconds
+        double seconds() const;
+
+        friend class tbb::tick_count;
+
+        //! Extract the intervals from the tick_counts and subtract them.
+        friend interval_t operator-( const tick_count& t1, const tick_count& t0 );
+
+        //! Add two intervals.
+        friend interval_t operator+( const interval_t& i, const interval_t& j ) {
+            return interval_t(i.value+j.value);
+        }
+
+        //! Subtract two intervals.
+        friend interval_t operator-( const interval_t& i, const interval_t& j ) {
+            return interval_t(i.value-j.value);
+        }
+
+        //! Accumulation operator
+        interval_t& operator+=( const interval_t& i ) {value += i.value; return *this;}
+
+        //! Subtraction operator
+        interval_t& operator-=( const interval_t& i ) {value -= i.value; return *this;}
+    };
+    
+    //! Construct an absolute timestamp initialized to zero.
+    tick_count() : my_count(0) {};
+
+    //! Return current time.
+    static tick_count now();
+    
+    //! Subtract two timestamps to get the time interval between them
+    friend interval_t operator-( const tick_count& t1, const tick_count& t0 );
+
+private:
+    long long my_count;
+};
+
+inline tick_count tick_count::now() {
+    tick_count result;
+#if _WIN32||_WIN64
+    LARGE_INTEGER qpcnt;
+    QueryPerformanceCounter(&qpcnt);
+    result.my_count = qpcnt.QuadPart;
+#elif __linux__
+    struct timespec ts;
+#if TBB_USE_ASSERT
+    int status = 
+#endif /* TBB_USE_ASSERT */
+        clock_gettime( CLOCK_REALTIME, &ts );
+    __TBB_ASSERT( status==0, "CLOCK_REALTIME not supported" );
+    result.my_count = static_cast<long long>(1000000000UL)*static_cast<long long>(ts.tv_sec) + static_cast<long long>(ts.tv_nsec);
+#else /* generic Unix */
+    struct timeval tv;
+#if TBB_USE_ASSERT
+    int status = 
+#endif /* TBB_USE_ASSERT */
+        gettimeofday(&tv, NULL);
+    __TBB_ASSERT( status==0, "gettimeofday failed" );
+    result.my_count = static_cast<long long>(1000000)*static_cast<long long>(tv.tv_sec) + static_cast<long long>(tv.tv_usec);
+#endif /*(choice of OS) */
+    return result;
+}
+
+inline tick_count::interval_t::interval_t( double sec )
+{
+#if _WIN32||_WIN64
+    LARGE_INTEGER qpfreq;
+    QueryPerformanceFrequency(&qpfreq);
+    value = static_cast<long long>(sec*qpfreq.QuadPart);
+#elif __linux__
+    value = static_cast<long long>(sec*1E9);
+#else /* generic Unix */
+    value = static_cast<long long>(sec*1E6);
+#endif /* (choice of OS) */
+}
+
+inline tick_count::interval_t operator-( const tick_count& t1, const tick_count& t0 ) {
+    return tick_count::interval_t( t1.my_count-t0.my_count );
+}
+
+inline double tick_count::interval_t::seconds() const {
+#if _WIN32||_WIN64
+    LARGE_INTEGER qpfreq;
+    QueryPerformanceFrequency(&qpfreq);
+    return value/(double)qpfreq.QuadPart;
+#elif __linux__
+    return value*1E-9;
+#else /* generic Unix */
+    return value*1E-6;
+#endif /* (choice of OS) */
+}
+
+} // namespace tbb
+
+#endif /* __TBB_tick_count_H */
+
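Editorial note: as a usage sketch for tick_count (not part of the commit; the loop is just placeholder work), timing a region of code looks like this:

// tick_count_example.cpp -- illustrative sketch, not part of the imported sources
#include "tbb/tick_count.h"
#include <iostream>

int main() {
    tbb::tick_count t0 = tbb::tick_count::now();
    double sum = 0;
    for( long i = 0; i < 10000000; ++i )   // placeholder work to measure
        sum += i * 0.5;
    tbb::tick_count t1 = tbb::tick_count::now();
    // Subtracting two tick_counts yields an interval_t; seconds() converts it to seconds.
    std::cout << "elapsed: " << (t1 - t0).seconds()
              << " s (sum=" << sum << ")" << std::endl;
    return 0;
}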
diff --git a/deal.II/contrib/tbb/tbb30_104oss/index.html b/deal.II/contrib/tbb/tbb30_104oss/index.html
new file mode 100644 (file)
index 0000000..a126891
--- /dev/null
@@ -0,0 +1,44 @@
+<HTML>
+<BODY>
+
+<H2>Overview</H2>
+Top level directory for Threading Building Blocks (TBB).
+<P>
+To build TBB, use the <A HREF=Makefile>top-level Makefile</A>; see also the <A HREF=build/index.html#build>build directions</A>.
+To port TBB to a new platform, operating system or architecture, see the <A HREF=build/index.html#port>porting directions</A>.
+</P>
+
+<H2>Files</H2>
+<DL>
+<DT><A HREF="Makefile">Makefile</A>
+<DD>Top-level Makefile for TBB.  See also the <A HREF=build/index.html#build>build directions</A>.
+</DL>
+
+<H2>Directories</H2>
+<DL>
+<DT><A HREF="doc/index.html">doc</A>
+<DD>Documentation for the library.
+<DT><A HREF="include/index.html">include</A>
+<DD>Include files required for compiling code that uses the library.
+<DT><A HREF="examples/index.html">examples</A>
+<DD>Examples of how to use the library.
+<DT><A HREF="src/index.html">src</A>
+<DD>Source code for the library.
+<DT><A HREF="build/index.html">build</A>
+<DD>Internal Makefile infrastructure for TBB.  Do not use directly; see the <A HREF=build/index.html#build>build directions</A>.
+<DT><A HREF="ia32">ia32</A>, <A HREF="intel64">intel64</A>, <A HREF="ia64">ia64</A>
+<DD>Platform-specific binary files for the library.
+</DL>
+
+<HR>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are 
+registered trademarks or trademarks of Intel Corporation or its 
+subsidiaries in the United States and other countries. 
+<p></p>
+* Other names and brands may be claimed as the property of others.
+</BODY>
+</HTML>
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/Makefile b/deal.II/contrib/tbb/tbb30_104oss/src/Makefile
new file mode 100644 (file)
index 0000000..0ae26e7
--- /dev/null
@@ -0,0 +1,205 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+tbb_root?=..
+examples_root:=$(tbb_root)/examples
+include $(tbb_root)/build/common.inc
+
+.PHONY: all tbb tbbmalloc test test_no_depends release debug examples clean
+
+all: release debug examples
+
+tbb: tbb_release tbb_debug
+
+tbbmalloc: tbbmalloc_release tbbmalloc_debug
+
+rml: rml_release rml_debug
+
+test: tbbmalloc_test_release rml_test_release test_release tbbmalloc_test_debug rml_test_debug test_debug
+
+test_no_depends: tbbmalloc_test_release_no_depends test_release_no_depends tbbmalloc_test_debug_no_depends test_debug_no_depends
+       @echo done
+
+release: tbb_release tbbmalloc_release
+release: $(call cross_cfg,tbbmalloc_test_release) $(call cross_cfg,test_release)
+
+debug: tbb_debug tbbmalloc_debug
+debug: $(call cross_cfg,tbbmalloc_test_debug) $(call cross_cfg, test_debug)
+
+examples: tbb tbbmalloc examples_debug clean_examples examples_release
+
+clean: clean_release clean_debug clean_examples
+       @echo clean done
+
+.PHONY: full
+full:
+       $(MAKE) -s -i -r --no-print-directory -f Makefile tbb_root=. clean all
+ifeq ($(tbb_os),windows)
+       $(MAKE) -s -i -r --no-print-directory -f Makefile tbb_root=. compiler=icl clean all native_examples
+else
+       $(MAKE) -s -i -r --no-print-directory -f Makefile tbb_root=. compiler=icc clean all native_examples
+endif
+ifeq ($(arch),intel64)
+       $(MAKE) -s -i -r --no-print-directory -f Makefile tbb_root=. arch=ia32 clean all
+endif
+# it does not test compiler=icc arch=ia32 on intel64 systems due to environment settings of icc
+
+native_examples: tbb tbbmalloc
+       $(MAKE) -C $(examples_root) -r -f Makefile tbb_root=.. compiler=$(native_compiler) tbb_build_prefix=$(tbb_build_prefix) debug test
+       $(MAKE) -C $(examples_root) -r -f Makefile tbb_root=.. compiler=$(native_compiler) tbb_build_prefix=$(tbb_build_prefix) clean release test
+
+../examples/% examples/%::
+       $(MAKE) -C $(examples_root) -r -f Makefile tbb_root=.. $(subst examples/,,$(subst ../,,$@))
+
+debug_%:: cfg?=debug
+debug_%:: run_cmd=$(debugger)
+test_% stress_% time_% perf_%:: cfg?=release
+debug_% test_% stress_% time_% perf_%::
+       $(MAKE) -C "$(work_dir)_$(cfg)"  -r -f $(tbb_root)/build/Makefile.test cfg=$(cfg) run_cmd="$(run_cmd)" tbb_root=$(tbb_root) $@
+
+clean_%::
+ifeq ($(cfg),)
+       @$(MAKE) -C "$(work_dir)_release"  -r -f $(tbb_root)/build/Makefile.test cfg=release tbb_root=$(tbb_root) $@
+       @$(MAKE) -C "$(work_dir)_debug"  -r -f $(tbb_root)/build/Makefile.test cfg=debug tbb_root=$(tbb_root) $@
+else
+       @$(MAKE) -C "$(work_dir)_$(cfg)"  -r -f $(tbb_root)/build/Makefile.test cfg=$(cfg) tbb_root=$(tbb_root) $@
+endif
+
+.PHONY: tbb_release tbb_debug test_release test_debug test_release_no_depends test_debug_no_depends
+
+# do not delete double-space after -C option
+tbb_release: mkdir_release
+       $(MAKE) -C "$(work_dir)_release"  -r -f $(tbb_root)/build/Makefile.tbb cfg=release tbb_root=$(tbb_root)
+
+tbb_debug: mkdir_debug
+       $(MAKE) -C "$(work_dir)_debug"  -r -f $(tbb_root)/build/Makefile.tbb cfg=debug tbb_root=$(tbb_root)
+
+test_release: $(call cross_cfg,mkdir_release) $(call cross_cfg,tbb_release) test_release_no_depends
+test_release_no_depends: 
+       $(MAKE) -C "$(call cross_cfg,$(work_dir)_release)"  -r -f $(tbb_root)/build/Makefile.test cfg=release tbb_root=$(tbb_root) 
+
+test_debug: $(call cross_cfg,mkdir_debug) $(call cross_cfg,tbb_debug) test_debug_no_depends
+test_debug_no_depends:
+       $(MAKE) -C "$(call cross_cfg,$(work_dir)_debug)"  -r -f $(tbb_root)/build/Makefile.test cfg=debug tbb_root=$(tbb_root)
+
+.PHONY: tbbmalloc_release tbbmalloc_debug
+.PHONY: tbbmalloc_dll_release tbbmalloc_dll_debug tbbmalloc_proxy_dll_release tbbmalloc_proxy_dll_debug
+.PHONY: tbbmalloc_test_release tbbmalloc_test_debug tbbmalloc_test_release_no_depends tbbmalloc_test_debug_no_depends
+
+tbbmalloc_release: mkdir_release
+       $(MAKE) -C "$(work_dir)_release"  -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=release malloc tbb_root=$(tbb_root)
+
+tbbmalloc_debug: mkdir_debug
+       $(MAKE) -C "$(work_dir)_debug"  -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=debug malloc tbb_root=$(tbb_root)
+
+tbbmalloc_dll_release: mkdir_release
+       $(MAKE) -C "$(work_dir)_release"  -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=release malloc_dll tbb_root=$(tbb_root)
+
+tbbmalloc_proxy_dll_release: mkdir_release
+       $(MAKE) -C "$(work_dir)_release"  -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=release  malloc_proxy_dll tbb_root=$(tbb_root)
+
+tbbmalloc_dll_debug: mkdir_debug
+       $(MAKE) -C "$(work_dir)_debug"  -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=debug malloc_dll tbb_root=$(tbb_root)
+
+tbbmalloc_proxy_dll_debug: mkdir_debug
+       $(MAKE) -C "$(work_dir)_debug"  -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=debug malloc_proxy_dll tbb_root=$(tbb_root)
+
+tbbmalloc_test_release: $(call cross_cfg,mkdir_release) $(call cross_cfg,tbbmalloc_release) tbbmalloc_test_release_no_depends
+tbbmalloc_test_release_no_depends:
+       $(MAKE) -C "$(call cross_cfg,$(work_dir)_release)"  -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=release malloc_test tbb_root=$(tbb_root)
+
+tbbmalloc_test_debug: $(call cross_cfg,mkdir_debug) $(call cross_cfg,tbbmalloc_debug) tbbmalloc_test_debug_no_depends
+tbbmalloc_test_debug_no_depends:
+       $(MAKE) -C "$(call cross_cfg,$(work_dir)_debug)"  -r -f $(tbb_root)/build/Makefile.tbbmalloc cfg=debug malloc_test tbb_root=$(tbb_root)
+
+.PHONY: rml_release rml_debug rml_test_release rml_test_debug
+.PHONY: rml_test_release_no_depends rml_test_debug_no_depends
+
+rml_release: mkdir_release
+       $(MAKE) -C "$(work_dir)_release"  -r -f $(tbb_root)/build/Makefile.rml cfg=release tbb_root=$(tbb_root) rml
+
+rml_debug: mkdir_debug
+       $(MAKE) -C "$(work_dir)_debug"  -r -f $(tbb_root)/build/Makefile.rml cfg=debug tbb_root=$(tbb_root) rml
+
+rml_test_release: $(call cross_cfg,mkdir_release) $(call cross_cfg,rml_release) rml_test_release_no_depends
+rml_test_release_no_depends:
+       $(MAKE) -C "$(call cross_cfg,$(work_dir)_release)"  -r -f $(tbb_root)/build/Makefile.rml cfg=release rml_test tbb_root=$(tbb_root)
+
+rml_test_debug: $(call cross_cfg,mkdir_debug) $(call cross_cfg,rml_debug) rml_test_debug_no_depends
+rml_test_debug_no_depends:
+       $(MAKE) -C "$(call cross_cfg,$(work_dir)_debug)"  -r -f $(tbb_root)/build/Makefile.rml cfg=debug rml_test tbb_root=$(tbb_root)
+
+.PHONY: examples_release examples_debug
+
+examples_release: tbb_release tbbmalloc_release
+       $(MAKE) -C $(examples_root) -r -f Makefile tbb_root=.. release test
+
+examples_debug: tbb_debug tbbmalloc_debug
+       $(MAKE) -C $(examples_root) -r -f Makefile tbb_root=.. debug test
+
+.PHONY: clean_release clean_debug clean_examples
+
+clean_release:
+       $(shell $(RM) $(work_dir)_release$(SLASH)*.* >$(NUL) 2>$(NUL))
+       $(shell $(RD) $(work_dir)_release >$(NUL) 2>$(NUL))
+
+clean_debug:
+       $(shell $(RM) $(work_dir)_debug$(SLASH)*.* >$(NUL) 2>$(NUL))
+       $(shell $(RD) $(work_dir)_debug >$(NUL) 2>$(NUL))
+
+clean_examples:
+       $(shell $(MAKE) -s -i -r -C $(examples_root) -f Makefile tbb_root=.. clean >$(NUL) 2>$(NUL))
+
+.PHONY: mkdir_release mkdir_debug codecov do_codecov info
+
+mkdir_release:
+       $(shell $(MD) "$(work_dir)_release" >$(NUL) 2>$(NUL))
+       @echo Created $(work_dir)_release directory
+
+mkdir_debug:
+       $(shell $(MD) "$(work_dir)_debug" >$(NUL) 2>$(NUL))
+       @echo Created $(work_dir)_debug directory
+
+codecov: compiler=$(if $(findstring windows,$(tbb_os)),icl,icc)
+codecov:
+       $(MAKE) tbb_root=.. codecov=yes do_codecov
+
+do_codecov:
+       $(MAKE) RML=yes tbbmalloc_test_release test_release
+       $(MAKE) clean_test_* cfg=release
+       $(MAKE) RML=yes crosstest=yes tbbmalloc_test_debug test_debug
+       $(MAKE) clean_test_* cfg=release
+       $(MAKE) rml_test_release
+       $(MAKE) clean_test_* cfg=release
+       $(MAKE) crosstest=yes rml_test_debug
+       $(MAKE) -C "$(work_dir)_release" -r -f $(tbb_root)/build/Makefile.test tbb_root=$(tbb_root) cfg=release codecov=yes codecov_gen
+
+info:
+       @echo OS: $(tbb_os)
+       @echo arch=$(arch)
+       @echo compiler=$(compiler)
+       @echo runtime=$(runtime)
+       @echo tbb_build_prefix=$(tbb_build_prefix)
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/index.html b/deal.II/contrib/tbb/tbb30_104oss/src/index.html
new file mode 100644 (file)
index 0000000..8caaeac
--- /dev/null
@@ -0,0 +1,77 @@
+<HTML>
+<BODY>
+
+<H2>Overview</H2>
+This directory contains the source code and unit tests for Threading Building Blocks.
+
+<H2>Directories</H2>
+<DL>
+<DT><A HREF="tbb">tbb</A>
+<DD>Source code of the TBB library core.
+<DT><A HREF="tbbmalloc">tbbmalloc</A>
+<DD>Source code of the TBB scalable memory allocator.
+<DT><A HREF="test">test</A>
+<DD>Source code of the TBB unit tests.
+<DT><A HREF="rml">rml</A>
+<DD>Source code of the Resource Management Layer (RML).
+<DT><A HREF="perf">perf</A>
+<DD>Source code of microbenchmarks.
+<DT><A HREF="old">old</A>
+<DD>Source code of deprecated TBB entities that are still shipped as part of the TBB library for the sake of backward compatibility.
+</DL>
+
+<h2>Files</h2>
+<dl>
+<dt><a href="Makefile">Makefile</a>
+</dt><dd>Advanced Makefile for developing and debugging TBB. See the <a href="../build/index.html#build">basic build directions</a>. Additional targets and options:
+    <dl>
+    <dt><tt>make test_{name} time_{name}</tt>
+    </dt><dd>Make and run an individual test or benchmark.</dd>
+    <dt><tt>make stress_{name}</tt>
+    </dt><dd>Equivalent to 'make test_{name}' but runs until a failure is detected or the run is terminated by the user.</dd>
+    <dt><tt>make run_cmd="{command}" [(above options or targets)]</tt>
+    </dt><dd>Command prefix for test execution. Also, "run_cmd=-" will ignore test execution failures. See also the -k and -i options of GNU make for more ways to keep building and testing despite failures.</dd>
+    <dt><tt>make debug_{name}</tt>
+    </dt><dd>Equivalent to 'make test_{name}' but compiles in debug mode and runs under a debugger ("run_cmd=$(debugger)").</dd>
+    <dt><tt>make args="{command-line arguments}" [(above options or targets)]</tt>
+    </dt><dd>Additional arguments for the run.</dd>
+    <dt><tt>make repeat="{N}" [(above options or targets)]</tt>
+    </dt><dd>Repeats execution N times.</dd>
+    <dt><tt>make clean_{filename}</tt>
+    </dt><dd>Removes executable, object, and other intermediate files with the specified filename ('*' also works).</dd>
+    <dt><tt>make cfg={debug|release} [(above options or targets)]</tt>
+    </dt><dd>Specifies a build mode or the corresponding directory to work in.</dd>
+    <dt><tt>make tbb_strict=1 [(above options or targets)]</tt>
+    </dt><dd>Enables warnings as errors.</dd>
+    <dt><tt>make examples/{target}</tt>
+    </dt><dd>Invokes examples/Makefile with the specified target.</dd>
+    <dt><tt>make clean_release clean_debug clean_examples</tt>
+    </dt><dd>Removes release or debug build directories, or cleans all examples.</dd>
+    <dt><tt>make test_no_depends</tt>
+    </dt><dd>Equivalent to 'make test' but does not check for library updates.</dd>
+    <dt><tt>make info</tt>
+    </dt><dd>Outputs information about the build configuration and directories.</dd>
+    <dt><tt>make cpp0x=1 [(above options or targets)]</tt>
+    </dt><dd>Enables C++0x extensions like lambdas for compilers that implement them as experimental features.</dd>
+    <dt><tt>make CXXFLAGS={Flags} [(above options or targets)]</tt>
+    </dt><dd>Specifies additional options for the compiler.</dd>
+    <dt><tt>make target={name} [(above options or targets)]</tt>
+    </dt><dd>Includes an additional build/{name}.inc file after the OS-specific one.</dd>
+    <dt><tt>make extra_inc={filename} [(above options or targets)]</tt>
+    </dt><dd>Includes an additional makefile.</dd>
+
+    </dd></dl>
+</dd></dl>
+
+<HR>
+<A HREF="../index.html">Up to parent directory</A>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are 
+registered trademarks or trademarks of Intel Corporation or its 
+subsidiaries in the United States and other countries. 
+<p></p>
+* Other names and brands may be claimed as the property of others.
+</BODY>
+</HTML>
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/old/concurrent_queue_v2.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/old/concurrent_queue_v2.cpp
new file mode 100644 (file)
index 0000000..0359a95
--- /dev/null
@@ -0,0 +1,382 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "concurrent_queue_v2.h"
+#include "tbb/cache_aligned_allocator.h"
+#include "tbb/spin_mutex.h"
+#include "tbb/atomic.h"
+#include <cstring>
+#include <stdio.h>
+
+#if defined(_MSC_VER) && defined(_Wp64)
+    // Workaround for overzealous compiler warnings in /Wp64 mode
+    #pragma warning (disable: 4267)
+#endif
+
+#define RECORD_EVENTS 0
+
+using namespace std;
+
+namespace tbb {
+
+namespace internal {
+
+class concurrent_queue_rep;
+
+//! A queue using simple locking.
+/** For efficiency, this class has no constructor.  
+    The caller is expected to zero-initialize it. */
+struct micro_queue {
+    typedef concurrent_queue_base::page page;
+    typedef size_t ticket;
+
+    atomic<page*> head_page;
+    atomic<ticket> head_counter;
+
+    atomic<page*> tail_page;
+    atomic<ticket> tail_counter;
+
+    spin_mutex page_mutex;
+    
+    class push_finalizer: no_copy {
+        ticket my_ticket;
+        micro_queue& my_queue;
+    public:
+        push_finalizer( micro_queue& queue, ticket k ) :
+            my_ticket(k), my_queue(queue)
+        {}
+        ~push_finalizer() {
+            my_queue.tail_counter = my_ticket;
+        }
+    };
+
+    void push( const void* item, ticket k, concurrent_queue_base& base );
+
+    class pop_finalizer: no_copy {
+        ticket my_ticket;
+        micro_queue& my_queue;
+        page* my_page; 
+    public:
+        pop_finalizer( micro_queue& queue, ticket k, page* p ) :
+            my_ticket(k), my_queue(queue), my_page(p)
+        {}
+        ~pop_finalizer() {
+            page* p = my_page;
+            if( p ) {
+                spin_mutex::scoped_lock lock( my_queue.page_mutex );
+                page* q = p->next;
+                my_queue.head_page = q;
+                if( !q ) {
+                    my_queue.tail_page = NULL;
+                }
+            }
+            my_queue.head_counter = my_ticket;
+            if( p ) 
+                operator delete(p);
+        }
+    };
+
+    bool pop( void* dst, ticket k, concurrent_queue_base& base );
+};
+
+//! Internal representation of a ConcurrentQueue.
+/** For efficiency, this class has no constructor.  
+    The caller is expected to zero-initialize it. */
+class concurrent_queue_rep {
+public:
+    typedef size_t ticket;
+
+private:
+    friend struct micro_queue;
+
+    //! Approximately n_queue/golden ratio
+    static const size_t phi = 3;
+
+public:
+    //! Must be power of 2
+    static const size_t n_queue = 8; 
+
+    //! Map ticket to an array index
+    static size_t index( ticket k ) {
+        return k*phi%n_queue;
+    }
+
+    atomic<ticket> head_counter;
+    char pad1[NFS_MaxLineSize-sizeof(atomic<ticket>)];
+
+    atomic<ticket> tail_counter;
+    char pad2[NFS_MaxLineSize-sizeof(atomic<ticket>)];
+    micro_queue array[n_queue];    
+
+    micro_queue& choose( ticket k ) {
+        // The formula here approximates LRU in a cache-oblivious way.
+        return array[index(k)];
+    }
+
+    //! Value for effective_capacity that denotes unbounded queue.
+    static const ptrdiff_t infinite_capacity = ptrdiff_t(~size_t(0)/2);
+};
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // unary minus operator applied to unsigned type, result still unsigned
+    #pragma warning( push )
+    #pragma warning( disable: 4146 )
+#endif
+
+//------------------------------------------------------------------------
+// micro_queue
+//------------------------------------------------------------------------
+void micro_queue::push( const void* item, ticket k, concurrent_queue_base& base ) {
+    k &= -concurrent_queue_rep::n_queue;
+    page* p = NULL;
+    size_t index = (k/concurrent_queue_rep::n_queue & base.items_per_page-1);
+    if( !index ) {
+        size_t n = sizeof(page) + base.items_per_page*base.item_size;
+        p = static_cast<page*>(operator new( n ));
+        p->mask = 0;
+        p->next = NULL;
+    }
+    {
+        push_finalizer finalizer( *this, k+concurrent_queue_rep::n_queue ); 
+        spin_wait_until_eq( tail_counter, k );
+        if( p ) {
+            spin_mutex::scoped_lock lock( page_mutex );
+            if( page* q = tail_page )
+                q->next = p;
+            else
+                head_page = p; 
+            tail_page = p;
+        } else {
+            p = tail_page;
+        }
+        base.copy_item( *p, index, item );
+        // If no exception was thrown, mark item as present.
+        p->mask |= uintptr_t(1)<<index;
+    } 
+}
+
+bool micro_queue::pop( void* dst, ticket k, concurrent_queue_base& base ) {
+    k &= -concurrent_queue_rep::n_queue;
+    spin_wait_until_eq( head_counter, k );
+    spin_wait_while_eq( tail_counter, k );
+    page& p = *head_page;
+    __TBB_ASSERT( &p, NULL );
+    size_t index = (k/concurrent_queue_rep::n_queue & base.items_per_page-1);
+    bool success = false; 
+    {
+        pop_finalizer finalizer( *this, k+concurrent_queue_rep::n_queue, index==base.items_per_page-1 ? &p : NULL ); 
+        if( p.mask & uintptr_t(1)<<index ) {
+            success = true;
+            base.assign_and_destroy_item( dst, p, index );
+        }
+    }
+    return success;
+}
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    #pragma warning( pop )
+#endif
+
+//------------------------------------------------------------------------
+// concurrent_queue_base
+//------------------------------------------------------------------------
+concurrent_queue_base::concurrent_queue_base( size_t item_size ) {
+    items_per_page = item_size<=8 ? 32 :
+                     item_size<=16 ? 16 : 
+                     item_size<=32 ? 8 :
+                     item_size<=64 ? 4 :
+                     item_size<=128 ? 2 :
+                     1;
+    my_capacity = size_t(-1)/(item_size>1 ? item_size : 2); 
+    my_rep = cache_aligned_allocator<concurrent_queue_rep>().allocate(1);
+    __TBB_ASSERT( (size_t)my_rep % NFS_GetLineSize()==0, "alignment error" );
+    __TBB_ASSERT( (size_t)&my_rep->head_counter % NFS_GetLineSize()==0, "alignment error" );
+    __TBB_ASSERT( (size_t)&my_rep->tail_counter % NFS_GetLineSize()==0, "alignment error" );
+    __TBB_ASSERT( (size_t)&my_rep->array % NFS_GetLineSize()==0, "alignment error" );
+    memset(my_rep,0,sizeof(concurrent_queue_rep));
+    this->item_size = item_size;
+}
+
+concurrent_queue_base::~concurrent_queue_base() {
+    size_t nq = my_rep->n_queue;
+    for( size_t i=0; i<nq; i++ ) {
+        page* tp = my_rep->array[i].tail_page;
+        __TBB_ASSERT( my_rep->array[i].head_page==tp, "at most one page should remain" );
+        if( tp!=NULL )
+            delete tp;
+    }
+    cache_aligned_allocator<concurrent_queue_rep>().deallocate(my_rep,1);
+}
+
+void concurrent_queue_base::internal_push( const void* src ) {
+    concurrent_queue_rep& r = *my_rep;
+    concurrent_queue_rep::ticket k  = r.tail_counter++;
+    ptrdiff_t e = my_capacity;
+    if( e<concurrent_queue_rep::infinite_capacity ) {
+        atomic_backoff backoff;
+        for(;;) {
+            if( (ptrdiff_t)(k-r.head_counter)<e ) break;
+            backoff.pause();
+            e = const_cast<volatile ptrdiff_t&>(my_capacity);
+        }
+    } 
+    r.choose(k).push(src,k,*this);
+}
+
+void concurrent_queue_base::internal_pop( void* dst ) {
+    concurrent_queue_rep& r = *my_rep;
+    concurrent_queue_rep::ticket k;
+    do {
+        k = r.head_counter++;
+    } while( !r.choose(k).pop(dst,k,*this) );
+}
+
+bool concurrent_queue_base::internal_pop_if_present( void* dst ) {
+    concurrent_queue_rep& r = *my_rep;
+    concurrent_queue_rep::ticket k;
+    do {
+        atomic_backoff backoff;
+        for(;;) {
+            k = r.head_counter;
+            if( r.tail_counter<=k ) {
+                // Queue is empty 
+                return false;
+            }
+            // Queue had item with ticket k when we looked.  Attempt to get that item.
+            if( r.head_counter.compare_and_swap(k+1,k)==k ) {
+                break;
+            }
+            // Another thread snatched the item, so pause and retry.
+            backoff.pause();
+        }
+    } while( !r.choose(k).pop(dst,k,*this) );
+    return true;
+}
+
+bool concurrent_queue_base::internal_push_if_not_full( const void* src ) {
+    concurrent_queue_rep& r = *my_rep;
+    atomic_backoff backoff;
+    concurrent_queue_rep::ticket k;
+    for(;;) {
+        k = r.tail_counter;
+        if( (ptrdiff_t)(k-r.head_counter)>=my_capacity ) {
+            // Queue is full
+            return false;
+        }
+        // Queue had empty slot with ticket k when we looked.  Attempt to claim that slot.
+        if( r.tail_counter.compare_and_swap(k+1,k)==k ) 
+            break;
+        // Another thread claimed the slot, so pause and retry.
+        backoff.pause();
+    }
+    r.choose(k).push(src,k,*this);
+    return true;
+}
+
+ptrdiff_t concurrent_queue_base::internal_size() const {
+    __TBB_ASSERT( sizeof(ptrdiff_t)<=sizeof(size_t), NULL );
+    return ptrdiff_t(my_rep->tail_counter-my_rep->head_counter);
+}
+
+void concurrent_queue_base::internal_set_capacity( ptrdiff_t capacity, size_t /*item_size*/ ) {
+    my_capacity = capacity<0 ? concurrent_queue_rep::infinite_capacity : capacity;
+}
+
+//------------------------------------------------------------------------
+// concurrent_queue_iterator_rep
+//------------------------------------------------------------------------
+class  concurrent_queue_iterator_rep: no_assign {
+public:
+    typedef concurrent_queue_rep::ticket ticket;
+    ticket head_counter;   
+    const concurrent_queue_base& my_queue;
+    concurrent_queue_base::page* array[concurrent_queue_rep::n_queue];
+    concurrent_queue_iterator_rep( const concurrent_queue_base& queue ) : 
+        head_counter(queue.my_rep->head_counter),
+        my_queue(queue)
+    {
+        const concurrent_queue_rep& rep = *queue.my_rep;
+        for( size_t k=0; k<concurrent_queue_rep::n_queue; ++k )
+            array[k] = rep.array[k].head_page;
+    }
+    //! Get pointer to kth element
+    void* choose( size_t k ) {
+        if( k==my_queue.my_rep->tail_counter )
+            return NULL;
+        else {
+            concurrent_queue_base::page* p = array[concurrent_queue_rep::index(k)];
+            __TBB_ASSERT(p,NULL);
+            size_t i = k/concurrent_queue_rep::n_queue & my_queue.items_per_page-1;
+            return static_cast<unsigned char*>(static_cast<void*>(p+1)) + my_queue.item_size*i;
+        }
+    }
+};
+
+//------------------------------------------------------------------------
+// concurrent_queue_iterator_base
+//------------------------------------------------------------------------
+concurrent_queue_iterator_base::concurrent_queue_iterator_base( const concurrent_queue_base& queue ) {
+    my_rep = new concurrent_queue_iterator_rep(queue);
+    my_item = my_rep->choose(my_rep->head_counter);
+}
+
+void concurrent_queue_iterator_base::assign( const concurrent_queue_iterator_base& other ) {
+    if( my_rep!=other.my_rep ) {
+        if( my_rep ) {
+            delete my_rep;
+            my_rep = NULL;
+        }
+        if( other.my_rep ) {
+            my_rep = new concurrent_queue_iterator_rep( *other.my_rep );
+        }
+    }
+    my_item = other.my_item;
+}
+
+void concurrent_queue_iterator_base::advance() {
+    __TBB_ASSERT( my_item, "attempt to increment iterator past end of queue" );  
+    size_t k = my_rep->head_counter;
+    const concurrent_queue_base& queue = my_rep->my_queue;
+    __TBB_ASSERT( my_item==my_rep->choose(k), NULL );
+    size_t i = k/concurrent_queue_rep::n_queue & queue.items_per_page-1;
+    if( i==queue.items_per_page-1 ) {
+        concurrent_queue_base::page*& root = my_rep->array[concurrent_queue_rep::index(k)];
+        root = root->next;
+    }
+    my_rep->head_counter = k+1;
+    my_item = my_rep->choose(k+1);
+}
+
+concurrent_queue_iterator_base::~concurrent_queue_iterator_base() {
+    delete my_rep;
+    my_rep = NULL;
+}
+
+} // namespace internal
+
+} // namespace tbb
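Editorial note: to make the ticket-to-subqueue mapping in concurrent_queue_rep concrete (this demo is not part of the commit), the formula index(k) = k*phi % n_queue with phi = 3 and n_queue = 8 spreads consecutive tickets across all eight micro_queues, since phi is coprime with n_queue:

// index_mapping_demo.cpp -- illustrative only; mirrors concurrent_queue_rep::index()
#include <cstdio>

int main() {
    const unsigned n_queue = 8;   // must be a power of 2
    const unsigned phi = 3;       // approximately n_queue divided by the golden ratio
    for( unsigned k = 0; k < 16; ++k )
        std::printf( "ticket %2u -> micro_queue %u\n", k, k * phi % n_queue );
    return 0;
}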
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/old/concurrent_queue_v2.h b/deal.II/contrib/tbb/tbb30_104oss/src/old/concurrent_queue_v2.h
new file mode 100644 (file)
index 0000000..d55b5a3
--- /dev/null
@@ -0,0 +1,328 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_concurrent_queue_H
+#define __TBB_concurrent_queue_H
+
+#include "tbb/tbb_stddef.h"
+#include <new>
+
+namespace tbb {
+
+template<typename T> class concurrent_queue;
+
+//! @cond INTERNAL
+namespace internal {
+
+class concurrent_queue_rep;
+class concurrent_queue_iterator_rep;
+class concurrent_queue_iterator_base;
+template<typename Container, typename Value> class concurrent_queue_iterator;
+
+//! For internal use only.
+/** Type-independent portion of concurrent_queue.
+    @ingroup containers */
+class concurrent_queue_base: no_copy {
+    //! Internal representation
+    concurrent_queue_rep* my_rep;
+
+    friend class concurrent_queue_rep;
+    friend struct micro_queue;
+    friend class concurrent_queue_iterator_rep;
+    friend class concurrent_queue_iterator_base;
+protected:
+    //! Prefix on a page
+    struct page {
+        page* next;
+        uintptr_t mask; 
+    };
+
+    //! Capacity of the queue
+    ptrdiff_t my_capacity;
+   
+    //! Always a power of 2
+    size_t items_per_page;
+
+    //! Size of an item
+    size_t item_size;
+private:
+    virtual void copy_item( page& dst, size_t index, const void* src ) = 0;
+    virtual void assign_and_destroy_item( void* dst, page& src, size_t index ) = 0;
+protected:
+    __TBB_EXPORTED_METHOD concurrent_queue_base( size_t item_size );
+    virtual __TBB_EXPORTED_METHOD ~concurrent_queue_base();
+
+    //! Enqueue item at tail of queue
+    void __TBB_EXPORTED_METHOD internal_push( const void* src );
+
+    //! Dequeue item from head of queue
+    void __TBB_EXPORTED_METHOD internal_pop( void* dst );
+
+    //! Attempt to enqueue item onto queue.
+    bool __TBB_EXPORTED_METHOD internal_push_if_not_full( const void* src );
+
+    //! Attempt to dequeue item from queue.
+    /** Returns false if there was no item to dequeue. */
+    bool __TBB_EXPORTED_METHOD internal_pop_if_present( void* dst );
+
+    //! Get size of queue
+    ptrdiff_t __TBB_EXPORTED_METHOD internal_size() const;
+
+    void __TBB_EXPORTED_METHOD internal_set_capacity( ptrdiff_t capacity, size_t element_size );
+};
+
+//! Type-independent portion of concurrent_queue_iterator.
+/** @ingroup containers */
+class concurrent_queue_iterator_base {
+    //! concurrent_queue over which we are iterating.
+    /** NULL if one past last element in queue. */
+    concurrent_queue_iterator_rep* my_rep;
+
+    template<typename C, typename T, typename U>
+    friend bool operator==( const concurrent_queue_iterator<C,T>& i, const concurrent_queue_iterator<C,U>& j );
+
+    template<typename C, typename T, typename U>
+    friend bool operator!=( const concurrent_queue_iterator<C,T>& i, const concurrent_queue_iterator<C,U>& j );
+protected:
+    //! Pointer to current item
+    mutable void* my_item;
+
+    //! Default constructor
+    __TBB_EXPORTED_METHOD concurrent_queue_iterator_base() : my_rep(NULL), my_item(NULL) {}
+
+    //! Copy constructor
+    concurrent_queue_iterator_base( const concurrent_queue_iterator_base& i ) : my_rep(NULL), my_item(NULL) {
+        assign(i);
+    }
+
+    //! Construct iterator pointing to head of queue.
+    concurrent_queue_iterator_base( const concurrent_queue_base& queue );
+
+    //! Assignment
+    void __TBB_EXPORTED_METHOD assign( const concurrent_queue_iterator_base& i );
+
+    //! Advance iterator one step towards tail of queue.
+    void __TBB_EXPORTED_METHOD advance();
+
+    //! Destructor
+    __TBB_EXPORTED_METHOD ~concurrent_queue_iterator_base();
+};
+
+//! Meets requirements of a forward iterator for STL.
+/** Value is either the T or const T type of the container.
+    @ingroup containers */
+template<typename Container, typename Value>
+class concurrent_queue_iterator: public concurrent_queue_iterator_base {
+#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
+    template<typename T>
+    friend class ::tbb::concurrent_queue;
+#else
+public: // workaround for MSVC
+#endif 
+    //! Construct iterator pointing to head of queue.
+    concurrent_queue_iterator( const concurrent_queue_base& queue ) :
+        concurrent_queue_iterator_base(queue)
+    {
+    }
+public:
+    concurrent_queue_iterator() {}
+
+    /** If Value==Container::value_type, then this routine is the copy constructor. 
+        If Value==const Container::value_type, then this routine is a conversion constructor. */
+    concurrent_queue_iterator( const concurrent_queue_iterator<Container,typename Container::value_type>& other ) :
+        concurrent_queue_iterator_base(other)
+    {}
+
+    //! Iterator assignment
+    concurrent_queue_iterator& operator=( const concurrent_queue_iterator& other ) {
+        assign(other);
+        return *this;
+    }
+
+    //! Reference to current item 
+    Value& operator*() const {
+        return *static_cast<Value*>(my_item);
+    }
+
+    Value* operator->() const {return &operator*();}
+
+    //! Advance to next item in queue
+    concurrent_queue_iterator& operator++() {
+        advance();
+        return *this;
+    }
+
+    //! Post increment
+    Value* operator++(int) {
+        Value* result = &operator*();
+        operator++();
+        return result;
+    }
+}; // concurrent_queue_iterator
+
+template<typename C, typename T, typename U>
+bool operator==( const concurrent_queue_iterator<C,T>& i, const concurrent_queue_iterator<C,U>& j ) {
+    return i.my_item==j.my_item;
+}
+
+template<typename C, typename T, typename U>
+bool operator!=( const concurrent_queue_iterator<C,T>& i, const concurrent_queue_iterator<C,U>& j ) {
+    return i.my_item!=j.my_item;
+}
+
+} // namespace internal;
+//! @endcond
+
+//! A high-performance thread-safe queue.
+/** Multiple threads may each push and pop concurrently.
+    Assignment and copy construction are not allowed.
+    @ingroup containers */
+template<typename T>
+class concurrent_queue: public internal::concurrent_queue_base {
+    template<typename Container, typename Value> friend class internal::concurrent_queue_iterator;
+
+    //! Class used to ensure exception-safety of method "pop" 
+    class destroyer {
+        T& my_value;
+    public:
+        destroyer( T& value ) : my_value(value) {}
+        ~destroyer() {my_value.~T();}          
+    };
+
+    T& get_ref( page& page, size_t index ) {
+        __TBB_ASSERT( index<items_per_page, NULL );
+        return static_cast<T*>(static_cast<void*>(&page+1))[index];
+    }
+
+    /*override*/ virtual void copy_item( page& dst, size_t index, const void* src ) {
+        new( &get_ref(dst,index) ) T(*static_cast<const T*>(src)); 
+    }
+
+    /*override*/ virtual void assign_and_destroy_item( void* dst, page& src, size_t index ) {
+        T& from = get_ref(src,index);
+        destroyer d(from);
+        *static_cast<T*>(dst) = from;
+    }
+
+public:
+    //! Element type in the queue.
+    typedef T value_type;
+
+    //! Reference type
+    typedef T& reference;
+
+    //! Const reference type
+    typedef const T& const_reference;
+
+    //! Integral type for representing size of the queue.
+    /** Notice that the size_type is a signed integral type.
+        This is because the size can be negative if there are pending pops without corresponding pushes. */
+    typedef std::ptrdiff_t size_type;
+
+    //! Difference type for iterator
+    typedef std::ptrdiff_t difference_type;
+
+    //! Construct empty queue
+    concurrent_queue() : 
+        concurrent_queue_base( sizeof(T) )
+    {
+    }
+
+    //! Destroy queue
+    ~concurrent_queue();
+
+    //! Enqueue an item at tail of queue.
+    void push( const T& source ) {
+        internal_push( &source );
+    }
+
+    //! Dequeue item from head of queue.
+    /** Block until an item becomes available, and then dequeue it. */
+    void pop( T& destination ) {
+        internal_pop( &destination );
+    }
+
+    //! Enqueue an item at tail of queue if queue is not already full.
+    /** Does not wait for queue to become not full.
+        Returns true if item is pushed; false if queue was already full. */
+    bool push_if_not_full( const T& source ) {
+        return internal_push_if_not_full( &source );
+    }
+
+    //! Attempt to dequeue an item from head of queue.
+    /** Does not wait for item to become available.
+        Returns true if successful; false otherwise. */
+    bool pop_if_present( T& destination ) {
+        return internal_pop_if_present( &destination );
+    }
+
+    //! Return number of pushes minus number of pops.
+    /** Note that the result can be negative if there are pops waiting for the 
+        corresponding pushes.  The result can also exceed capacity() if there 
+        are push operations in flight. */
+    size_type size() const {return internal_size();}
+
+    //! Equivalent to size()<=0.
+    bool empty() const {return size()<=0;}
+
+    //! Maximum number of allowed elements
+    size_type capacity() const {
+        return my_capacity;
+    }
+
+    //! Set the capacity
+    /** Setting the capacity to 0 causes subsequent push_if_not_full operations to always fail,
+        and subsequent push operations to block forever. */
+    void set_capacity( size_type capacity ) {
+        internal_set_capacity( capacity, sizeof(T) );
+    }
+
+    typedef internal::concurrent_queue_iterator<concurrent_queue,T> iterator;
+    typedef internal::concurrent_queue_iterator<concurrent_queue,const T> const_iterator;
+
+    //------------------------------------------------------------------------
+    // The iterators are intended only for debugging.  They are slow and not thread safe.
+    //------------------------------------------------------------------------
+    iterator begin() {return iterator(*this);}
+    iterator end() {return iterator();}
+    const_iterator begin() const {return const_iterator(*this);}
+    const_iterator end() const {return const_iterator();}
+    
+}; 
+
+template<typename T>
+concurrent_queue<T>::~concurrent_queue() {
+    while( !empty() ) {
+        T value;
+        internal_pop(&value);
+    }
+}
+
+} // namespace tbb
+
+#endif /* __TBB_concurrent_queue_H */
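Editorial note: as a usage sketch for this deprecated v2 interface (not part of the commit; the include path is an assumption, since the header lives under src/old/ in this tree), a minimal single-threaded example of the push/pop calls declared above:

// queue_v2_example.cpp -- illustrative sketch of the deprecated interface above
#include "concurrent_queue_v2.h"   // path assumed relative to src/old/
#include <iostream>

int main() {
    tbb::concurrent_queue<int> q;
    for( int i = 0; i < 4; ++i )
        q.push( i );                     // enqueue at the tail (blocks only if a capacity is set)
    int value;
    while( q.pop_if_present( value ) )   // non-blocking dequeue from the head
        std::cout << value << std::endl;
    std::cout << "size() now " << q.size()
              << " (pushes minus pops, may be negative)" << std::endl;
    return 0;
}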
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/old/concurrent_vector_v2.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/old/concurrent_vector_v2.cpp
new file mode 100644 (file)
index 0000000..b0d161d
--- /dev/null
@@ -0,0 +1,277 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "concurrent_vector_v2.h"
+#include "tbb/tbb_machine.h"
+#include "../tbb/itt_notify.h"
+#include "tbb/task.h"
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <stdexcept>
+#include <cstring>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+
+#if defined(_MSC_VER) && defined(_Wp64)
+    // Workaround for overzealous compiler warnings in /Wp64 mode
+    #pragma warning (disable: 4267)
+#endif
+
+namespace tbb {
+
+namespace internal {
+
+void concurrent_vector_base::internal_grow_to_at_least( size_type new_size, size_type element_size, internal_array_op1 init ) {
+    size_type e = my_early_size;
+    while( e<new_size ) {
+        size_type f = my_early_size.compare_and_swap(new_size,e);
+        if( f==e ) {
+            internal_grow( e, new_size, element_size, init );
+            return;
+        }
+        e = f;
+    }
+}
+
+class concurrent_vector_base::helper {
+    static void extend_segment( concurrent_vector_base& v );
+public:
+    static segment_index_t find_segment_end( const concurrent_vector_base& v ) {
+        const size_t pointers_per_long_segment = sizeof(void*)==4 ? 32 : 64;
+        const size_t pointers_per_short_segment = 2;
+        //unsigned u = v.my_segment==v.my_storage ? pointers_per_short_segment : pointers_per_long_segment;
+        segment_index_t u = v.my_segment==(&(v.my_storage[0])) ? pointers_per_short_segment : pointers_per_long_segment;
+        segment_index_t k = 0;
+        while( k<u && v.my_segment[k].array )
+            ++k;
+        return k;
+    }
+    static void extend_segment_if_necessary( concurrent_vector_base& v, size_t k ) {
+        const size_t pointers_per_short_segment = 2;
+        if( k>=pointers_per_short_segment && v.my_segment==v.my_storage ) {
+            extend_segment(v);
+        }
+    }
+};
+
+void concurrent_vector_base::helper::extend_segment( concurrent_vector_base& v ) {
+    const size_t pointers_per_long_segment = sizeof(void*)==4 ? 32 : 64;
+    segment_t* s = (segment_t*)NFS_Allocate( pointers_per_long_segment, sizeof(segment_t), NULL );
+    std::memset( s, 0, pointers_per_long_segment*sizeof(segment_t) );
+    // If other threads are trying to set pointers in the short segment, wait for them to finish their
+    // assignments before we copy the short segment to the long segment.
+    atomic_backoff backoff;
+    while( !v.my_storage[0].array || !v.my_storage[1].array ) {
+        backoff.pause();
+    }
+    s[0] = v.my_storage[0]; 
+    s[1] = v.my_storage[1]; 
+    if( v.my_segment.compare_and_swap( s, v.my_storage )!=v.my_storage ) 
+        NFS_Free(s);
+}
+
+concurrent_vector_base::size_type concurrent_vector_base::internal_capacity() const {
+    return segment_base( helper::find_segment_end(*this) );
+}
+
+void concurrent_vector_base::internal_reserve( size_type n, size_type element_size, size_type max_size ) {
+    if( n>max_size ) {
+        __TBB_THROW( std::length_error("argument to ConcurrentVector::reserve exceeds ConcurrentVector::max_size()") );
+    }
+    for( segment_index_t k = helper::find_segment_end(*this); segment_base(k)<n; ++k ) {
+        helper::extend_segment_if_necessary(*this,k);
+        size_t m = segment_size(k);
+        __TBB_ASSERT( !my_segment[k].array, "concurrent operation during reserve(...)?" );
+        my_segment[k].array = NFS_Allocate( m, element_size, NULL );
+    }
+}
+
+void concurrent_vector_base::internal_copy( const concurrent_vector_base& src, size_type element_size, internal_array_op2 copy ) {
+    size_type n = src.my_early_size;
+    my_early_size = n;
+    my_segment = my_storage;
+    if( n ) {
+        size_type b;
+        for( segment_index_t k=0; (b=segment_base(k))<n; ++k ) {
+            helper::extend_segment_if_necessary(*this,k);
+            size_t m = segment_size(k);
+            __TBB_ASSERT( !my_segment[k].array, "concurrent operation during copy construction?" );
+            my_segment[k].array = NFS_Allocate( m, element_size, NULL );
+            if( m>n-b ) m = n-b; 
+            copy( my_segment[k].array, src.my_segment[k].array, m );
+        }
+    }
+}
+
+void concurrent_vector_base::internal_assign( const concurrent_vector_base& src, size_type element_size, internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy ) {
+    size_type n = src.my_early_size;
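+    // First shrink the destination to at most n elements, destroying the excess; then, segment by segment, assign over already-initialized elements and copy-construct the rest.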
+    while( my_early_size>n ) { 
+        segment_index_t k = segment_index_of( my_early_size-1 );
+        size_type b=segment_base(k);
+        size_type new_end = b>=n ? b : n;
+        __TBB_ASSERT( my_early_size>new_end, NULL );
+        destroy( (char*)my_segment[k].array+element_size*(new_end-b), my_early_size-new_end );
+        my_early_size = new_end;
+    }
+    size_type dst_initialized_size = my_early_size;
+    my_early_size = n;
+    size_type b;
+    for( segment_index_t k=0; (b=segment_base(k))<n; ++k ) {
+        helper::extend_segment_if_necessary(*this,k);
+        size_t m = segment_size(k);
+        if( !my_segment[k].array )
+            my_segment[k].array = NFS_Allocate( m, element_size, NULL );
+        if( m>n-b ) m = n-b; 
+        size_type a = 0;
+        if( dst_initialized_size>b ) {
+            a = dst_initialized_size-b;
+            if( a>m ) a = m;
+            assign( my_segment[k].array, src.my_segment[k].array, a );
+            m -= a; 
+            a *= element_size; 
+        }
+        if( m>0 ) 
+            copy( (char*)my_segment[k].array+a, (char*)src.my_segment[k].array+a, m );
+    }
+    __TBB_ASSERT( src.my_early_size==n, "detected use of ConcurrentVector::operator= with right side that was concurrently modified" );
+}
+
+void* concurrent_vector_base::internal_push_back( size_type element_size, size_type& index ) {
+    __TBB_ASSERT( sizeof(my_early_size)==sizeof(reference_count), NULL );
+    //size_t tmp = __TBB_FetchAndIncrementWacquire(*(tbb::internal::reference_count*)&my_early_size);
+    size_t tmp = __TBB_FetchAndIncrementWacquire((tbb::internal::reference_count*)&my_early_size);
+    index = tmp;
+    segment_index_t k_old = segment_index_of( tmp );
+    size_type base = segment_base(k_old);
+    helper::extend_segment_if_necessary(*this,k_old);
+    segment_t& s = my_segment[k_old];
+    void* array = s.array;
+    if( !array ) {
+        // FIXME - consider factoring this out and share with internal_grow_by
+        if( base==tmp ) {
+            __TBB_ASSERT( !s.array, NULL );
+            size_t n = segment_size(k_old);
+            array = NFS_Allocate( n, element_size, NULL );
+            ITT_NOTIFY( sync_releasing, &s.array );
+            s.array = array;
+        } else {
+            ITT_NOTIFY(sync_prepare, &s.array);
+            spin_wait_while_eq( s.array, (void*)0 );
+            ITT_NOTIFY(sync_acquired, &s.array);
+            array = s.array;
+        }
+    }
+    size_type j_begin = tmp-base;
+    return (void*)((char*)array+element_size*j_begin);
+}
+
+concurrent_vector_base::size_type concurrent_vector_base::internal_grow_by( size_type delta, size_type element_size, internal_array_op1 init ) {
+    size_type result = my_early_size.fetch_and_add(delta);
+    internal_grow( result, result+delta, element_size, init );
+    return result;
+}
+
+void concurrent_vector_base::internal_grow( const size_type start, size_type finish, size_type element_size, internal_array_op1 init ) {
+    __TBB_ASSERT( start<finish, "start must be less than finish" );
+    size_t tmp = start;
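+    // Walk the range segment by segment: the thread that owns a segment's first slot allocates it, other threads wait, and each thread initializes the sub-range it claimed.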
+    do {
+        segment_index_t k_old = segment_index_of( tmp );
+        size_type base = segment_base(k_old);
+        size_t n = segment_size(k_old);
+        helper::extend_segment_if_necessary(*this,k_old);
+        segment_t& s = my_segment[k_old];
+        void* array = s.array;
+        if( !array ) {
+            if( base==tmp ) {
+                __TBB_ASSERT( !s.array, NULL );
+                array = NFS_Allocate( n, element_size, NULL );
+                ITT_NOTIFY( sync_releasing, &s.array );
+                s.array = array;
+            } else {
+                ITT_NOTIFY(sync_prepare, &s.array);
+                spin_wait_while_eq( s.array, (void*)0 );
+                ITT_NOTIFY(sync_acquired, &s.array);
+                array = s.array;
+            }
+        }
+        size_type j_begin = tmp-base;
+        size_type j_end = n > finish-base ? finish-base : n;
+        (*init)( (void*)((char*)array+element_size*j_begin), j_end-j_begin );
+        tmp = base+j_end;
+    } while( tmp<finish );
+}
+
+void concurrent_vector_base::internal_clear( internal_array_op1 destroy, bool reclaim_storage ) {
+    // Set "my_early_size" early, so that subscripting errors can be caught.
+    // FIXME - doing so may be hurting exception safety
+    __TBB_ASSERT( my_segment, NULL );
+    size_type finish = my_early_size;
+    my_early_size = 0;
+    while( finish>0 ) {
+        segment_index_t k_old = segment_index_of(finish-1);
+        segment_t& s = my_segment[k_old];
+        __TBB_ASSERT( s.array, NULL );
+        size_type base = segment_base(k_old);
+        size_type j_end = finish-base;
+        __TBB_ASSERT( j_end, NULL );
+        (*destroy)( s.array, j_end );
+        finish = base;
+    }
+
+    // Free the arrays
+    if( reclaim_storage ) {
+        size_t k = helper::find_segment_end(*this);
+        while( k>0 ) {
+            --k;
+            segment_t& s = my_segment[k];
+            void* array = s.array;
+            s.array = NULL;
+            NFS_Free( array );
+        }
+        // Clear short segment.  
+        my_storage[0].array = NULL;
+        my_storage[1].array = NULL;
+        segment_t* s = my_segment;
+        if( s!=my_storage ) {
+            my_segment = my_storage;
+            NFS_Free( s );
+        } 
+    }
+}
+
+} // namespace internal
+
+} // namespace tbb
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/old/concurrent_vector_v2.h b/deal.II/contrib/tbb/tbb30_104oss/src/old/concurrent_vector_v2.h
new file mode 100644 (file)
index 0000000..738d29a
--- /dev/null
@@ -0,0 +1,522 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_concurrent_vector_H
+#define __TBB_concurrent_vector_H
+
+#include "tbb/tbb_stddef.h"
+#include "tbb/atomic.h"
+#include "tbb/cache_aligned_allocator.h"
+#include "tbb/blocked_range.h"
+#include "tbb/tbb_machine.h"
+#include <new>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <iterator>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+namespace tbb {
+
+template<typename T>
+class concurrent_vector;
+
+//! @cond INTERNAL
+namespace internal {
+
+    //! Base class of concurrent vector implementation.
+    /** @ingroup containers */
+    class concurrent_vector_base {
+    protected:
+        typedef unsigned long segment_index_t;
+
+        //! Log2 of "min_segment_size".  
+        static const int lg_min_segment_size = 4;
+
+        //! Minimum size (in physical items) of a segment.
+        static const int min_segment_size = segment_index_t(1)<<lg_min_segment_size;
+      
+        static segment_index_t segment_index_of( size_t index ) { 
+            uintptr_t i = index|1<<(lg_min_segment_size-1);
+            uintptr_t j = __TBB_Log2(i); 
+            return segment_index_t(j-(lg_min_segment_size-1)); 
+        }
+
+        static segment_index_t segment_base( segment_index_t k ) { 
+            return min_segment_size>>1<<k & -min_segment_size;
+        }
+
+        static segment_index_t segment_size( segment_index_t k ) {
+            segment_index_t result = k==0 ? min_segment_size : min_segment_size/2<<k;
+            __TBB_ASSERT( result==segment_base(k+1)-segment_base(k), NULL );
+            return result;
+        }
+
+        typedef size_t size_type;
+
+        void __TBB_EXPORTED_METHOD internal_reserve( size_type n, size_type element_size, size_type max_size );
+
+        size_type __TBB_EXPORTED_METHOD internal_capacity() const;
+
+        //! Requested size of vector
+        atomic<size_type> my_early_size;
+
+        /** Can be zero-initialized. */
+        struct segment_t {
+            /** Declared volatile because in weak memory model, must have ld.acq/st.rel  */
+            void* volatile array;
+#if TBB_DO_ASSERT
+            ~segment_t() {
+                __TBB_ASSERT( !array, "should have been set to NULL by clear" );
+            }
+#endif /* TBB_DO_ASSERT */
+        };
+
+        atomic<segment_t*> my_segment;
+
+        segment_t my_storage[2];
+
+        concurrent_vector_base() {
+            my_early_size = 0;
+            my_storage[0].array = NULL;
+            my_storage[1].array = NULL;
+            my_segment = my_storage;
+        }
+
+        //! An operation on an n-element array starting at begin.
+        typedef void(__TBB_EXPORTED_FUNC *internal_array_op1)(void* begin, size_type n );
+
+        //! An operation on n-element destination array and n-element source array.
+        typedef void(__TBB_EXPORTED_FUNC *internal_array_op2)(void* dst, const void* src, size_type n );
+
+        void __TBB_EXPORTED_METHOD internal_grow_to_at_least( size_type new_size, size_type element_size, internal_array_op1 init );
+        void internal_grow( size_type start, size_type finish, size_type element_size, internal_array_op1 init );
+        size_type __TBB_EXPORTED_METHOD internal_grow_by( size_type delta, size_type element_size, internal_array_op1 init );
+        void* __TBB_EXPORTED_METHOD internal_push_back( size_type element_size, size_type& index );
+        void __TBB_EXPORTED_METHOD internal_clear( internal_array_op1 destroy, bool reclaim_storage );
+        void __TBB_EXPORTED_METHOD internal_copy( const concurrent_vector_base& src, size_type element_size, internal_array_op2 copy );
+        void __TBB_EXPORTED_METHOD internal_assign( const concurrent_vector_base& src, size_type element_size,
+                              internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy );
+private:
+        //! Private functionality that does not cross DLL boundary.
+        class helper;
+
+        friend class helper;
+    };
+
+    //! Meets requirements of a forward iterator for STL and a Value for a blocked_range.
+    /** Value is either the T or const T type of the container.
+        @ingroup containers */
+    template<typename Container, typename Value>
+    class vector_iterator 
+#if defined(_WIN64) && defined(_MSC_VER) 
+        // Ensure that Microsoft's internal template function _Val_type works correctly.
+        : public std::iterator<std::random_access_iterator_tag,Value>
+#endif /* defined(_WIN64) && defined(_MSC_VER) */
+    {
+        //! concurrent_vector over which we are iterating.
+        Container* my_vector;
+
+        //! Index into the vector 
+        size_t my_index;
+
+        //! Caches my_vector->internal_subscript(my_index)
+        /** NULL if cached value is not available */
+        mutable Value* my_item;
+    
+        template<typename C, typename T, typename U>
+        friend bool operator==( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
+
+        template<typename C, typename T, typename U>
+        friend bool operator<( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
+
+        template<typename C, typename T, typename U>
+        friend ptrdiff_t operator-( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
+    
+        template<typename C, typename U>
+        friend class internal::vector_iterator;
+
+#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
+        template<typename T>
+        friend class tbb::concurrent_vector;
+#else
+public: // workaround for MSVC
+#endif 
+
+        vector_iterator( const Container& vector, size_t index ) : 
+            my_vector(const_cast<Container*>(&vector)), 
+            my_index(index), 
+            my_item(NULL)
+        {}
+
+    public:
+        //! Default constructor
+        vector_iterator() : my_vector(NULL), my_index(~size_t(0)), my_item(NULL) {}
+
+        vector_iterator( const vector_iterator<Container,typename Container::value_type>& other ) :
+            my_vector(other.my_vector),
+            my_index(other.my_index),
+            my_item(other.my_item)
+        {}
+
+        vector_iterator operator+( ptrdiff_t offset ) const {
+            return vector_iterator( *my_vector, my_index+offset );
+        }
+        friend vector_iterator operator+( ptrdiff_t offset, const vector_iterator& v ) {
+            return vector_iterator( *v.my_vector, v.my_index+offset );
+        }
+        vector_iterator operator+=( ptrdiff_t offset ) {
+            my_index+=offset;
+            my_item = NULL;
+            return *this;
+        }
+        vector_iterator operator-( ptrdiff_t offset ) const {
+            return vector_iterator( *my_vector, my_index-offset );
+        }
+        vector_iterator operator-=( ptrdiff_t offset ) {
+            my_index-=offset;
+            my_item = NULL;
+            return *this;
+        }
+        Value& operator*() const {
+            Value* item = my_item;
+            if( !item ) {
+                item = my_item = &my_vector->internal_subscript(my_index);
+            }
+            __TBB_ASSERT( item==&my_vector->internal_subscript(my_index), "corrupt cache" );
+            return *item;
+        }
+        Value& operator[]( ptrdiff_t k ) const {
+            return my_vector->internal_subscript(my_index+k);
+        }
+        Value* operator->() const {return &operator*();}
+
+        //! Pre increment
+        vector_iterator& operator++() {
+            size_t k = ++my_index;
+            if( my_item ) {
+                // The following test uses two's-complement wizardry and the fact that
+                // min_segment_size is a power of 2.
+                if( (k& k-concurrent_vector<Container>::min_segment_size)==0 ) {
+                    // k is a power of two that is at least min_segment_size
+                    my_item= NULL;
+                } else {
+                    ++my_item;
+                }
+            }
+            return *this;
+        }
+
+        //! Pre decrement
+        vector_iterator& operator--() {
+            __TBB_ASSERT( my_index>0, "operator--() applied to iterator already at beginning of concurrent_vector" ); 
+            size_t k = my_index--;
+            if( my_item ) {
+                // The following test uses two's-complement wizardry and the fact that
+                // min_segment_size is a power of 2.
+                if( (k& k-concurrent_vector<Container>::min_segment_size)==0 ) {
+                    // k is a power of two that is at least min_segment_size
+                    my_item= NULL;
+                } else {
+                    --my_item;
+                }
+            }
+            return *this;
+        }
+
+        //! Post increment
+        vector_iterator operator++(int) {
+            vector_iterator result = *this;
+            operator++();
+            return result;
+        }
+
+        //! Post decrement
+        vector_iterator operator--(int) {
+            vector_iterator result = *this;
+            operator--();
+            return result;
+        }
+
+        // STL support
+
+        typedef ptrdiff_t difference_type;
+        typedef Value value_type;
+        typedef Value* pointer;
+        typedef Value& reference;
+        typedef std::random_access_iterator_tag iterator_category;
+    };
+
+    template<typename Container, typename T, typename U>
+    bool operator==( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
+        return i.my_index==j.my_index;
+    }
+
+    template<typename Container, typename T, typename U>
+    bool operator!=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
+        return !(i==j);
+    }
+
+    template<typename Container, typename T, typename U>
+    bool operator<( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
+        return i.my_index<j.my_index;
+    }
+
+    template<typename Container, typename T, typename U>
+    bool operator>( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
+        return j<i;
+    }
+
+    template<typename Container, typename T, typename U>
+    bool operator>=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
+        return !(i<j);
+    }
+
+    template<typename Container, typename T, typename U>
+    bool operator<=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
+        return !(j<i);
+    }
+
+    template<typename Container, typename T, typename U>
+    ptrdiff_t operator-( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
+        return ptrdiff_t(i.my_index)-ptrdiff_t(j.my_index);
+    }
+
+} // namespace internal
+//! @endcond
+
+//! Concurrent vector
+/** @ingroup containers */
+template<typename T>
+class concurrent_vector: private internal::concurrent_vector_base {
+public:
+    using internal::concurrent_vector_base::size_type;
+private:
+    template<typename I>
+    class generic_range_type: public blocked_range<I> {
+    public:
+        typedef T value_type;
+        typedef T& reference;
+        typedef const T& const_reference;
+        typedef I iterator;
+        typedef ptrdiff_t difference_type;
+        generic_range_type( I begin_, I end_, size_t grainsize ) : blocked_range<I>(begin_,end_,grainsize) {} 
+        generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
+    };
+
+    template<typename C, typename U>
+    friend class internal::vector_iterator;
+public:
+    typedef T& reference;
+    typedef const T& const_reference;
+
+    //! Construct empty vector.
+    concurrent_vector() {}
+
+    //! Copy a vector.
+    concurrent_vector( const concurrent_vector& vector ) {internal_copy(vector,sizeof(T),&copy_array);}
+
+    //! Assignment 
+    concurrent_vector& operator=( const concurrent_vector& vector ) {
+        if( this!=&vector )
+            internal_assign(vector,sizeof(T),&destroy_array,&assign_array,&copy_array);
+        return *this;
+    }
+
+    //! Clear and destroy vector.
+    ~concurrent_vector() {internal_clear(&destroy_array,/*reclaim_storage=*/true);}
+
+    //------------------------------------------------------------------------
+    // Concurrent operations
+    //------------------------------------------------------------------------
+    //! Grow by "delta" elements.
+    /** Returns old size. */
+    size_type grow_by( size_type delta ) {
+        return delta ? internal_grow_by( delta, sizeof(T), &initialize_array ) : my_early_size;
+    }
+
+    //! Grow array until it has at least n elements.
+    void grow_to_at_least( size_type n ) {
+        if( my_early_size<n )
+            internal_grow_to_at_least( n, sizeof(T), &initialize_array );
+    };
+
+    //! Push item 
+    size_type push_back( const_reference item ) {
+        size_type k;
+        new( internal_push_back(sizeof(T),k) ) T(item);
+        return k;
+    }
+
+    //! Get reference to element at given index.
+    /** This method is thread-safe for concurrent reads, and also while growing the vector,
+        as long as the calling thread has checked that index<size(). */
+    reference operator[]( size_type index ) {
+        return internal_subscript(index);
+    }
+
+    //! Get const reference to element at given index.
+    const_reference operator[]( size_type index ) const {
+        return internal_subscript(index);
+    }
+
+    //------------------------------------------------------------------------
+    // Parallel algorithm support
+    //------------------------------------------------------------------------
+    typedef internal::vector_iterator<concurrent_vector,T> iterator;
+    typedef internal::vector_iterator<concurrent_vector,const T> const_iterator;
+
+#if !defined(_MSC_VER) || _CPPLIB_VER>=300 
+    // Assume ISO standard definition of std::reverse_iterator
+    typedef std::reverse_iterator<iterator> reverse_iterator;
+    typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+#else
+    // Use non-standard std::reverse_iterator
+    typedef std::reverse_iterator<iterator,T,T&,T*> reverse_iterator;
+    typedef std::reverse_iterator<const_iterator,T,const T&,const T*> const_reverse_iterator;
+#endif /* !defined(_MSC_VER) || _CPPLIB_VER>=300 */
+
+    typedef generic_range_type<iterator> range_type;
+    typedef generic_range_type<const_iterator> const_range_type;
+
+    range_type range( size_t grainsize = 1 ) {
+        return range_type( begin(), end(), grainsize );
+    }
+
+    const_range_type range( size_t grainsize = 1 ) const {
+        return const_range_type( begin(), end(), grainsize );
+    }
+
+    //------------------------------------------------------------------------
+    // Capacity
+    //------------------------------------------------------------------------
+    //! Return size of vector.
+    size_type size() const {return my_early_size;}
+
+    //! Return true if vector is empty.
+    bool empty() const {return !my_early_size;}
+
+    //! Maximum size to which array can grow without allocating more memory.
+    size_type capacity() const {return internal_capacity();}
+
+    //! Allocate enough space to grow to size n without having to allocate more memory later.
+    /** Like most of the methods provided for STL compatibility, this method is *not* thread safe. 
+        The capacity afterwards may be bigger than the requested reservation. */
+    void reserve( size_type n ) {
+        if( n )
+            internal_reserve(n, sizeof(T), max_size());
+    }
+
+    //! Upper bound on argument to reserve.
+    size_type max_size() const {return (~size_t(0))/sizeof(T);}
+
+    //------------------------------------------------------------------------
+    // STL support
+    //------------------------------------------------------------------------
+
+    typedef T value_type;
+    typedef ptrdiff_t difference_type;
+
+    iterator begin() {return iterator(*this,0);}
+    iterator end() {return iterator(*this,size());}
+    const_iterator begin() const {return const_iterator(*this,0);}
+    const_iterator end() const {return const_iterator(*this,size());}
+
+    reverse_iterator rbegin() {return reverse_iterator(end());}
+    reverse_iterator rend() {return reverse_iterator(begin());}
+    const_reverse_iterator rbegin() const {return const_reverse_iterator(end());}
+    const_reverse_iterator rend() const {return const_reverse_iterator(begin());}
+
+    //! Not thread safe
+    /** Does not change capacity. */
+    void clear() {internal_clear(&destroy_array,/*reclaim_storage=*/false);}       
+private:
+    //! Get reference to element at given index.
+    T& internal_subscript( size_type index ) const;
+
+    //! Construct n instances of T, starting at "begin".
+    static void __TBB_EXPORTED_FUNC initialize_array( void* begin, size_type n );
+
+    //! Copy-construct n instances of T at "dst" from "src".
+    static void __TBB_EXPORTED_FUNC copy_array( void* dst, const void* src, size_type n );
+
+    //! Assign n instances of T at "dst" from "src".
+    static void __TBB_EXPORTED_FUNC assign_array( void* dst, const void* src, size_type n );
+
+    //! Destroy n instances of T, starting at "begin".
+    static void __TBB_EXPORTED_FUNC destroy_array( void* begin, size_type n );
+};
+
+template<typename T>
+T& concurrent_vector<T>::internal_subscript( size_type index ) const {
+    __TBB_ASSERT( index<size(), "index out of bounds" );
+    segment_index_t k = segment_index_of( index );
+    size_type j = index-segment_base(k);
+    return static_cast<T*>(my_segment[k].array)[j];
+}
+
+template<typename T>
+void concurrent_vector<T>::initialize_array( void* begin, size_type n ) {
+    T* array = static_cast<T*>(begin);
+    for( size_type j=0; j<n; ++j )
+        new( &array[j] ) T();
+}
+
+template<typename T>
+void concurrent_vector<T>::copy_array( void* dst, const void* src, size_type n ) {
+    T* d = static_cast<T*>(dst);
+    const T* s = static_cast<const T*>(src);
+    for( size_type j=0; j<n; ++j )
+        new( &d[j] ) T(s[j]);
+}
+
+template<typename T>
+void concurrent_vector<T>::assign_array( void* dst, const void* src, size_type n ) {
+    T* d = static_cast<T*>(dst);
+    const T* s = static_cast<const T*>(src);
+    for( size_type j=0; j<n; ++j )
+        d[j] = s[j];
+}
+
+template<typename T>
+void concurrent_vector<T>::destroy_array( void* begin, size_type n ) {
+    T* array = static_cast<T*>(begin);
+    for( size_type j=n; j>0; --j )
+        array[j-1].~T();
+}
+
+} // namespace tbb
+
+#endif /* __TBB_concurrent_vector_H */
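A minimal usage sketch for the v2 interface declared above, assuming the code is compiled against this compatibility header and linked with a TBB build that provides the exported internal_* methods; the variable names are illustrative only.

    #include "concurrent_vector_v2.h"

    int main() {
        tbb::concurrent_vector<int> v;
        // grow_by() returns the old size, so indices [old_size, old_size+10) belong to this caller.
        tbb::concurrent_vector<int>::size_type old_size = v.grow_by( 10 );
        for( tbb::concurrent_vector<int>::size_type i=0; i<10; ++i )
            v[old_size+i] = int(i);
        // push_back() returns the index of the newly constructed element.
        tbb::concurrent_vector<int>::size_type k = v.push_back( 42 );
        return ( v[k]==42 && v.size()==11 ) ? 0 : 1;
    }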
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/old/spin_rw_mutex_v2.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/old/spin_rw_mutex_v2.cpp
new file mode 100644 (file)
index 0000000..a19ec93
--- /dev/null
@@ -0,0 +1,166 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "spin_rw_mutex_v2.h"
+#include "tbb/tbb_machine.h"
+#include "../tbb/itt_notify.h"
+
+namespace tbb {
+
+using namespace internal;
+
+static inline bool CAS(volatile uintptr_t &addr, uintptr_t newv, uintptr_t oldv) {
+    return __TBB_CompareAndSwapW((volatile void *)&addr, (intptr_t)newv, (intptr_t)oldv) == (intptr_t)oldv;
+}
+
+//! Notify analysis tools (ITT) that the mutex is being released
+void spin_rw_mutex::internal_itt_releasing(spin_rw_mutex *mutex) {
+    ITT_NOTIFY(sync_releasing, mutex);
+#if !DO_ITT_NOTIFY
+    (void)mutex;
+#endif
+}
+
+bool spin_rw_mutex::internal_acquire_writer(spin_rw_mutex *mutex)
+{
+    ITT_NOTIFY(sync_prepare, mutex);
+    atomic_backoff backoff;
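+    // Spin with exponential backoff until the lock is free; setting WRITER_PENDING meanwhile tells incoming readers to back off.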
+    for(;;) {
+        state_t s = mutex->state;
+        if( !(s & BUSY) ) { // no readers, no writers
+            if( CAS(mutex->state, WRITER, s) )
+                break; // successfully stored writer flag
+            backoff.reset(); // we may be very close to completing the operation
+        } else if( !(s & WRITER_PENDING) ) { // no pending writers
+            __TBB_AtomicOR(&mutex->state, WRITER_PENDING);
+        }
+        backoff.pause();
+    }
+    ITT_NOTIFY(sync_acquired, mutex);
+    __TBB_ASSERT( (mutex->state & BUSY)==WRITER, "invalid state of a write lock" );
+    return false;
+}
+
+//! Signal that write lock is released
+void spin_rw_mutex::internal_release_writer(spin_rw_mutex *mutex) {
+    __TBB_ASSERT( (mutex->state & BUSY)==WRITER, "invalid state of a write lock" );
+    ITT_NOTIFY(sync_releasing, mutex);
+    mutex->state = 0; 
+}
+
+//! Acquire read lock on given mutex.
+void spin_rw_mutex::internal_acquire_reader(spin_rw_mutex *mutex) {
+    ITT_NOTIFY(sync_prepare, mutex);
+    atomic_backoff backoff;
+    for(;;) {
+        state_t s = mutex->state;
+        if( !(s & (WRITER|WRITER_PENDING)) ) { // no writer or write requests
+            if( CAS(mutex->state, s+ONE_READER, s) )
+                break; // successfully stored increased number of readers
+            backoff.reset(); // we may be very close to completing the operation
+        }
+        backoff.pause();
+    }
+    ITT_NOTIFY(sync_acquired, mutex);
+    __TBB_ASSERT( mutex->state & READERS, "invalid state of a read lock: no readers" );
+    __TBB_ASSERT( !(mutex->state & WRITER), "invalid state of a read lock: active writer" );
+}
+
+//! Upgrade reader to become a writer.
+/** Returns true if the upgrade happened without re-acquiring the lock, and false otherwise. */
+bool spin_rw_mutex::internal_upgrade(spin_rw_mutex *mutex) {
+    state_t s = mutex->state;
+    __TBB_ASSERT( s & READERS, "invalid state before upgrade: no readers " );
+    __TBB_ASSERT( !(s & WRITER), "invalid state before upgrade: active writer " );
+    // check and set writer-pending flag
+    // required conditions: either no pending writers, or we are the only reader
+    // (with multiple readers and pending writer, another upgrade could have been requested)
+    while( (s & READERS)==ONE_READER || !(s & WRITER_PENDING) ) {
+        if( CAS(mutex->state, s | WRITER_PENDING, s) )
+        {
+            atomic_backoff backoff;
+            ITT_NOTIFY(sync_prepare, mutex);
+            while( (mutex->state & READERS) != ONE_READER ) // more than 1 reader
+                backoff.pause();
+            // the state should be 0...0110, i.e. 1 reader and waiting writer;
+            // both new readers and writers are blocked
+            __TBB_ASSERT(mutex->state == (ONE_READER | WRITER_PENDING),"invalid state when upgrading to writer");
+            mutex->state = WRITER;
+            ITT_NOTIFY(sync_acquired, mutex);
+            __TBB_ASSERT( (mutex->state & BUSY) == WRITER, "invalid state after upgrade" );
+            return true; // successfully upgraded
+        } else {
+            s = mutex->state; // re-read
+        }
+    }
+    // slow reacquire
+    internal_release_reader(mutex);
+    return internal_acquire_writer(mutex); // always returns false
+}
+
+void spin_rw_mutex::internal_downgrade(spin_rw_mutex *mutex) {
+    __TBB_ASSERT( (mutex->state & BUSY) == WRITER, "invalid state before downgrade" );
+    ITT_NOTIFY(sync_releasing, mutex);
+    mutex->state = ONE_READER;
+    __TBB_ASSERT( mutex->state & READERS, "invalid state after downgrade: no readers" );
+    __TBB_ASSERT( !(mutex->state & WRITER), "invalid state after downgrade: active writer" );
+}
+
+void spin_rw_mutex::internal_release_reader(spin_rw_mutex *mutex)
+{
+    __TBB_ASSERT( mutex->state & READERS, "invalid state of a read lock: no readers" );
+    __TBB_ASSERT( !(mutex->state & WRITER), "invalid state of a read lock: active writer" );
+    ITT_NOTIFY(sync_releasing, mutex); // release reader
+    __TBB_FetchAndAddWrelease((volatile void *)&(mutex->state),-(intptr_t)ONE_READER);
+}
+
+bool spin_rw_mutex::internal_try_acquire_writer( spin_rw_mutex * mutex )
+{
+// for a writer: only possible to acquire if no active readers or writers
+    state_t s = mutex->state; // on Itanium, this volatile load has acquire semantics
+    if( !(s & BUSY) ) // no readers, no writers; mask is 1..1101
+        if( CAS(mutex->state, WRITER, s) ) {
+            ITT_NOTIFY(sync_acquired, mutex);
+            return true; // successfully stored writer flag
+        }
+    return false;
+}
+
+bool spin_rw_mutex::internal_try_acquire_reader( spin_rw_mutex * mutex )
+{
+// for a reader: acquire if no active or waiting writers
+    state_t s = mutex->state;    // on Itanium, a load of a volatile variable has acquire semantics
+    while( !(s & (WRITER|WRITER_PENDING)) ) // no writers
+        if( CAS(mutex->state, s+ONE_READER, s) ) {
+            ITT_NOTIFY(sync_acquired, mutex);
+            return true; // successfully stored increased number of readers
+        }
+    return false;
+}
+
+} // namespace tbb
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/old/spin_rw_mutex_v2.h b/deal.II/contrib/tbb/tbb30_104oss/src/old/spin_rw_mutex_v2.h
new file mode 100644 (file)
index 0000000..bc45277
--- /dev/null
@@ -0,0 +1,185 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_spin_rw_mutex_H
+#define __TBB_spin_rw_mutex_H
+
+#include "tbb/tbb_stddef.h"
+
+namespace tbb {
+
+//! Fast, unfair, spinning reader-writer lock with backoff and writer-preference
+/** @ingroup synchronization */
+class spin_rw_mutex {
+    //! @cond INTERNAL
+
+    //! Present so that 1.0 headers work with 1.1 dynamic library.
+    static void __TBB_EXPORTED_FUNC internal_itt_releasing(spin_rw_mutex *);
+
+    //! Internal acquire write lock.
+    static bool __TBB_EXPORTED_FUNC internal_acquire_writer(spin_rw_mutex *);
+
+    //! Out of line code for releasing a write lock.  
+    /** This code has debug checking and instrumentation for Intel(R) Thread Checker and Intel(R) Thread Profiler. */
+    static void __TBB_EXPORTED_FUNC internal_release_writer(spin_rw_mutex *);
+
+    //! Internal acquire read lock.
+    static void __TBB_EXPORTED_FUNC internal_acquire_reader(spin_rw_mutex *);
+
+    //! Internal upgrade reader to become a writer.
+    static bool __TBB_EXPORTED_FUNC internal_upgrade(spin_rw_mutex *);
+
+    //! Out of line code for downgrading a writer to a reader.   
+    /** This code has debug checking and instrumentation for Intel(R) Thread Checker and Intel(R) Thread Profiler. */
+    static void __TBB_EXPORTED_FUNC internal_downgrade(spin_rw_mutex *);
+
+    //! Internal release read lock.
+    static void __TBB_EXPORTED_FUNC internal_release_reader(spin_rw_mutex *);
+
+    //! Internal try_acquire write lock.
+    static bool __TBB_EXPORTED_FUNC internal_try_acquire_writer(spin_rw_mutex *);
+
+    //! Internal try_acquire read lock.
+    static bool __TBB_EXPORTED_FUNC internal_try_acquire_reader(spin_rw_mutex *);
+
+    //! @endcond
+public:
+    //! Construct unacquired mutex.
+    spin_rw_mutex() : state(0) {}
+
+#if TBB_DO_ASSERT
+    //! Destructor asserts that the mutex is not acquired, i.e. that state is zero.
+    ~spin_rw_mutex() {
+        __TBB_ASSERT( !state, "destruction of an acquired mutex");
+    };
+#endif /* TBB_DO_ASSERT */
+
+    //! The scoped locking pattern
+    /** It helps to avoid the common problem of forgetting to release the lock.
+        It also nicely provides the "node" for queuing locks. */
+    class scoped_lock : private internal::no_copy {
+    public:
+        //! Construct lock that has not acquired a mutex.
+        /** Equivalent to zero-initialization of *this. */
+        scoped_lock() : mutex(NULL) {}
+
+        //! Acquire lock on given mutex.
+        /** Upon entry, *this should not be in the "have acquired a mutex" state. */
+        scoped_lock( spin_rw_mutex& m, bool write = true ) : mutex(NULL) {
+            acquire(m, write);
+        }
+
+        //! Release lock (if lock is held).
+        ~scoped_lock() {
+            if( mutex ) release();
+        }
+
+        //! Acquire lock on given mutex.
+        void acquire( spin_rw_mutex& m, bool write = true ) {
+            __TBB_ASSERT( !mutex, "holding mutex already" );
+            is_writer = write; 
+            mutex = &m;
+            if( write ) internal_acquire_writer(mutex);
+            else        internal_acquire_reader(mutex);
+        }
+
+        //! Upgrade reader to become a writer.
+        /** Returns true if the upgrade happened without re-acquiring the lock, and false otherwise. */
+        bool upgrade_to_writer() {
+            __TBB_ASSERT( mutex, "lock is not acquired" );
+            __TBB_ASSERT( !is_writer, "not a reader" );
+            is_writer = true; 
+            return internal_upgrade(mutex);
+        }
+
+        //! Release lock.
+        void release() {
+            __TBB_ASSERT( mutex, "lock is not acquired" );
+            spin_rw_mutex *m = mutex; 
+            mutex = NULL;
+            if( is_writer ) {
+#if TBB_DO_THREADING_TOOLS||TBB_DO_ASSERT
+                internal_release_writer(m);
+#else
+                m->state = 0; 
+#endif /* TBB_DO_THREADING_TOOLS||TBB_DO_ASSERT */
+            } else {
+                internal_release_reader(m);
+            }
+        };
+
+        //! Downgrade writer to become a reader.
+        bool downgrade_to_reader() {
+#if TBB_DO_THREADING_TOOLS||TBB_DO_ASSERT
+            __TBB_ASSERT( mutex, "lock is not acquired" );
+            __TBB_ASSERT( is_writer, "not a writer" );
+            internal_downgrade(mutex);
+#else
+            mutex->state = 4; // Bit 2 - reader, 00..00100
+#endif
+            is_writer = false;
+
+            return true;
+        }
+
+        //! Try acquire lock on given mutex.
+        bool try_acquire( spin_rw_mutex& m, bool write = true ) {
+            __TBB_ASSERT( !mutex, "holding mutex already" );
+            bool result;
+            is_writer = write; 
+            result = write? internal_try_acquire_writer(&m)
+                          : internal_try_acquire_reader(&m);
+            if( result ) mutex = &m;
+            return result;
+        }
+
+    private:
+        //! The pointer to the current mutex that is held, or NULL if no mutex is held.
+        spin_rw_mutex* mutex;
+
+        //! True if holding a writer lock, false if holding a reader lock.
+        /** Not defined if not holding a lock. */
+        bool is_writer;
+    };
+
+private:
+    typedef uintptr_t state_t;
+    static const state_t WRITER = 1;
+    static const state_t WRITER_PENDING = 2;
+    static const state_t READERS = ~(WRITER | WRITER_PENDING);
+    static const state_t ONE_READER = 4;
+    static const state_t BUSY = WRITER | READERS;
+    /** Bit 0 = writer is holding lock
+        Bit 1 = request by a writer to acquire lock (hint to readers to wait)
+        Bit 2..N = number of readers holding lock */
+    volatile state_t state;
+};
+
+} // namespace tbb
+
+#endif /* __TBB_spin_rw_mutex_H */
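A short sketch of the scoped locking pattern documented above, assuming the header is used with the library that supplies its internal_* entry points; the shared data and function names are made up for illustration.

    #include "spin_rw_mutex_v2.h"

    static tbb::spin_rw_mutex counter_mutex;   // illustrative shared mutex
    static int counter;                        // illustrative shared data

    int read_counter() {
        // Reader lock (write=false); released automatically when the scope ends.
        tbb::spin_rw_mutex::scoped_lock lock( counter_mutex, /*write=*/false );
        return counter;
    }

    void bump_counter() {
        // Writer lock (the default). A reader could instead call upgrade_to_writer()
        // to avoid releasing and re-acquiring the lock in the common case.
        tbb::spin_rw_mutex::scoped_lock lock( counter_mutex );
        ++counter;
    }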
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/old/task_v2.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/old/task_v2.cpp
new file mode 100644 (file)
index 0000000..7deccfc
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+/*  This compilation unit provides the definition of task::destroy( task& )
+    that is binary compatible with TBB 2.x. In TBB 3.0, the method became
+    static and its name decoration changed, though the definition remained.
+
+    The macro switch below must be defined before including task.h
+    or any TBB header that may pull task.h in.
+*/
+#define __TBB_DEPRECATED_TASK_INTERFACE 1
+#include "tbb/task.h"
+
+namespace tbb {
+
+void task::destroy( task& victim ) {
+    // Forward to static version
+    task_base::destroy( victim );
+}
+
+} // namespace tbb
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/old/test_concurrent_queue_v2.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/old/test_concurrent_queue_v2.cpp
new file mode 100644 (file)
index 0000000..c5ed6ec
--- /dev/null
@@ -0,0 +1,356 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/concurrent_queue.h"
+#include "tbb/atomic.h"
+#include "tbb/tick_count.h"
+
+#include "../test/harness_assert.h"
+#include "../test/harness.h"
+
+static tbb::atomic<long> FooConstructed;
+static tbb::atomic<long> FooDestroyed;
+
+class Foo {
+    enum state_t{
+        LIVE=0x1234,
+        DEAD=0xDEAD
+    };
+    state_t state;
+public:
+    int thread_id;
+    int serial;
+    Foo() : state(LIVE) {
+        ++FooConstructed;
+    }
+    Foo( const Foo& item ) : state(LIVE) {
+        ASSERT( item.state==LIVE, NULL );
+        ++FooConstructed;
+        thread_id = item.thread_id;
+        serial = item.serial;
+    }
+    ~Foo() {
+        ASSERT( state==LIVE, NULL );
+        ++FooDestroyed;
+        state=DEAD;
+        thread_id=0xDEAD;
+        serial=0xDEAD;
+    }
+    void operator=( Foo& item ) {
+        ASSERT( item.state==LIVE, NULL );
+        ASSERT( state==LIVE, NULL );
+        thread_id = item.thread_id;
+        serial = item.serial;
+    }
+    bool is_const() {return false;}
+    bool is_const() const {return true;}
+};
+
+const size_t MAXTHREAD = 256;
+
+static int Sum[MAXTHREAD];
+
+//! Count of various pop operations
+/** [0] = pop_if_present that failed
+    [1] = pop_if_present that succeeded
+    [2] = pop */
+static tbb::atomic<long> PopKind[3];
+
+const int M = 10000;
+
+struct Body {
+    tbb::concurrent_queue<Foo>* queue;
+    const int nthread;
+    Body( int nthread_ ) : nthread(nthread_) {}
+    void operator()( long thread_id ) const {
+        long pop_kind[3] = {0,0,0};
+        int serial[MAXTHREAD+1];
+        memset( serial, 0, nthread*sizeof(serial[0]) );
+        ASSERT( thread_id<nthread, NULL );
+
+        long sum = 0;
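+        // Alternate pop_if_present and plain pop around each push, and check that serial numbers from each producer are observed in increasing order.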
+        for( long j=0; j<M; ++j ) {
+            Foo f;
+            f.thread_id = 0xDEAD;
+            f.serial = 0xDEAD;
+            bool prepopped = false;
+            if( j&1 ) {
+                prepopped = queue->pop_if_present(f);
+                ++pop_kind[prepopped];
+            }
+            Foo g;
+            g.thread_id = thread_id;
+            g.serial = j+1;
+            queue->push( g );
+            if( !prepopped ) {
+                queue->pop(f);
+                ++pop_kind[2];
+            }
+            ASSERT( f.thread_id<=nthread, NULL );
+            ASSERT( f.thread_id==nthread || serial[f.thread_id]<f.serial, "partial order violation" );
+            serial[f.thread_id] = f.serial;
+            sum += f.serial-1;
+        }
+        Sum[thread_id] = sum;
+        for( int k=0; k<3; ++k )
+            PopKind[k] += pop_kind[k];
+    }
+};
+
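+//! Exercise concurrent push/pop: prefill the queue, run nthread workers that each push and pop M items, then verify the checksum and that pop_if_present saw both outcomes often enough (retrying the whole test otherwise).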
+void TestPushPop( int prefill, ptrdiff_t capacity, int nthread ) {
+    ASSERT( nthread>0, "nthread must be positive" );
+    if( prefill+1>=capacity )
+        return;
+    bool success = false;
+    for( int k=0; k<3; ++k )
+        PopKind[k] = 0;
+    for( int trial=0; !success; ++trial ) {
+        FooConstructed = 0;
+        FooDestroyed = 0;
+        Body body(nthread);
+        tbb::concurrent_queue<Foo> queue;
+        queue.set_capacity( capacity );
+        body.queue = &queue;
+        for( int i=0; i<prefill; ++i ) {
+            Foo f;
+            f.thread_id = nthread;
+            f.serial = 1+i;
+            queue.push(f);
+            ASSERT( queue.size()==i+1, NULL );
+            ASSERT( !queue.empty(), NULL );
+        }
+        tbb::tick_count t0 = tbb::tick_count::now();
+        NativeParallelFor( nthread, body );
+        tbb::tick_count t1 = tbb::tick_count::now();
+        double timing = (t1-t0).seconds();
+        if( Verbose )
+            printf("prefill=%d capacity=%d time = %g = %g nsec/operation\n", prefill, int(capacity), timing, timing/(2*M*nthread)*1.E9);
+        int sum = 0;
+        for( int k=0; k<nthread; ++k )
+            sum += Sum[k];
+        int expected = nthread*((M-1)*M/2) + ((prefill-1)*prefill)/2;
+        for( int i=prefill; --i>=0; ) {
+            ASSERT( !queue.empty(), NULL );
+            Foo f;
+            queue.pop(f);
+            ASSERT( queue.size()==i, NULL );
+            sum += f.serial-1;
+        }
+        ASSERT( queue.empty(), NULL );
+        ASSERT( queue.size()==0, NULL );
+        if( sum!=expected )
+            printf("sum=%d expected=%d\n",sum,expected);
+        ASSERT( FooConstructed==FooDestroyed, NULL );
+
+        success = true;
+        if( nthread>1 && prefill==0 ) {
+            // Check that pop_if_present got sufficient exercise
+            for( int k=0; k<2; ++k ) {
+#if (_WIN32||_WIN64)
+                // The TBB library on Windows seems to have a tough time generating
+                // the desired interleavings for pop_if_present, so the code tries longer, and settles
+                // for fewer desired interleavings.
+                const int max_trial = 100;
+                const int min_requirement = 20;
+#else
+                const int min_requirement = 100;
+                const int max_trial = 20;
+#endif /* _WIN32||_WIN64 */
+                if( PopKind[k]<min_requirement ) {
+                    if( trial>=max_trial ) {
+                        if( Verbose )
+                            printf("Warning: %d threads had only %ld pop_if_present operations %s after %d trials (expected at least %d). "
+                                    "This problem may merely be unlucky scheduling. "
+                                    "Investigate only if it happens repeatedly.\n",
+                                    nthread, long(PopKind[k]), k==0?"failed":"succeeded", max_trial, min_requirement);
+                        else
+                            printf("Warning: the number of %s pop_if_present operations is less than expected for %d threads. Investigate if it happens repeatedly.\n",
+                                   k==0?"failed":"succeeded", nthread );
+                    } else {
+                        success = false;
+                    }
+               }
+            }
+        }
+    }
+}
+
+template<typename Iterator1, typename Iterator2>
+void TestIteratorAux( Iterator1 i, Iterator2 j, int size ) {
+    // Now test iteration
+    Iterator1 old_i;
+    for( int k=0; k<size; ++k ) {
+        ASSERT( i!=j, NULL );
+        ASSERT( !(i==j), NULL );
+        Foo f;
+        if( k&1 ) {
+            // Test post-increment of the saved iterator copy
+            f = *old_i++;
+            // Test assignment
+            i = old_i;
+        } else {
+            // Test post-increment
+            f=*i++;
+            if( k<size-1 ) {
+                // Test "->"
+                ASSERT( k+2==i->serial, NULL );
+            }
+            // Test assignment
+            old_i = i;
+        }
+        ASSERT( k+1==f.serial, NULL );
+    }
+    ASSERT( !(i!=j), NULL );
+    ASSERT( i==j, NULL );
+}
+
+template<typename Iterator1, typename Iterator2>
+void TestIteratorAssignment( Iterator2 j ) {
+    Iterator1 i(j);
+    ASSERT( i==j, NULL );
+    ASSERT( !(i!=j), NULL );
+    Iterator1 k;
+    k = j;
+    ASSERT( k==j, NULL );
+    ASSERT( !(k!=j), NULL );
+}
+
+//! Test the iterators for concurrent_queue
+void TestIterator() {
+    tbb::concurrent_queue<Foo> queue;
+    tbb::concurrent_queue<Foo>& const_queue = queue;
+    for( int j=0; j<500; ++j ) {
+        TestIteratorAux( queue.begin(), queue.end(), j );
+        TestIteratorAux( const_queue.begin(), const_queue.end(), j );
+        TestIteratorAux( const_queue.begin(), queue.end(), j );
+        TestIteratorAux( queue.begin(), const_queue.end(), j );
+        Foo f;
+        f.serial = j+1;
+        queue.push(f);
+    }
+    TestIteratorAssignment<tbb::concurrent_queue<Foo>::const_iterator>( const_queue.begin() );
+    TestIteratorAssignment<tbb::concurrent_queue<Foo>::const_iterator>( queue.begin() );
+    TestIteratorAssignment<tbb::concurrent_queue<Foo>::iterator>( queue.begin() );
+}
+
+void TestConcurrentQueueType() {
+    AssertSameType( tbb::concurrent_queue<Foo>::value_type(), Foo() );
+    Foo f;
+    const Foo g;
+    tbb::concurrent_queue<Foo>::reference r = f;
+    ASSERT( &r==&f, NULL );
+    ASSERT( !r.is_const(), NULL );
+    tbb::concurrent_queue<Foo>::const_reference cr = g;
+    ASSERT( &cr==&g, NULL );
+    ASSERT( cr.is_const(), NULL );
+}
+
+template<typename T>
+void TestEmptyQueue() {
+    const tbb::concurrent_queue<T> queue;
+    ASSERT( queue.size()==0, NULL );
+    ASSERT( queue.capacity()>0, NULL );
+    ASSERT( size_t(queue.capacity())>=size_t(-1)/(sizeof(void*)+sizeof(T)), NULL );
+}
+
+void TestFullQueue() {
+    for( int n=0; n<10; ++n ) {
+        FooConstructed = 0;
+        FooDestroyed = 0;
+        tbb::concurrent_queue<Foo> queue;
+        queue.set_capacity(n);
+        for( int i=0; i<=n; ++i ) {
+            Foo f;
+            f.serial = i;
+            bool result = queue.push_if_not_full( f );
+            ASSERT( result==(i<n), NULL );
+        }
+        for( int i=0; i<=n; ++i ) {
+            Foo f;
+            bool result = queue.pop_if_present( f );
+            ASSERT( result==(i<n), NULL );
+            ASSERT( !result || f.serial==i, NULL );
+        }
+        ASSERT( FooConstructed==FooDestroyed, NULL );
+    }
+}
+
+template<typename T>
+struct TestNegativeQueueBody {
+    tbb::concurrent_queue<T>& queue;
+    const int nthread;
+    TestNegativeQueueBody( tbb::concurrent_queue<T>& q, int n ) : queue(q), nthread(n) {}
+    void operator()( int k ) const {
+        if( k==0 ) {
+            int number_of_pops = nthread-1;
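+            // Thread 0 waits until every other thread is blocked in pop() (driving size() negative), then feeds them one item per pending pop.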
+            // Wait for all pops to pend.
+            while( queue.size()>-number_of_pops ) {
+                __TBB_Yield();
+            }
+            for( int i=0; ; ++i ) {
+                ASSERT( queue.size()==i-number_of_pops, NULL );
+                ASSERT( queue.empty()==(queue.size()<=0), NULL );
+                if( i==number_of_pops ) break;
+                // Satisfy another pop
+                queue.push( T() );
+            }
+        } else {
+            // Pop item from queue
+            T item;
+            queue.pop(item);
+        }
+    }
+};
+
+//! Test a queue with a negative size.
+template<typename T>
+void TestNegativeQueue( int nthread ) {
+    tbb::concurrent_queue<T> queue;
+    NativeParallelFor( nthread, TestNegativeQueueBody<T>(queue,nthread) );
+}
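[Editorial note] The negative-size test relies on a property that is easy to miss: size() is a signed count of pushes minus pops, so while pop() calls are blocked waiting for data it goes negative, one unit per pending pop, and empty() is defined as size()<=0. A minimal sketch of the same effect using the concurrent_bounded_queue from this release, which behaves the same way for this purpose (illustrative only, not part of the committed file):

    // Illustrative sketch only: a blocked pop() drives size() negative.
    #include "tbb/concurrent_queue.h"
    #include "tbb/tbb_thread.h"
    #include <cstdio>

    static tbb::concurrent_bounded_queue<int> q;

    static void Popper() {
        int item;
        q.pop(item);                            // blocks until something is pushed
    }

    int main() {
        tbb::tbb_thread t(Popper);
        while( q.size()>=0 )                    // wait until the pop is pending
            tbb::this_tbb_thread::yield();
        std::printf("size with one pending pop: %ld\n", long(q.size()));  // prints -1
        q.push(42);                             // satisfies the pending pop
        t.join();
        return 0;
    }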
+
+int TestMain () {
+    TestEmptyQueue<char>();
+    TestEmptyQueue<Foo>();
+    TestFullQueue();
+    TestConcurrentQueueType();
+    TestIterator();
+
+    // Test concurrent operations
+    for( int nthread=MinThread; nthread<=MaxThread; ++nthread ) {
+        TestNegativeQueue<Foo>(nthread);
+        for( int prefill=0; prefill<64; prefill+=(1+prefill/3) ) {
+            TestPushPop(prefill,ptrdiff_t(-1),nthread);
+            TestPushPop(prefill,ptrdiff_t(1),nthread);
+            TestPushPop(prefill,ptrdiff_t(2),nthread);
+            TestPushPop(prefill,ptrdiff_t(10),nthread);
+            TestPushPop(prefill,ptrdiff_t(100),nthread);
+        }
+    }
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/old/test_concurrent_vector_v2.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/old/test_concurrent_vector_v2.cpp
new file mode 100644 (file)
index 0000000..62fa5f1
--- /dev/null
@@ -0,0 +1,565 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "concurrent_vector_v2.h"
+#include <cstdio>
+#include <cstdlib>
+#include "../test/harness_assert.h"
+
+tbb::atomic<long> FooCount;
+
+//! Problem size
+const size_t N = 500000;
+
+struct Foo {
+    int my_bar;
+public:
+    enum State {
+        DefaultInitialized=0x1234,
+        CopyInitialized=0x89ab,
+        Destroyed=0x5678
+    } state;
+    int& bar() {
+        ASSERT( state==DefaultInitialized||state==CopyInitialized, NULL );
+        return my_bar;
+    }
+    int bar() const {
+        ASSERT( state==DefaultInitialized||state==CopyInitialized, NULL );
+        return my_bar;
+    }
+    static const int initial_value_of_bar = 42;
+    Foo() {
+        state = DefaultInitialized;
+        ++FooCount;
+        my_bar = initial_value_of_bar;
+    }
+    Foo( const Foo& foo ) {
+        state = CopyInitialized;
+        ++FooCount;
+        my_bar = foo.my_bar;
+    }
+    ~Foo() {
+        ASSERT( state==DefaultInitialized||state==CopyInitialized, NULL );
+        state = Destroyed;
+        my_bar = ~initial_value_of_bar;
+        --FooCount;
+    }
+    bool is_const() const {return true;}
+    bool is_const() {return false;}
+};
+
+class FooWithAssign: public Foo {
+public:
+    void operator=( const FooWithAssign& x ) {
+        ASSERT( x.state==DefaultInitialized||x.state==CopyInitialized, NULL );
+        ASSERT( state==DefaultInitialized||state==CopyInitialized, NULL );
+        my_bar = x.my_bar;
+    } 
+};
+
+inline void NextSize( int& s ) {
+    if( s<=32 ) ++s;
+    else s += s/10;     
+}
+
+static void CheckVector( const tbb::concurrent_vector<Foo>& cv, size_t expected_size, size_t old_size ) {
+    ASSERT( cv.size()==expected_size, NULL );
+    ASSERT( cv.empty()==(expected_size==0), NULL );
+    for( int j=0; j<int(expected_size); ++j ) {
+        if( cv[j].bar()!=~j )
+            std::printf("ERROR on line %d for old_size=%ld expected_size=%ld j=%d\n",__LINE__,long(old_size),long(expected_size),j);
+    }
+}
+
+void TestResizeAndCopy() {
+    typedef tbb::concurrent_vector<Foo> vector_t;
+    for( int old_size=0; old_size<=128; NextSize( old_size ) ) {
+        for( int new_size=old_size; new_size<=128; NextSize( new_size ) ) {
+            long count = FooCount;
+            vector_t v;
+            ASSERT( count==FooCount, NULL );
+            v.grow_by(old_size);
+            ASSERT( count+old_size==FooCount, NULL );
+            for( int j=0; j<old_size; ++j )
+                v[j].bar() = j*j;
+            v.grow_to_at_least(new_size);
+            ASSERT( count+new_size==FooCount, NULL );
+            for( int j=0; j<new_size; ++j ) {
+                int expected = j<old_size ? j*j : Foo::initial_value_of_bar;
+                if( v[j].bar()!=expected ) 
+                    std::printf("ERROR on line %d for old_size=%ld new_size=%ld v[%ld].bar()=%d != %d\n",__LINE__,long(old_size),long(new_size),long(j),v[j].bar(), expected);
+            }
+            ASSERT( v.size()==size_t(new_size), NULL );
+            for( int j=0; j<new_size; ++j ) {
+                v[j].bar() = ~j;
+            }
+            const vector_t& cv = v;
+            // Try copy constructor
+            vector_t copy_of_v(cv);
+            CheckVector(cv,new_size,old_size);
+            v.clear();
+            ASSERT( v.empty(), NULL );
+            CheckVector(copy_of_v,new_size,old_size);
+        }
+    }
+}
+
+void TestCapacity() {
+    for( size_t old_size=0; old_size<=10000; old_size=(old_size<5 ? old_size+1 : 3*old_size) ) {
+        for( size_t new_size=0; new_size<=10000; new_size=(new_size<5 ? new_size+1 : 3*new_size) ) {
+            long count = FooCount; 
+            {
+                typedef tbb::concurrent_vector<Foo> vector_t;
+                vector_t v;
+                v.reserve( old_size );
+                ASSERT( v.capacity()>=old_size, NULL );
+                v.reserve( new_size );
+                ASSERT( v.capacity()>=old_size, NULL );
+                ASSERT( v.capacity()>=new_size, NULL );
+                for( size_t i=0; i<2*new_size; ++i ) {
+                    ASSERT( size_t(FooCount)==count+i, NULL );
+                    size_t j = v.grow_by(1);
+                    ASSERT( j==i, NULL );
+                }
+            }
+            ASSERT( FooCount==count, NULL );
+        }
+    } 
+}
+
+struct AssignElement {
+    typedef tbb::concurrent_vector<int>::range_type::iterator iterator;
+    iterator base;
+    void operator()( const tbb::concurrent_vector<int>::range_type& range ) const {
+        for( iterator i=range.begin(); i!=range.end(); ++i ) {
+            if( *i!=0 )
+                std::printf("ERROR for v[%ld]\n", long(i-base));
+            *i = int(i-base);
+        }
+    }
+    AssignElement( iterator base_ ) : base(base_) {}
+};
+
+struct CheckElement {
+    typedef tbb::concurrent_vector<int>::const_range_type::iterator iterator;
+    iterator base;
+    void operator()( const tbb::concurrent_vector<int>::const_range_type& range ) const {
+        for( iterator i=range.begin(); i!=range.end(); ++i )
+            if( *i != int(i-base) )
+                std::printf("ERROR for v[%ld]\n", long(i-base));
+    }
+    CheckElement( iterator base_ ) : base(base_) {}
+};
+
+#include "tbb/tick_count.h"
+#include "tbb/parallel_for.h"
+#include "../test/harness.h"
+
+void TestParallelFor( int nthread ) {
+    typedef tbb::concurrent_vector<int> vector_t;
+    vector_t v;
+    v.grow_to_at_least(N);  
+    tbb::tick_count t0 = tbb::tick_count::now();
+    if( Verbose )
+        std::printf("Calling parallel_for.h with %ld threads\n",long(nthread));
+    tbb::parallel_for( v.range(10000), AssignElement(v.begin()) );
+    tbb::tick_count t1 = tbb::tick_count::now();
+    const vector_t& u = v;      
+    tbb::parallel_for( u.range(10000), CheckElement(u.begin()) );
+    tbb::tick_count t2 = tbb::tick_count::now();
+    if( Verbose )
+        std::printf("Time for parallel_for.h: assign time = %8.5f, check time = %8.5f\n",
+               (t1-t0).seconds(),(t2-t1).seconds());
+    for( long i=0; size_t(i)<v.size(); ++i )
+        if( v[i]!=i )
+            std::printf("ERROR for v[%ld]\n", i);
+}
+
+template<typename Iterator1, typename Iterator2>
+void TestIteratorAssignment( Iterator2 j ) {
+    Iterator1 i(j);
+    ASSERT( i==j, NULL );
+    ASSERT( !(i!=j), NULL );
+    Iterator1 k;
+    k = j;
+    ASSERT( k==j, NULL );
+    ASSERT( !(k!=j), NULL );
+}
+
+template<typename Iterator, typename T>
+void TestIteratorTraits() {
+    AssertSameType( static_cast<typename Iterator::difference_type*>(0), static_cast<ptrdiff_t*>(0) ); 
+    AssertSameType( static_cast<typename Iterator::value_type*>(0), static_cast<T*>(0) ); 
+    AssertSameType( static_cast<typename Iterator::pointer*>(0), static_cast<T**>(0) ); 
+    AssertSameType( static_cast<typename Iterator::iterator_category*>(0), static_cast<std::random_access_iterator_tag*>(0) );
+    T x;
+    typename Iterator::reference xr = x;
+    typename Iterator::pointer xp = &x;
+    ASSERT( &xr==xp, NULL );
+}
+
+template<typename Vector, typename Iterator>
+void CheckConstIterator( const Vector& u, int i, const Iterator& cp ) {
+    typename Vector::const_reference pref = *cp;
+    if( pref.bar()!=i )
+        std::printf("ERROR for u[%ld] using const_iterator\n", long(i));
+    typename Vector::difference_type delta = cp-u.begin();
+    ASSERT( delta==i, NULL );
+    if( u[i].bar()!=i )
+        std::printf("ERROR for u[%ld] using subscripting\n", long(i));
+    ASSERT( u.begin()[i].bar()==i, NULL );
+}
+
+template<typename Iterator1, typename Iterator2, typename V> 
+void CheckIteratorComparison( V& u ) {
+    Iterator1 i = u.begin();
+    for( int i_count=0; i_count<100; ++i_count ) {
+        Iterator2 j = u.begin();
+        for( int j_count=0; j_count<100; ++j_count ) {
+            ASSERT( (i==j)==(i_count==j_count), NULL );
+            ASSERT( (i!=j)==(i_count!=j_count), NULL );
+            ASSERT( (i-j)==(i_count-j_count), NULL );
+            ASSERT( (i<j)==(i_count<j_count), NULL );
+            ASSERT( (i>j)==(i_count>j_count), NULL );
+            ASSERT( (i<=j)==(i_count<=j_count), NULL );
+            ASSERT( (i>=j)==(i_count>=j_count), NULL );
+            ++j;
+        }
+        ++i;
+    }
+}
+
+//! Test sequential iterators for vector type V.
+/** Also does timing. */
+template<typename V>
+void TestSequentialFor() {
+    V v;
+    v.grow_by(N);
+
+    // Check iterator 
+    tbb::tick_count t0 = tbb::tick_count::now();
+    typename V::iterator p = v.begin();
+    ASSERT( !(*p).is_const(), NULL );
+    ASSERT( !p->is_const(), NULL );
+    for( int i=0; size_t(i)<v.size(); ++i, ++p ) {
+        if( (*p).state!=Foo::DefaultInitialized )
+            std::printf("ERROR for v[%ld]\n", long(i));
+        typename V::reference pref = *p;
+        pref.bar() = i;
+        typename V::difference_type delta = p-v.begin();
+        ASSERT( delta==i, NULL );
+        ASSERT( -delta<=0, "difference type not signed?" );
+    }
+    tbb::tick_count t1 = tbb::tick_count::now();
+    
+    // Check const_iterator going forwards
+    const V& u = v;     
+    typename V::const_iterator cp = u.begin();
+    ASSERT( (*cp).is_const(), NULL );
+    ASSERT( cp->is_const(), NULL );
+    for( int i=0; size_t(i)<u.size(); ++i, ++cp ) {
+        CheckConstIterator(u,i,cp);
+    }
+    tbb::tick_count t2 = tbb::tick_count::now();
+    if( Verbose )
+        std::printf("Time for serial for:  assign time = %8.5f, check time = %8.5f\n",
+               (t1-t0).seconds(),(t2-t1).seconds());
+
+    // Now go backwards
+    cp = u.end();
+    for( int i=int(u.size()); i>0; ) {
+        --i;
+        --cp;
+        if( i>0 ) {
+            typename V::const_iterator cp_old = cp--;
+            int here = (*cp_old).bar();
+            ASSERT( here==u[i].bar(), NULL );
+            typename V::const_iterator cp_new = cp++;
+            int prev = (*cp_new).bar();
+            ASSERT( prev==u[i-1].bar(), NULL );
+        }
+        CheckConstIterator(u,i,cp);
+    }
+
+    // Now go forwards and backwards
+    cp = u.begin();
+    ptrdiff_t j = 0;
+    for( size_t i=0; i<u.size(); ++i ) {
+        CheckConstIterator(u,int(j),cp);
+        typename V::difference_type delta = i*3 % u.size();
+        if( 0<=j+delta && size_t(j+delta)<u.size() ) {
+            cp += delta;
+            j += delta; 
+        } 
+        delta = i*7 % u.size();
+        if( 0<=j-delta && size_t(j-delta)<u.size() ) {
+            if( i&1 ) 
+                cp -= delta;            // Test operator-=
+            else
+                cp = cp - delta;        // Test operator-
+            j -= delta; 
+        } 
+    }
+    
+    for( int i=0; size_t(i)<u.size(); i=(i<50?i+1:i*3) )
+        for( int j=-i; size_t(i+j)<u.size(); j=(j<50?j+1:j*5) ) {
+            ASSERT( (u.begin()+i)[j].bar()==i+j, NULL );
+            ASSERT( (v.begin()+i)[j].bar()==i+j, NULL );
+            ASSERT( (i+u.begin())[j].bar()==i+j, NULL );
+            ASSERT( (i+v.begin())[j].bar()==i+j, NULL );
+        }
+
+    CheckIteratorComparison<typename V::iterator, typename V::iterator>(v);
+    CheckIteratorComparison<typename V::iterator, typename V::const_iterator>(v);
+    CheckIteratorComparison<typename V::const_iterator, typename V::iterator>(v);
+    CheckIteratorComparison<typename V::const_iterator, typename V::const_iterator>(v);
+
+    TestIteratorAssignment<typename V::const_iterator>( u.begin() );
+    TestIteratorAssignment<typename V::const_iterator>( v.begin() );
+    TestIteratorAssignment<typename V::iterator>( v.begin() );
+
+    // Check reverse_iterator 
+    typename V::reverse_iterator rp = v.rbegin();
+    for( size_t i=v.size(); i>0; --i, ++rp ) {
+        typename V::reference pref = *rp;
+        ASSERT( size_t(pref.bar())==i-1, NULL );
+        ASSERT( rp!=v.rend(), NULL );
+    }
+    ASSERT( rp==v.rend(), NULL );
+    
+    // Check const_reverse_iterator 
+    typename V::const_reverse_iterator crp = u.rbegin();
+    for( size_t i=v.size(); i>0; --i, ++crp ) {
+        typename V::const_reference cpref = *crp;
+        ASSERT( size_t(cpref.bar())==i-1, NULL );
+        ASSERT( crp!=u.rend(), NULL );
+    }
+    ASSERT( crp==u.rend(), NULL );
+
+    TestIteratorAssignment<typename V::const_reverse_iterator>( u.rbegin() );
+    TestIteratorAssignment<typename V::reverse_iterator>( v.rbegin() );
+}
+
+static const size_t Modulus = 7;
+
+typedef tbb::concurrent_vector<Foo> MyVector;
+
+class GrowToAtLeast {
+    MyVector& my_vector;
+public:
+    void operator()( const tbb::blocked_range<size_t>& range ) const {
+        for( size_t i=range.begin(); i!=range.end(); ++i ) {
+            size_t n = my_vector.size();
+            size_t k = n==0 ? 0 : i % (2*n+1);
+            my_vector.grow_to_at_least(k+1);
+            ASSERT( my_vector.size()>=k+1, NULL );
+        }
+    }
+    GrowToAtLeast( MyVector& vector ) : my_vector(vector) {}
+};
+
+void TestConcurrentGrowToAtLeast() {
+    MyVector v;
+    for( size_t s=1; s<1000; s*=10 ) {
+        tbb::parallel_for( tbb::blocked_range<size_t>(0,1000000,100), GrowToAtLeast(v) );
+    }
+}
+
+//! Test concurrent invocations of method concurrent_vector::grow_by
+class GrowBy {
+    MyVector& my_vector;
+public:
+    void operator()( const tbb::blocked_range<int>& range ) const {
+        for( int i=range.begin(); i!=range.end(); ++i ) {
+            if( i%3 ) {
+                Foo& element = my_vector[my_vector.grow_by(1)]; 
+                element.bar() = i;
+            } else {
+                Foo f;
+                f.bar() = i;
+                size_t k = my_vector.push_back( f );
+                ASSERT( my_vector[k].bar()==i, NULL );
+            }
+        }
+    }
+    GrowBy( MyVector& vector ) : my_vector(vector) {}
+};
+
+//! Test concurrent invocations of method concurrent_vector::grow_by
+void TestConcurrentGrowBy( int nthread ) {
+    int m = 100000;
+    MyVector v;
+    tbb::parallel_for( tbb::blocked_range<int>(0,m,1000), GrowBy(v) );
+    ASSERT( v.size()==size_t(m), NULL );
+
+    // Verify that v is a permutation of 0..m
+    int inversions = 0;
+    bool* found = new bool[m];
+    memset( found, 0, m );
+    for( int i=0; i<m; ++i ) {
+        int index = v[i].bar();
+        ASSERT( !found[index], NULL );
+        found[index] = true;
+        if( i>0 )
+            inversions += v[i].bar()<v[i-1].bar();
+    }
+    for( int i=0; i<m; ++i ) {
+        ASSERT( found[i], NULL );
+        ASSERT( nthread>1 || v[i].bar()==i, "sequential execution is wrong" );
+    }
+    delete[] found;
+    if( nthread>1 && inversions<m/10 )
+        std::printf("Warning: not much concurrency in TestConcurrentGrowBy\n");
+}
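[Editorial note] GrowBy above relies on the central guarantee of concurrent_vector: grow_by(1) atomically appends one default-constructed element and, in this old (v2) interface, returns its index, so each thread gets exclusive ownership of the slot it just claimed and can fill it without further locking. A minimal standalone sketch of that claim-a-slot idiom against the same v2 header (illustrative only; the include paths and names outside the test are assumptions):

    // Illustrative sketch of the claim-a-slot idiom used by GrowBy above.
    // Assumes the old (v2) interface, where grow_by returns the index of the
    // first newly added element.
    #include "concurrent_vector_v2.h"
    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"
    #include "tbb/task_scheduler_init.h"

    struct AppendSquares {
        tbb::concurrent_vector<long>& my_v;
        AppendSquares( tbb::concurrent_vector<long>& v ) : my_v(v) {}
        void operator()( const tbb::blocked_range<int>& r ) const {
            for( int i=r.begin(); i!=r.end(); ++i ) {
                size_t k = my_v.grow_by(1);     // atomically reserve one slot
                my_v[k] = long(i)*i;            // no other thread owns slot k
            }
        }
    };

    int main() {
        tbb::task_scheduler_init init;
        tbb::concurrent_vector<long> v;
        tbb::parallel_for( tbb::blocked_range<int>(0,10000,100), AppendSquares(v) );
        return v.size()==size_t(10000) ? 0 : 1; // element order is unspecified
    }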
+
+//! Test the assignment operator
+void TestAssign() {
+    typedef tbb::concurrent_vector<FooWithAssign> vector_t;
+    for( int dst_size=1; dst_size<=128; NextSize( dst_size ) ) {
+        for( int src_size=2; src_size<=128; NextSize( src_size ) ) {
+            vector_t u;
+            u.grow_to_at_least(src_size);
+            for( int i=0; i<src_size; ++i )
+                u[i].bar() = i*i;
+            vector_t v;
+            v.grow_to_at_least(dst_size);
+            for( int i=0; i<dst_size; ++i )
+                v[i].bar() = -i;
+            v = u;
+            u.clear();
+            ASSERT( u.size()==0, NULL );
+            ASSERT( v.size()==size_t(src_size), NULL );
+            for( int i=0; i<src_size; ++i )
+                ASSERT( v[i].bar()==(i*i), NULL );
+        }
+    }    
+}
+
+//------------------------------------------------------------------------
+// Regression test for a problem where oversubscription caused
+// concurrent_vector::grow_by to run very slowly (TR#196).
+//------------------------------------------------------------------------
+
+#include "tbb/task_scheduler_init.h"
+#include <math.h>
+
+typedef unsigned long Number;
+
+static tbb::concurrent_vector<Number> Primes;
+
+class FindPrimes {
+    bool is_prime( Number val ) const {
+        int limit, factor = 3;
+        if( val<5u ) 
+            return val==2;
+        else {
+            limit = long(sqrtf(float(val))+0.5f);
+            while( factor<=limit && val % factor )
+                ++factor;
+            return factor>limit;
+        }
+    }
+public:
+    void operator()( const tbb::blocked_range<Number>& r ) const {
+        for( Number i=r.begin(); i!=r.end(); ++i ) { 
+            if( i%2 && is_prime(i) ) {
+                Primes[Primes.grow_by(1)] = i;
+            }
+        }
+    }
+};
+
+static double TimeFindPrimes( int nthread ) {
+    Primes.clear();
+    tbb::task_scheduler_init init(nthread);
+    tbb::tick_count t0 = tbb::tick_count::now();
+    tbb::parallel_for( tbb::blocked_range<Number>(0,1000000,500), FindPrimes() );
+    tbb::tick_count t1 = tbb::tick_count::now();
+    return (t1-t0).seconds();
+}
+
+static void TestFindPrimes() {
+    // Time fully subscribed run.
+    double t2 = TimeFindPrimes( tbb::task_scheduler_init::automatic );
+
+    // Time parallel run that is very likely oversubscribed.  
+    double t128 = TimeFindPrimes(128);
+
+    if( Verbose ) 
+        std::printf("TestFindPrimes: t2==%g t128=%g\n", t2, t128 );
+
+    // We give the 128-thread run a little extra time to allow for thread overhead.
+    // Theoretically, the following test will fail on a machine with more than 128 processors.
+    // But that situation is not going to come up in the near future,
+    // and the generalization to fix the issue is not worth the trouble.
+    if( t128>1.10*t2 ) {
+        std::printf("Warning: grow_by is pathetically slow: t2==%g t128=%g\n", t2, t128);
+    } 
+}
+
+//------------------------------------------------------------------------
+// Test compatibility with STL sort.
+//------------------------------------------------------------------------
+
+#include <algorithm>
+
+void TestSort() {
+    for( int n=1; n<100; n*=3 ) {
+        tbb::concurrent_vector<int> array;
+        array.grow_by( n );
+        for( int i=0; i<n; ++i )
+            array[i] = (i*7)%n;
+        std::sort( array.begin(), array.end() );
+        for( int i=0; i<n; ++i )
+            ASSERT( array[i]==i, NULL );
+    }
+}
+
+//------------------------------------------------------------------------
+
+int TestMain () {
+    if( MinThread<1 ) {
+        std::printf("ERROR: MinThread=%d, but must be at least 1\n",MinThread);
+    }
+
+    TestIteratorTraits<tbb::concurrent_vector<Foo>::iterator,Foo>();
+    TestIteratorTraits<tbb::concurrent_vector<Foo>::const_iterator,const Foo>();
+    TestSequentialFor<tbb::concurrent_vector<Foo> > ();
+    TestResizeAndCopy();
+    TestAssign();
+    TestCapacity();
+    for( int nthread=MinThread; nthread<=MaxThread; ++nthread ) {
+        tbb::task_scheduler_init init( nthread );
+        TestParallelFor( nthread );
+        TestConcurrentGrowToAtLeast();
+        TestConcurrentGrowBy( nthread );
+    }
+    TestFindPrimes();
+    TestSort();
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/old/test_mutex_v2.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/old/test_mutex_v2.cpp
new file mode 100644 (file)
index 0000000..0c3f699
--- /dev/null
@@ -0,0 +1,268 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+//------------------------------------------------------------------------
+// Test TBB mutexes when used with parallel_for.h
+//
+// Usage: test_Mutex.exe [-v] nthread
+//
+// The -v option causes timing information to be printed.
+//
+// Compile with _OPENMP and -openmp
+//------------------------------------------------------------------------
+#include "tbb/atomic.h"
+#include "tbb/blocked_range.h"
+#include "tbb/parallel_for.h"
+#include "tbb/tick_count.h"
+#include "../test/harness.h"
+#include "spin_rw_mutex_v2.h"
+#include <cstdlib>
+#include <cstdio>
+
+#if __linux__
+#define STD std
+#else
+#define STD   /* Cater to broken Windows compilers that are missing "std". */
+#endif /* __linux__ */
+
+// This test deliberately avoids a "using tbb" statement,
+// so that the error of putting types in the wrong namespace will be caught.
+
+template<typename M>
+struct Counter {
+    typedef M mutex_type;
+    M mutex;
+    volatile long value;
+};
+
+//! Function object for use with parallel_for.h.
+template<typename C>
+struct AddOne {
+    C& counter;
+    /** Increments counter once for each iteration in the iteration space. */
+    void operator()( tbb::blocked_range<size_t>& range ) const {
+        for( size_t i=range.begin(); i!=range.end(); ++i ) {
+            if( i&1 ) {
+                // Try implicit acquire and explicit release
+                typename C::mutex_type::scoped_lock lock(counter.mutex);
+                counter.value = counter.value+1;
+                lock.release();
+            } else {
+                // Try explicit acquire and implicit release
+                typename C::mutex_type::scoped_lock lock;
+                lock.acquire(counter.mutex);
+                counter.value = counter.value+1;
+            }
+        }
+    }
+    AddOne( C& counter_ ) : counter(counter_) {}
+};
+
+//! Generic test of a TBB mutex type M.
+/** Does not test features specific to reader-writer locks. */
+template<typename M>
+void Test( const char * name ) {
+    if( Verbose ) {
+        printf("%s time = ",name);
+        fflush(stdout);
+    }
+    Counter<M> counter;
+    counter.value = 0;
+    const int n = 100000;
+    tbb::tick_count t0 = tbb::tick_count::now();
+    tbb::parallel_for(tbb::blocked_range<size_t>(0,n,10000),AddOne<Counter<M> >(counter));
+    tbb::tick_count t1 = tbb::tick_count::now();
+    if( Verbose )
+        printf("%g usec\n",(t1-t0).seconds());
+    if( counter.value!=n )
+        STD::printf("ERROR for %s: counter.value=%ld\n",name,counter.value);
+}
+
+template<typename M, size_t N>
+struct Invariant {
+    typedef M mutex_type;
+    M mutex;
+    const char* mutex_name;
+    volatile long value[N];
+    volatile long single_value;
+    Invariant( const char* mutex_name_ ) :
+        mutex_name(mutex_name_)
+    {
+        single_value = 0;
+        for( size_t k=0; k<N; ++k )
+            value[k] = 0;
+    }
+    void update() {
+        for( size_t k=0; k<N; ++k )
+            ++value[k];
+    }
+    bool value_is( long expected_value ) const {
+        long tmp;
+        for( size_t k=0; k<N; ++k )
+//            if( value[k]!=expected_value )
+//                return false;
+            if( (tmp=value[k])!=expected_value ) {
+                printf("ATTN! %ld!=%ld\n", tmp, expected_value);
+                return false;
+            }
+        return true;
+    }
+    bool is_okay() {
+        return value_is( value[0] );
+    }
+};
+
+//! Function object for use with parallel_for.h.
+template<typename I>
+struct TwiddleInvariant {
+    I& invariant;
+    /** Performs a mix of read and write accesses that maintain and check the invariant. */
+    void operator()( tbb::blocked_range<size_t>& range ) const {
+        for( size_t i=range.begin(); i!=range.end(); ++i ) {
+            //! Every 8th access is a write access
+            bool write = (i%8)==7;
+            bool okay = true;
+            bool lock_kept = true;
+            if( (i/8)&1 ) {
+                // Try implicit acquire and explicit release
+                typename I::mutex_type::scoped_lock lock(invariant.mutex,write);
+                if( write ) {
+                    long my_value = invariant.value[0];
+                    invariant.update();
+                    if( i%16==7 ) {
+                        lock_kept = lock.downgrade_to_reader();
+                        if( !lock_kept )
+                            my_value = invariant.value[0] - 1;
+                        okay = invariant.value_is(my_value+1);
+                    }
+                } else {
+                    okay = invariant.is_okay();
+                    if( i%8==3 ) {
+                        long my_value = invariant.value[0];
+                        lock_kept = lock.upgrade_to_writer();
+                        if( !lock_kept )
+                            my_value = invariant.value[0];
+                        invariant.update();
+                        okay = invariant.value_is(my_value+1);
+                    }
+                }
+                lock.release();
+            } else {
+                // Try explicit acquire and implicit release
+                typename I::mutex_type::scoped_lock lock;
+                lock.acquire(invariant.mutex,write);
+                if( write ) {
+                    long my_value = invariant.value[0];
+                    invariant.update();
+                    if( i%16==7 ) {
+                        lock_kept = lock.downgrade_to_reader();
+                        if( !lock_kept )
+                            my_value = invariant.value[0] - 1;
+                        okay = invariant.value_is(my_value+1);
+                    }
+                } else {
+                    okay = invariant.is_okay();
+                    if( i%8==3 ) {
+                        long my_value = invariant.value[0];
+                        lock_kept = lock.upgrade_to_writer();
+                        if( !lock_kept )
+                            my_value = invariant.value[0];
+                        invariant.update();
+                        okay = invariant.value_is(my_value+1);
+                    }
+                }
+            }
+            if( !okay ) {
+                STD::printf( "ERROR for %s at %ld: %s %s %s %s\n",invariant.mutex_name, long(i),
+                             write?"write,":"read,", write?(i%16==7?"downgrade,":""):(i%8==3?"upgrade,":""),
+                             lock_kept?"lock kept,":"lock not kept,", (i/8)&1?"imp/exp":"exp/imp" );
+            }
+        }
+    }
+    TwiddleInvariant( I& invariant_ ) : invariant(invariant_) {}
+};
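[Editorial note] TwiddleInvariant encodes the subtle contract of upgrade_to_writer()/downgrade_to_reader(): a false return means the lock had to be released and reacquired to change mode, so any state read before the call may be stale and must be re-read. A minimal sketch of the usual read-check-upgrade pattern built on that contract (illustrative only; the Set type and its contains()/insert() interface are hypothetical, and the shipped tbb::spin_rw_mutex stands in for the v2 mutex tested here):

    // Illustrative sketch of the upgrade idiom exercised above.
    // `Set' with contains()/insert() is a hypothetical type protected by `mutex'.
    #include "tbb/spin_rw_mutex.h"

    template<typename Set>
    bool InsertIfAbsent( tbb::spin_rw_mutex& mutex, Set& set, int key ) {
        tbb::spin_rw_mutex::scoped_lock lock( mutex, /*write=*/false ); // reader
        if( set.contains(key) )
            return false;
        if( !lock.upgrade_to_writer() ) {
            // The lock was released while upgrading; another writer may have
            // inserted the key in the meantime, so repeat the check.
            if( set.contains(key) )
                return false;
        }
        set.insert(key);                    // now holding the writer lock
        return true;
    }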
+
+/** This test is generic so that we can test any other kinds of ReaderWriter locks we write later. */
+template<typename M>
+void TestReaderWriterLock( const char * mutex_name ) {
+    if( Verbose ) {
+        printf("%s readers & writers time = ",mutex_name);
+        fflush(stdout);
+    }
+    Invariant<M,8> invariant(mutex_name);
+    const size_t n = 500000;
+    tbb::tick_count t0 = tbb::tick_count::now();
+    tbb::parallel_for(tbb::blocked_range<size_t>(0,n,5000),TwiddleInvariant<Invariant<M,8> >(invariant));
+    tbb::tick_count t1 = tbb::tick_count::now();
+    // There is either a writer or a reader upgraded to a writer for each 4th iteration
+    long expected_value = n/4;
+    if( !invariant.value_is(expected_value) )
+        STD::printf("ERROR for %s: final invariant value is wrong\n",mutex_name);
+    if( Verbose )
+        printf("%g usec\n",(t1-t0).seconds());
+}
+
+/** Test try_acquire functionality of a non-reentrant mutex */
+template<typename M>
+void TestTryAcquire_OneThread( const char * mutex_name ) {
+    M tested_mutex;
+    typename M::scoped_lock lock1;
+    if( lock1.try_acquire(tested_mutex) )
+        lock1.release();
+    else
+        STD::printf("ERROR for %s: try_acquire failed though it should not\n", mutex_name);
+    {
+        typename M::scoped_lock lock2(tested_mutex);
+        if( lock1.try_acquire(tested_mutex) )
+            STD::printf("ERROR for %s: try_acquire succeeded though it should not\n", mutex_name);
+    }
+    if( lock1.try_acquire(tested_mutex) )
+        lock1.release();
+    else
+        STD::printf("ERROR for %s: try_acquire failed though it should not\n", mutex_name);
+}
+
+#include "tbb/task_scheduler_init.h"
+
+int TestMain () {
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        tbb::task_scheduler_init init( p );
+        if( Verbose )
+            printf( "testing with %d workers\n", static_cast<int>(p) );
+        // Run each test 3 times.
+        for( int i=0; i<3; ++i ) {
+            Test<tbb::spin_rw_mutex>( "Spin RW Mutex" );
+            
+            TestTryAcquire_OneThread<tbb::spin_rw_mutex>("Spin RW Mutex"); // only tests try_acquire for writers
+            TestReaderWriterLock<tbb::spin_rw_mutex>( "Spin RW Mutex" );
+        }
+        if( Verbose )
+            printf( "calling destructor for task_scheduler_init\n" );
+    }
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/perf/fibonacci_cutoff.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/perf/fibonacci_cutoff.cpp
new file mode 100644 (file)
index 0000000..f1f50ef
--- /dev/null
@@ -0,0 +1,134 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include <cstdio>
+#include <cstdlib>
+
+#include "tbb/task_scheduler_init.h"
+#include "tbb/task.h"
+#include "tbb/tick_count.h"
+
+long CutOff = 1;
+
+long SerialFib( const long n );
+
+long ParallelFib( const long n ); 
+
+inline void dump_title() {
+    printf("Serial/Parallel, P, N, cutoff, repetitions, time, fib, speedup\n");
+}
+
+inline void output(int P, long n, long c, int T, double serial_elapsed, double elapsed, long result) {
+    printf("%s, %d, %ld, %ld, %d, %g, %ld, %g\n", ( (P == 0) ? "Serial" : "Parallel" ), P, n, c, T, elapsed, result, serial_elapsed / elapsed);
+}
+
+#define MOVE_BY_FOURTHS 1
+inline long calculate_new_cutoff(const long lo, const long hi) {
+#if MOVE_BY_FOURTHS    
+    return lo + (3 + hi - lo ) / 4;
+#else
+    return (hi + lo)/2;
+#endif
+}
+
+void find_cutoff(const int P, const long n, const int T, const double serial_elapsed) {
+    long lo = 1, hi = n;
+    double elapsed = 0, lo_elapsed = 0, hi_elapsed = 0;
+    long final_cutoff = -1;
+
+    tbb::task_scheduler_init init(P);
+
+    while(true) {
+       CutOff = calculate_new_cutoff(lo, hi);
+       long result = 0;
+       tbb::tick_count t0;
+       for (int t = -1; t < T; ++t) {
+           // The t == -1 pass is an untimed warm-up; timing starts at t == 0.
+           if (t == 0) t0 = tbb::tick_count::now();
+           result += ParallelFib(n);
+       }
+       elapsed = (tbb::tick_count::now() - t0).seconds();
+       output(P,n,CutOff,T,serial_elapsed,elapsed,result);
+
+       if (serial_elapsed / elapsed >= P/2.0) {
+           final_cutoff = CutOff;
+           if (hi == CutOff) {
+               if (hi == lo) {
+                  // we have had this value at both above and below 50%
+                  lo = 1; lo_elapsed = 0;
+               } else  {
+                  break;
+               }
+           }
+           hi = CutOff;
+           hi_elapsed = elapsed;
+       } else {
+           if (lo == CutOff) break;
+           lo = CutOff;
+           lo_elapsed = elapsed;
+       }
+    } 
+
+    double interpolated_cutoff = lo + ( P/2.0 - serial_elapsed/lo_elapsed ) * ( (hi - lo) / ( serial_elapsed/hi_elapsed - serial_elapsed/lo_elapsed ));
+
+    if (final_cutoff != -1) {
+        printf("50%% efficiency cutoff is %ld ( linearly interpolated cutoff is %g )\n", final_cutoff, interpolated_cutoff);
+    } else {
+        printf("Cannot achieve 50%% efficiency\n");
+    }
+
+    return;
+}
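[Editorial note] The loop above bisects on CutOff until the 50% efficiency boundary (speedup >= P/2) is bracketed by lo and hi; the final line then linearly interpolates between the two bracketing measurements. Restating the expression in the code with speedup(x) = serial_elapsed / x_elapsed:

    interpolated_cutoff = lo + (P/2 - speedup(lo)) * (hi - lo) / (speedup(hi) - speedup(lo))

i.e. the cutoff at which the interpolated speedup line crosses P/2. Note that if the lower bound was never timed, lo_elapsed stays 0 and the interpolated value is not meaningful.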
+
+int main(int argc, char *argv[]) {
+    if (argc < 4) {
+        printf("Usage: %s threads n repetitions\n",argv[0]); 
+        return 1;
+    }
+
+    dump_title();
+
+    int P = atoi(argv[1]);
+    long n = atol(argv[2]);
+    int T = atoi(argv[3]);
+
+    // collect serial time
+    long serial_result = 0;
+    tbb::tick_count t0; 
+    for (int t = -1; t < T; ++t) {
+        if (t == 0) t0 = tbb::tick_count::now();        
+        serial_result += SerialFib(n);
+    }
+    double serial_elapsed = (tbb::tick_count::now() - t0).seconds();
+    output(0,n,0,T,serial_elapsed,serial_elapsed,serial_result);
+
+    // perform search
+    find_cutoff(P,n,T,serial_elapsed);
+
+    return 0;
+}
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/perf/fibonacci_impl_tbb.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/perf/fibonacci_impl_tbb.cpp
new file mode 100644 (file)
index 0000000..83d7e49
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include <cstdio>
+#include <cstdlib>
+
+#include "tbb/task_scheduler_init.h"
+#include "tbb/task.h"
+#include "tbb/tick_count.h"
+
+extern long CutOff;
+
+long SerialFib( const long n ) {
+    if( n<2 )
+        return n;
+    else
+        return SerialFib(n-1)+SerialFib(n-2);
+}
+
+struct FibContinuation: public tbb::task {
+    long* const sum;
+    long x, y;
+    FibContinuation( long* sum_ ) : sum(sum_) {}
+    tbb::task* execute() {
+        *sum = x+y;
+        return NULL;
+    }
+};
+
+struct FibTask: public tbb::task {
+    long n;
+    long * sum;
+    FibTask( const long n_, long * const sum_ ) :
+        n(n_), sum(sum_)
+    {}
+    tbb::task* execute() {
+        if( n<CutOff ) {
+            *sum = SerialFib(n);
+            return NULL;
+        } else {
+            FibContinuation& c = 
+                *new( allocate_continuation() ) FibContinuation(sum);
+            FibTask& b = *new( c.allocate_child() ) FibTask(n-1,&c.y);
+            // Reuse this task as the second child of c; it will compute Fib(n-2).
+            recycle_as_child_of(c);
+            n -= 2;
+            sum = &c.x;
+            // Set ref_count to "two children".
+            c.set_ref_count(2);
+            c.spawn( b );
+            return this;    // Scheduler bypass: the recycled task runs next.
+        }
+    }
+};
+
+long ParallelFib( const long n ) {
+    long sum = 0;
+    FibTask& a = *new(tbb::task::allocate_root()) FibTask(n,&sum);
+    tbb::task::spawn_root_and_wait(a);
+    return sum;
+}
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/perf/perf.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/perf/perf.cpp
new file mode 100644 (file)
index 0000000..024a5a6
--- /dev/null
@@ -0,0 +1,859 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "perf.h"
+
+#include <cstdlib>
+#include <cmath>
+#include <vector>
+#include <algorithm>
+#include <cassert>
+
+#include "tbb/tick_count.h"
+
+#define HARNESS_CUSTOM_MAIN 1
+#include "../src/test/harness.h"
+#include "../src/test/harness_barrier.h"
+
+#include "tbb/task_scheduler_init.h"
+#include "tbb/task.h"
+#include "tbb/atomic.h"
+
+#if  __linux__ || __APPLE__ || __FreeBSD__
+    #include <sys/resource.h>
+#endif
+
+__TBB_PERF_API int NumCpus = tbb::task_scheduler_init::default_num_threads(),
+                   NumThreads,
+                   MaxConcurrency;
+
+namespace Perf {
+
+SessionSettings theSettings;
+
+namespace internal {
+
+    typedef std::vector<duration_t> durations_t;
+
+    static uintptr_t NumRuns = 7;
+    static duration_t RunDuration = 0.01;
+
+    static const int RateFieldLen = 10;
+    static const int OvhdFieldLen = 12;
+
+    const char* TestNameColumnTitle = "Test name";
+    const char* WorkloadNameColumnTitle = "Workload";
+
+    size_t TitleFieldLen = 0;
+    size_t WorkloadFieldLen = 0;
+
+    int TotalConfigs = 0;
+    int MaxTbbMasters = 1;
+
+    //! Defines the mapping between threads and cores in the undersubscription mode
+    /** When adding new enumerator, insert it before amLast, and do not specify
+        its value explicitly. **/
+    enum AffinitizationMode {
+        amFirst = 0,
+        amDense = amFirst,
+        amSparse,
+        //! Used to track the number of supported affinitization modes
+        amLast
+    };
+
+    static const int NumAffinitizationModes = amLast - amFirst; 
+
+    const char* AffinitizationModeNames[] = { "dense", "sparse" };
+
+    int NumActiveAffModes = 1;
+
+    //! Settings of a test run configuration
+    struct RunConfig {
+        int my_maxConcurrency;
+        int my_numThreads;      // For task scheduler tests this is number of workers + 1
+        int my_numMasters;      // Used for task scheduler tests only
+        int my_affinityMode;    // Used for task scheduler tests only
+        int my_workloadID;
+
+        int NumMasters () const {
+            return theSettings.my_opts & UseTaskScheduler ? my_numMasters : my_numThreads;
+        }
+    };
+
+    double StandardDeviation ( double avg, const durations_t& d ) {
+        double  std_dev = 0;
+        for ( uintptr_t i = 0; i < d.size(); ++i ) {
+            double  dev = fabs(d[i] - avg);
+            std_dev += dev * dev;
+        }
+        std_dev = sqrt(std_dev / d.size());
+        return std_dev / avg * 100;
+    }
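[Editorial note] StandardDeviation returns a relative value: the standard deviation expressed as a percentage of the average, not an absolute time. For example (numbers made up for illustration), for durations {1.0, 1.2, 0.8} with average 1.0 the deviations are {0, 0.2, 0.2}, so std_dev = sqrt((0 + 0.04 + 0.04)/3) ≈ 0.163 and the function returns ≈ 16.3.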
+
+    void Statistics ( const durations_t& d, 
+                      duration_t& avgTime, double& stdDev, 
+                      duration_t& minTime, duration_t& maxTime )
+    {
+        minTime = maxTime = avgTime = d[0];
+        for ( size_t i = 1; i < d.size(); ++i ) {
+            avgTime += d[i];
+            if ( minTime > d[i] )
+                minTime = d[i];
+            else if ( maxTime < d[i] )
+                maxTime = d[i];
+        }
+        avgTime = avgTime / d.size();
+        stdDev = StandardDeviation( avgTime, d );
+    }
+
+    //! Timing data for the series of repeated runs and results of their statistical processing
+    struct TimingSeries {
+        //! Statistical timing series
+        durations_t my_durations;
+        
+        //! Average time obtained from my_durations data
+        duration_t  my_avgTime;
+
+        //! Minimal time obtained from my_durations data
+        duration_t  my_minTime;
+
+        //! Minimal time obtained from my_durations data
+        duration_t  my_maxTime;
+
+        //! Standard deviation of my_avgTime value (per cent)
+        double  my_stdDev;
+
+        TimingSeries ( uintptr_t nruns = NumRuns )
+            : my_durations(nruns), my_avgTime(0), my_minTime(0), my_maxTime(0)
+        {}
+
+        void CalculateStatistics () {
+            Statistics( my_durations, my_avgTime, my_stdDev, my_minTime, my_maxTime );
+        }
+    }; // struct TimingSeries
+
+    //! Settings and timing results for a test run configuration
+    struct RunResults {
+        //! Run configuration settings
+        RunConfig   my_config;
+        
+        //! Timing results for this run configuration
+        TimingSeries my_timing;
+    };
+
+    typedef std::vector<const char*>    names_t;
+    typedef std::vector<TimingSeries>   timings_t;
+    typedef std::vector<RunResults>     test_results_t;
+
+    enum TestMethods {
+        idRunSerial = 0x01,
+        idOnStart = 0x02,
+        idOnFinish = 0x04,
+        idPrePostProcess = idOnStart | idOnFinish
+    };
+
+    //! Set of flags identifying methods not overridden by the currently active test
+    /** Used as a scratch var. **/
+    uintptr_t g_absentMethods;
+
+    //! Test object and timing results for all of its configurations 
+    struct TestResults {
+        //! Pointer to the test object interface
+        Test*           my_test;
+
+        //! Set of flags identifying optional methods overridden by my_test
+        /** A set of ORed TestMethods flags **/
+        uintptr_t       my_availableMethods;
+        
+        //! Vector of serial times for each workload supported by this test
+        /** Element index in the vector serves as a zero based workload ID. **/
+        timings_t       my_serialBaselines;
+        
+        //! Common baselines for both parallel and serial variants
+        /** Element index in the vector serves as a zero based workload ID. **/
+        timings_t       my_baselines;
+
+        //! Strings identifying workloads to be used in output
+        names_t         my_workloadNames;
+
+        //! Vector of timings for all run configurations of my_test
+        test_results_t  my_results;
+
+        const char*     my_testName;
+
+        mutable bool    my_hasOwnership;
+
+        TestResults ( Test* t, const char* className, bool takeOwnership )
+            : my_test(t), my_availableMethods(0), my_testName(className), my_hasOwnership(takeOwnership)
+        {}
+
+        TestResults ( const TestResults& tr )
+            : my_test(tr.my_test)
+            , my_availableMethods(0)
+            , my_testName(tr.my_testName)
+            , my_hasOwnership(tr.my_hasOwnership)
+        {
+            tr.my_hasOwnership = false;
+        }
+
+        ~TestResults () {
+            for ( size_t i = 0; i < my_workloadNames.size(); ++i )
+                delete my_workloadNames[i];
+            if ( my_hasOwnership )
+                delete my_test;
+        }
+    }; // struct TestResults
+
+    typedef std::vector<TestResults> session_t;
+
+    session_t theSession;
+
+    TimingSeries CalibrationTiming;
+
+    const uintptr_t CacheSize = 8*1024*1024;
+    volatile intptr_t W[CacheSize];
+
+    struct WiperBody {
+        void operator()( int ) const {
+            volatile intptr_t sink = 0;
+            for ( uintptr_t i = 0; i < CacheSize; ++i )
+                sink += W[i];
+        }
+    };
+
+    void TraceHistogram ( const durations_t& t, const char* histogramFileName ) {
+        FILE* f = histogramFileName ? fopen(histogramFileName, "wt") : stdout;
+        uintptr_t  n = t.size();
+        const uintptr_t num_buckets = 100;
+        double  min_val = *std::min_element(t.begin(), t.end()),
+                max_val = *std::max_element(t.begin(), t.end()),
+                bucket_size = (max_val - min_val) / num_buckets;
+        std::vector<uintptr_t> hist(num_buckets + 1, 0);
+        for ( uintptr_t i = 0; i < n; ++i )
+            ++hist[uintptr_t((t[i]-min_val)/bucket_size)];
+        ASSERT (hist[num_buckets] == 1, "");
+        ++hist[num_buckets - 1];
+        hist.resize(num_buckets);
+        fprintf (f, "Histogram: nvals = %u, min = %g, max = %g, nbuckets = %u\n", (unsigned)n, min_val, max_val, (unsigned)num_buckets);
+        double bucket = min_val;
+        for ( uintptr_t i = 0; i < num_buckets; ++i, bucket+=bucket_size )
+            fprintf (f, "%12g\t%u\n", bucket, (unsigned)hist[i]);
+        if ( histogramFileName )
+            fclose(f);
+    }
+
+#if _MSC_VER
+    typedef DWORD_PTR cpu_set_t;
+
+    class AffinityHelper {
+        static const unsigned MaxAffinitySetSize = sizeof(cpu_set_t) * 8;
+        static unsigned AffinitySetSize;
+
+        //! Mapping from a CPU index to a valid affinity cpu_mask
+        /** The first element is not used. **/
+        static cpu_set_t m_affinities[MaxAffinitySetSize + 1];
+
+        static cpu_set_t m_processMask;
+
+        class Initializer {
+        public:
+            Initializer () {
+                SYSTEM_INFO si;
+                GetSystemInfo(&si);
+                ASSERT( si.dwNumberOfProcessors <= MaxAffinitySetSize, "Too many CPUs" );
+                AffinitySetSize = min (si.dwNumberOfProcessors, MaxAffinitySetSize);
+                cpu_set_t systemMask = 0;
+                GetProcessAffinityMask( GetCurrentProcess(), &m_processMask, &systemMask );
+                cpu_set_t cpu_mask = 1;
+                for ( DWORD i = 0; i < AffinitySetSize; ++i ) {
+                    while ( !(cpu_mask & m_processMask) && cpu_mask )
+                        cpu_mask <<= 1;
+                    ASSERT( cpu_mask != 0, "Process affinity set is culled?" );
+                    m_affinities[i] = cpu_mask;
+                    cpu_mask <<= 1;
+                }
+            }
+        }; // class AffinityHelper::Initializer
+
+        static Initializer m_initializer;
+
+    public:
+        static cpu_set_t CpuAffinity ( int cpuIndex ) {
+            return m_affinities[cpuIndex % AffinitySetSize];
+        }
+
+        static const cpu_set_t& ProcessMask () { return m_processMask; }
+    }; // class AffinityHelper
+
+    unsigned AffinityHelper::AffinitySetSize = 0;
+    cpu_set_t AffinityHelper::m_affinities[AffinityHelper::MaxAffinitySetSize + 1] = {0};
+    cpu_set_t AffinityHelper::m_processMask = 0;
+    AffinityHelper::Initializer AffinityHelper::m_initializer;
+
+    #define CPU_ZERO(cpu_mask)              (*cpu_mask = 0)
+    #define CPU_SET(cpu_idx, cpu_mask)      (*cpu_mask |= AffinityHelper::CpuAffinity(cpu_idx))
+    #define CPU_CLR(cpu_idx, cpu_mask)      (*cpu_mask &= ~AffinityHelper::CpuAffinity(cpu_idx))
+    #define CPU_ISSET(cpu_idx, cpu_mask)    ((*cpu_mask & AffinityHelper::CpuAffinity(cpu_idx)) != 0)
+
+#elif __linux__ /* end of _MSC_VER */
+
+    #include <unistd.h>
+    #include <sys/types.h>
+    #include <linux/unistd.h>
+
+    pid_t gettid() { return (pid_t)syscall(__NR_gettid); }
+
+    #define GET_MASK(cpu_set) (*(unsigned*)(void*)&cpu_set)
+    #define RES_STAT(res) (res != 0 ? "failed" : "ok")
+
+    class AffinityHelper {
+        static cpu_set_t m_processMask;
+
+        class Initializer {
+        public:
+            Initializer () {
+                CPU_ZERO (&m_processMask);
+                int res = sched_getaffinity( getpid(), sizeof(cpu_set_t), &m_processMask );
+                ASSERT ( res == 0, "sched_getaffinity failed" );
+            }
+        }; // class AffinityHelper::Initializer
+
+        static Initializer m_initializer;
+
+    public:
+        static const cpu_set_t& ProcessMask () { return m_processMask; }
+    }; // class AffinityHelper
+
+    cpu_set_t AffinityHelper::m_processMask;
+    AffinityHelper::Initializer AffinityHelper::m_initializer;
+#endif /* __linux__ */
+
+    bool PinTheThread ( int cpu_idx, tbb::atomic<int>& nThreads ) {
+        cpu_set_t orig_mask, target_mask;
+        CPU_ZERO( &target_mask );
+        CPU_SET( cpu_idx, &target_mask );
+        ASSERT ( CPU_ISSET(cpu_idx, &target_mask), "CPU_SET failed" );
+    #if _MSC_VER
+        orig_mask = SetThreadAffinityMask( GetCurrentThread(), target_mask );
+        if ( !orig_mask )
+            return false;
+    #elif __linux__
+        CPU_ZERO( &orig_mask );
+        int res = sched_getaffinity( gettid(), sizeof(cpu_set_t), &orig_mask );
+        ASSERT ( res == 0, "sched_getaffinity failed" );
+        res = sched_setaffinity( gettid(), sizeof(cpu_set_t), &target_mask );
+        ASSERT ( res == 0, "sched_setaffinity failed" );
+    #endif /* _MSC_VER */
+        --nThreads;
+        while ( nThreads )
+            __TBB_Yield();
+    #if _MSC_VER
+        SetThreadPriority (GetCurrentThread(), THREAD_PRIORITY_HIGHEST);
+    #endif
+        return true;
+    }
+
+    class AffinitySetterTask : tbb::task {
+        static bool m_result;
+        static tbb::atomic<int> m_nThreads;
+        int m_idx;
+
+        tbb::task* execute () {
+            //TestAffinityOps();
+            m_result = PinTheThread( m_idx, m_nThreads );
+            return NULL;
+        }
+
+    public:
+        AffinitySetterTask ( int idx ) : m_idx(idx) {}
+
+        friend bool AffinitizeTBB ( int, int /*mode*/ );
+    };
+
+    bool AffinitySetterTask::m_result = true;
+    tbb::atomic<int> AffinitySetterTask::m_nThreads;
+
+    bool AffinitizeTBB ( int p, int affMode ) {
+    #if _MSC_VER
+        SetThreadPriority (GetCurrentThread(), THREAD_PRIORITY_HIGHEST);
+        SetPriorityClass (GetCurrentProcess(), HIGH_PRIORITY_CLASS);
+    #endif
+        AffinitySetterTask::m_result = true;
+        AffinitySetterTask::m_nThreads = p;
+        tbb::task_list  tl;
+        for ( int i = 0; i < p; ++i ) {
+            tbb::task &t = *new( tbb::task::allocate_root() ) AffinitySetterTask( affMode == amSparse ? i * NumCpus / p : i );
+            t.set_affinity( tbb::task::affinity_id(i + 1) );
+            tl.push_back( t );
+        }
+        tbb::task::spawn_root_and_wait(tl);
+        return AffinitySetterTask::m_result;
+    }
+
+    inline 
+    void Affinitize ( int p, int affMode ) {
+        if ( !AffinitizeTBB (p, affMode) )
+            REPORT("Warning: Failed to set affinity for %d TBB threads\n", p);
+    }
+
+    class TbbWorkersTrapper {
+        tbb::atomic<int> my_refcount;
+        tbb::task *my_root;
+        tbb::task_group_context my_context;
+        Harness::SpinBarrier my_barrier;
+
+        friend class TrapperTask;
+
+        class TrapperTask : public tbb::task {
+            TbbWorkersTrapper& my_owner;
+
+            tbb::task* execute () {
+                my_owner.my_barrier.wait();
+                my_owner.my_root->wait_for_all();
+                my_owner.my_barrier.wait();
+                return NULL;
+            }
+        public:
+            TrapperTask ( TbbWorkersTrapper& owner ) : my_owner(owner) {}
+        };
+
+    public:
+        TbbWorkersTrapper ()
+            : my_context(tbb::task_group_context::bound, 
+                         tbb::task_group_context::default_traits | tbb::task_group_context::concurrent_wait)
+        {
+            my_root = new ( tbb::task::allocate_root(my_context) ) tbb::empty_task;
+            my_root->set_ref_count(2);
+            my_barrier.initialize(NumThreads);
+            for ( int i = 1; i < NumThreads; ++i )
+                tbb::task::spawn( *new(tbb::task::allocate_root()) TrapperTask(*this) );
+            my_barrier.wait(); // Wait until all workers are ready
+        }
+
+        ~TbbWorkersTrapper () {
+            my_root->decrement_ref_count();
+            my_barrier.wait(); // Make sure no tasks are referencing us
+            tbb::task::destroy(*my_root);
+        }
+    }; // TbbWorkersTrapper
+
+
+#if __TBB_STATISTICS
+    static bool StatisticsMode = true;
+#else
+    static bool StatisticsMode = false;
+#endif
+
+//! Suppresses silly warning
+inline bool __TBB_bool( bool b ) { return b; }
+
+#define START_WORKERS(needScheduler, p, a, setWorkersAffinity, trapWorkers) \
+    tbb::task_scheduler_init init(tbb::task_scheduler_init::deferred);      \
+    TbbWorkersTrapper *trapper = NULL;                                      \
+    if ( theSettings.my_opts & UseTaskScheduler                   \
+         && (needScheduler) && ((setWorkersAffinity) || (trapWorkers)) )    \
+    {                                                                       \
+        init.initialize( p );                                               \
+        if ( __TBB_bool(setWorkersAffinity) )                               \
+            Affinitize( p, a );                                             \
+        if ( __TBB_bool(trapWorkers) )                                      \
+            trapper = new TbbWorkersTrapper;                                \
+    }
+
+#define STOP_WORKERS()  \
+    if ( theSettings.my_opts & UseTaskScheduler && init.is_active() ) { \
+        if ( trapper )                                                      \
+            delete trapper;                                                 \
+        init.terminate();                                                   \
+        /* Give asynchronous deinitialization time to complete */           \
+        Harness::Sleep(50);                                                 \
+    }
+
+    typedef void (Test::*RunMemFnPtr)( Test::ThreadInfo& );
+
+    TimingSeries *TlsTimings;
+    Harness::SpinBarrier  multipleMastersBarrier;
+
+    class TimingFunctor {
+        Test* my_test;
+        RunConfig *my_cfg;
+        RunMemFnPtr my_fnRun;
+        size_t my_numRuns;
+        size_t my_numRepeats;
+        uintptr_t my_availableMethods;
+
+        duration_t TimeSingleRun ( Test::ThreadInfo& ti ) const {
+            if ( my_availableMethods & idOnStart )
+                my_test->OnStart(ti);
+            multipleMastersBarrier.wait();
+            tbb::tick_count t0 = tbb::tick_count::now();
+            (my_test->*my_fnRun)(ti);
+            duration_t t = (tbb::tick_count::now() - t0).seconds();
+            if ( my_availableMethods & idOnFinish )
+                my_test->OnFinish(ti);
+            return t;
+        }
+
+    public:
+        TimingFunctor ( Test* test, RunConfig *cfg, RunMemFnPtr fnRun, 
+                        size_t numRuns, size_t nRepeats, uintptr_t availableMethods )
+            : my_test(test), my_cfg(cfg), my_fnRun(fnRun)
+            , my_numRuns(numRuns), my_numRepeats(nRepeats), my_availableMethods(availableMethods)
+        {}
+
+        void operator()( int tid ) const {
+            Test::ThreadInfo ti = { tid, NULL };
+            durations_t &d = TlsTimings[tid].my_durations;
+            bool singleMaster = my_cfg->my_numMasters == 1;
+            START_WORKERS( !singleMaster || singleMaster && StatisticsMode, 
+                            my_cfg->my_numThreads, my_cfg->my_affinityMode, singleMaster, singleMaster );
+            for ( uintptr_t k = 0; k < my_numRuns; ++k )  {
+                if ( my_numRepeats > 1 ) {
+                    d[k] = 0;
+                    if ( my_availableMethods & idPrePostProcess ) {
+                        for ( uintptr_t i = 0; i < my_numRepeats; ++i )
+                            d[k] += TimeSingleRun(ti);
+                    }
+                    else {
+                        multipleMastersBarrier.wait();
+                        tbb::tick_count t0 = tbb::tick_count::now();
+                        for ( uintptr_t i = 0; i < my_numRepeats; ++i )
+                            (my_test->*my_fnRun)(ti);
+                        d[k] = (tbb::tick_count::now() - t0).seconds();
+                    }
+                    d[k] /= my_numRepeats;
+                }
+                else
+                    d[k] = TimeSingleRun(ti);
+            }
+            STOP_WORKERS();
+            TlsTimings[tid].CalculateStatistics();
+        }
+    }; // class TimingFunctor
+    
+    void DoTiming ( TestResults& tr, RunConfig &cfg, RunMemFnPtr fnRun, size_t nRepeats, TimingSeries& ts ) {
+        int numThreads = cfg.NumMasters();
+        size_t numRuns = ts.my_durations.size() / numThreads;
+        TimingFunctor body( tr.my_test, &cfg, fnRun, numRuns, nRepeats, tr.my_availableMethods );
+        multipleMastersBarrier.initialize(numThreads);
+        tr.my_test->SetWorkload(cfg.my_workloadID);
+        if ( numThreads == 1 ) {
+            TimingSeries *t = TlsTimings;
+            TlsTimings = &ts;
+            body(0);
+            TlsTimings = t;
+        }
+        else {
+            ts.my_durations.resize(numThreads * numRuns);
+            NativeParallelFor( numThreads, body );
+            for ( int i = 0, j = 0; i < numThreads; ++i ) {
+                durations_t &d = TlsTimings[i].my_durations;
+                for ( size_t k = 0; k < numRuns; ++k, ++j )
+                    ts.my_durations[j] = d[k];
+            }
+            ts.CalculateStatistics();
+        }
+    }
+
+    //! Runs the test function and performs statistical processing of the collected timings.
+    /** If histogram generation is enabled (via the session settings or the test's HistogramName()
+        method), the histogram of individual runs is generated and stored in a file with the
+        configured name. If that name is NULL, the histogram is printed on the console.
+        By default no histogram is generated.
+        The histogram format is: "rate bucket start" "number of tests in this bucket". **/
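+    /*  For illustration (hypothetical numbers), a generated histogram might contain the lines
+            2.50e-04 12
+            5.00e-04 3
+        meaning that 12 runs fell into the bucket starting at 2.50e-04 and 3 into the next one. */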
+    void RunTestImpl ( TestResults& tr, RunConfig &cfg, RunMemFnPtr pfnTest, TimingSeries& ts ) {
+        // nRepeats is the number of repeated calls to the test function made as
+        // part of the same run. It is determined experimentally by the following
+        // calibration process so that the total run time is approximately RunDuration.
+        // This helps to increase the measurement precision in case of very short tests.
+        size_t nRepeats = 1;
+        // Minimal statistics are enough when doing calibration
+        CalibrationTiming.my_durations.resize( (NumRuns < 4 ? NumRuns : 3) * cfg.NumMasters() );
+        // There is no need to be too precise when calculating nRepeats, and reasonably
+        // far extrapolation can speed up the process significantly.
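+        // (For illustration with hypothetical numbers: if a single run takes about 2e-6 s,
+        // the loop below doubles nRepeats up to 64, since 64 * 2e-6 = 1.28e-4 > 1e-4,
+        // and the subsequent scaling multiplies it by ceil(RunDuration / 1.28e-4).)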
+        for (;;) {
+            DoTiming( tr, cfg, pfnTest, nRepeats, CalibrationTiming );
+            if ( CalibrationTiming.my_avgTime * nRepeats > 1e-4 )
+                break;
+            nRepeats *= 2;
+        }
+        nRepeats *= (uintptr_t)ceil( RunDuration / (CalibrationTiming.my_avgTime * nRepeats) );
+
+        DoTiming(tr, cfg, pfnTest, nRepeats, ts);
+
+        // No histogram for baseline measurements
+        if ( pfnTest != &Test::RunSerial && pfnTest != &Test::Baseline ) {
+            const char* histogramName = theSettings.my_histogramName;
+            if ( histogramName != NoHistogram && tr.my_test->HistogramName() != DefaultHistogram )
+                histogramName = tr.my_test->HistogramName();
+            if ( histogramName != NoHistogram )
+                TraceHistogram( ts.my_durations, histogramName );
+        }
+    } // RunTestImpl
+
+    typedef void (*TestActionFn) ( TestResults&, int mastersRange, int w, int p, int m, int a, int& numTests );
+
+    int TestResultIndex ( int mastersRange, int w, int p, int m, int a ) {
+        return ((w * (MaxThread - MinThread + 1) + (p - MinThread)) * mastersRange + m) * NumActiveAffModes + a;
+    }
+
+    void RunTest ( TestResults& tr, int mastersRange, int w, int p, int m, int a, int& numTests ) {
+        size_t r = TestResultIndex(mastersRange, w, p, m, a);
+        ASSERT( r < tr.my_results.size(), NULL );
+        RunConfig &rc = tr.my_results[r].my_config;
+        rc.my_maxConcurrency = MaxConcurrency;
+        rc.my_numThreads = p;
+        rc.my_numMasters = m + tr.my_test->MinNumMasters();
+        rc.my_affinityMode = a;
+        rc.my_workloadID = w;
+        RunTestImpl( tr, rc, &Test::Run, tr.my_results[r].my_timing );
+        printf( "Running tests: %04.1f%%\r",  ++numTests * 100. / TotalConfigs ); fflush(stdout);
+    }
+
+    void WalkTests ( TestActionFn fn, int& numTests, bool setAffinity, bool trapWorkers, bool multipleMasters ) {
+        for ( int p = MinThread; p <= MaxThread; ++p ) {
+            NumThreads = p;
+            MaxConcurrency = p < NumCpus ? p : NumCpus;
+            for ( int a = 0; a < NumActiveAffModes; ++a ) {
+                START_WORKERS( multipleMasters || !StatisticsMode, p, a, setAffinity, trapWorkers );
+                for ( size_t i = 0; i < theSession.size(); ++i ) {
+                    TestResults &tr = theSession[i];
+                    Test *t = tr.my_test;
+                    int mastersRange = t->MaxNumMasters() - t->MinNumMasters() + 1;
+                    for ( int w = 0; w < t->NumWorkloads(); ++w ) {
+                        if ( multipleMasters )
+                            for ( int m = 1; m < mastersRange; ++m )
+                                fn( tr, mastersRange, w, p, m, a, numTests );
+                        else
+                            fn( tr, mastersRange, w, p, 0, a, numTests );
+                    }
+                }
+                STOP_WORKERS();
+            }
+        }
+    }
+
+    void RunTests () {
+        int numTests = 0;
+        WalkTests( &RunTest, numTests, !StatisticsMode, !StatisticsMode, false );
+        if ( MaxTbbMasters > 1 )
+            WalkTests( &RunTest, numTests, true, false, true );
+    }
+
+    void InitTestData ( TestResults& tr, int mastersRange, int w, int p, int m, int a, int& ) {
+        size_t r = TestResultIndex(mastersRange, w, p, m, a);
+        ASSERT( r < tr.my_results.size(), NULL );
+        tr.my_results[r].my_timing.my_durations.resize( 
+            (theSettings.my_opts & UseTaskScheduler ? tr.my_test->MinNumMasters() + m : p) * NumRuns );
+    }
+
+    char WorkloadName[MaxWorkloadNameLen + 1];
+
+    void PrepareTests () {
+        printf( "Initializing...\r" );
+        NumActiveAffModes = theSettings.my_opts & UseAffinityModes ? NumAffinitizationModes : 1;
+        TotalConfigs = 0;
+        TitleFieldLen = strlen( TestNameColumnTitle );
+        WorkloadFieldLen = strlen( WorkloadNameColumnTitle );
+        int numThreads = MaxThread - MinThread + 1;
+        int numConfigsBase = numThreads * NumActiveAffModes;
+        int totalWorkloads = 0;
+        for ( size_t i = 0; i < theSession.size(); ++i ) {
+            TestResults &tr = theSession[i];
+            Test &t = *tr.my_test;
+            int numWorkloads = t.NumWorkloads();
+            int numConfigs = numConfigsBase * numWorkloads;
+            if ( t.MaxNumMasters() > 1 ) {
+                ASSERT( theSettings.my_opts & UseTaskScheduler, "Multiple masters mode is only valid for task scheduler tests" );
+                if ( MaxTbbMasters < t.MaxNumMasters() )
+                    MaxTbbMasters = t.MaxNumMasters();
+                numConfigs *= t.MaxNumMasters() - t.MinNumMasters() + 1;
+            }
+            totalWorkloads += numWorkloads;
+            TotalConfigs += numConfigs;
+
+            const char* testName = t.Name();
+            if ( testName )
+                tr.my_testName = testName;
+            ASSERT( tr.my_testName, "Neither Test::Name() is implemented, nor RTTI is enabled" );
+            TitleFieldLen = max( TitleFieldLen, strlen(tr.my_testName) );
+
+            tr.my_results.resize( numConfigs );
+            tr.my_serialBaselines.resize( numWorkloads );
+            tr.my_baselines.resize( numWorkloads );
+            tr.my_workloadNames.resize( numWorkloads );
+        }
+        TimingSeries tmpTiming;
+        TlsTimings = &tmpTiming; // All measurements are serial here
+        int n = 0;
+        for ( size_t i = 0; i < theSession.size(); ++i ) {
+            TestResults &tr = theSession[i];
+            Test &t = *tr.my_test;
+            // Detect which methods are overridden by the test implementation
+            g_absentMethods = 0;
+            Test::ThreadInfo ti = { 0 };
+            t.SetWorkload(0);
+            t.OnStart(ti);
+            t.RunSerial(ti);
+            t.OnFinish(ti);
+            if ( theSettings.my_opts & UseSerialBaseline && !(g_absentMethods & idRunSerial) )
+                tr.my_availableMethods |= idRunSerial;
+            if ( !(g_absentMethods & idOnStart) )
+                tr.my_availableMethods |= idOnStart;
+
+            RunConfig rc = { 1, 1, 1, 0, 0 };
+            for ( int w = 0; w < t.NumWorkloads(); ++w ) {
+                WorkloadName[0] = 0;
+                t.SetWorkload(w);
+                if ( !WorkloadName[0] )
+                    sprintf( WorkloadName, "%d", w );
+                size_t len = strlen(WorkloadName);
+                tr.my_workloadNames[w] = new char[len + 1];
+                strcpy ( (char*)tr.my_workloadNames[w], WorkloadName );
+                WorkloadFieldLen = max( WorkloadFieldLen, len );
+
+                rc.my_workloadID = w;
+                if ( theSettings.my_opts & UseBaseline )
+                    RunTestImpl( tr, rc, &Test::Baseline, tr.my_baselines[w] );
+                if ( tr.my_availableMethods & idRunSerial )
+                    RunTestImpl( tr, rc, &Test::RunSerial, tr.my_serialBaselines[w] );
+                printf( "Measuring baselines: %04.1f%%\r",  ++n * 100. / totalWorkloads ); fflush(stdout);
+            }
+        }
+        TlsTimings = new TimingSeries[MaxThread + MaxTbbMasters - 1];
+        if ( theSettings.my_opts & UseTaskScheduler ? MaxTbbMasters : MaxThread )
+            WalkTests( &InitTestData, n, false, false, theSettings.my_opts & UseTaskScheduler ? true : false );
+        CalibrationTiming.my_durations.reserve( MaxTbbMasters * 3 );
+        printf( "                                                          \r");
+    }
+
+    FILE* ResFile = NULL;
+
+    void Report ( char const* fmt, ... ) {
+        va_list args;
+        va_start( args, fmt );
+        if ( ResFile )
+            vfprintf( ResFile, fmt, args );
+        va_end( args );
+        va_start( args, fmt );
+        vprintf( fmt, args );
+        va_end( args );
+    }
+
+    void PrintResults () {
+        if ( theSettings.my_resFile )
+            ResFile = fopen( theSettings.my_resFile, "w" );
+        Report( "%-*s %-*s %s", TitleFieldLen, "Test name", WorkloadFieldLen, "Workload", 
+                                MaxTbbMasters > 1 ? "W    M    " : "T    " );
+        if ( theSettings.my_opts & UseAffinityModes )
+            Report( "Aff  " );
+        Report( "%-*s SD, %%  %-*s %-*s %-*s ",
+                RateFieldLen, "Avg.time", OvhdFieldLen, "Par.ovhd,%",
+                RateFieldLen, "Min.time", RateFieldLen, "Max.time" );
+        Report( " | Repeats = %lu, CPUs %d\n", (unsigned long)NumRuns, NumCpus );
+        for ( size_t i = 0; i < theSession.size(); ++i ) {
+            TestResults &tr = theSession[i];
+            for ( size_t j = 0; j < tr.my_results.size(); ++j ) {
+                RunResults &rr = tr.my_results[j];
+                RunConfig &rc = rr.my_config;
+                int w = rc.my_workloadID;
+                TimingSeries &ts = rr.my_timing;
+                duration_t baselineTime = tr.my_baselines[w].my_avgTime,
+                           cleanTime = ts.my_avgTime - baselineTime;
+                Report( "%-*s %-*s ", TitleFieldLen, tr.my_testName, WorkloadFieldLen, tr.my_workloadNames[w] );
+                if ( MaxTbbMasters > 1 )
+                    Report( "%-4d %-4d ", rc.my_numThreads - 1, rc.my_numMasters );
+                else
+                    Report( "%-4d ", rc.my_numThreads );
+                if ( theSettings.my_opts & UseAffinityModes )
+                    Report( "%%-8s ", AffinitizationModeNames[rc.my_affinityMode] );
+                Report( "%-*.2e %-6.1f ", RateFieldLen, cleanTime, ts.my_stdDev);
+                if ( tr.my_availableMethods & idRunSerial  ) {
+                    duration_t serialTime = (tr.my_serialBaselines[w].my_avgTime - baselineTime) / rc.my_maxConcurrency;
+                    Report( "%-*.1f ", OvhdFieldLen, 100*(cleanTime - serialTime)/serialTime );
+                }
+                else
+                    Report( "%*s%*s ", OvhdFieldLen/2, "-", OvhdFieldLen - OvhdFieldLen/2, "" );
+                Report( "%-*.2e %-*.2e ", RateFieldLen, ts.my_minTime - baselineTime, RateFieldLen, ts.my_maxTime - baselineTime);
+                Report( "\n" );
+            }
+        }
+        delete [] TlsTimings;
+        if ( ResFile )
+            fclose(ResFile);
+    }
+
+    __TBB_PERF_API void RegisterTest ( Test* t, const char* className, bool takeOwnership ) {
+        // Just collect test objects at this stage
+        theSession.push_back( TestResults(t, className, takeOwnership) );
+    }
+
+} // namespace internal
+
+__TBB_PERF_API void Test::Baseline ( ThreadInfo& ) {}
+
+__TBB_PERF_API void Test::RunSerial ( ThreadInfo& ) { internal::g_absentMethods |= internal::idRunSerial; }
+
+__TBB_PERF_API void Test::OnStart ( ThreadInfo& ) { internal::g_absentMethods |= internal::idOnStart; }
+
+__TBB_PERF_API void Test::OnFinish ( ThreadInfo& ) { internal::g_absentMethods |= internal::idOnFinish; }
+
+__TBB_PERF_API void WipeCaches () { NativeParallelFor( NumCpus, internal::WiperBody() ); }
+
+__TBB_PERF_API void EmptyFunc () {}
+__TBB_PERF_API void AnchorFunc ( void* ) {}
+__TBB_PERF_API void AnchorFunc2 ( void*, void* ) {}
+
+__TBB_PERF_API void SetWorkloadName( const char* format, ... ) {
+    internal::WorkloadName[MaxWorkloadNameLen] = 0;
+    va_list args;
+    va_start(args, format);
+    vsnprintf( internal::WorkloadName, MaxWorkloadNameLen, format, args );
+    va_end(args);
+}
+
+
+__TBB_PERF_API int TestMain( int argc, char* argv[], const SessionSettings* defaultSettings ) {
+#if _MSC_VER
+    HANDLE hMutex = CreateMutex( NULL, FALSE, "Global\\TBB_OMP_PerfSession" );
+    WaitForSingleObject( hMutex, INFINITE );
+#endif
+    MinThread = MaxThread = NumCpus;
+    if ( defaultSettings )
+        theSettings = *defaultSettings;
+    ParseCommandLine( argc, argv );  // May override data in theSettings
+
+    internal::PrepareTests ();
+    internal::RunTests ();
+    internal::PrintResults();
+    REPORT("\n");
+#if _MSC_VER
+    ReleaseMutex( hMutex );
+    CloseHandle( hMutex );
+#endif
+    return 0;
+}
+
+} // namespace Perf
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/perf/perf.h b/deal.II/contrib/tbb/tbb30_104oss/src/perf/perf.h
new file mode 100644 (file)
index 0000000..a4b0858
--- /dev/null
@@ -0,0 +1,265 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+
+#ifndef __tbb_perf_h__
+#define __tbb_perf_h__
+
+#ifndef TBB_PERF_TYPEINFO
+#define TBB_PERF_TYPEINFO 1
+#endif
+
+#if TBB_PERF_TYPEINFO
+    #include <typeinfo>
+    #define __TBB_PERF_TEST_CLASS_NAME(T) typeid(T).name()
+#else /* !TBB_PERF_TYPEINFO */
+    #define __TBB_PERF_TEST_CLASS_NAME(T) NULL
+#endif /* !TBB_PERF_TYPEINFO */
+
+
+#include "tbb/tick_count.h"
+
+// TODO: Fix build scripts to provide a more reliable means of build phase identification
+#ifndef __TBB_PERF_API
+#if _USRDLL
+    #if _MSC_VER
+        #define __TBB_PERF_API __declspec(dllexport)
+    #else /* !_MSC_VER */
+        #define __TBB_PERF_API
+    #endif /* !_MSC_VER */
+#else /* !_USRDLL */
+    #if _MSC_VER
+        #define __TBB_PERF_API __declspec(dllimport)
+    #else /* !_MSC_VER */
+        #define __TBB_PERF_API
+    #endif /* !_MSC_VER */
+#endif /* !_USRDLL */
+#endif /* !__TBB_PERF_API */
+
+#if _WIN32||_WIN64
+
+namespace Perf {
+    typedef unsigned __int64 tick_t;
+    #if defined(_M_AMD64)
+        inline tick_t rdtsc () { return __rdtsc(); }
+    #elif _M_IX86
+        inline tick_t rdtsc () { __asm { rdtsc } }
+    #else
+        #error Unsupported ISA
+    #endif
+} // namespace Perf
+
+#elif __linux__ || __APPLE__
+
+#include <stdint.h>
+
+namespace Perf {
+    typedef uint64_t tick_t;
+    #if __x86_64__ || __i386__ || __i386
+        inline tick_t rdtsc () {
+            uint32_t lo, hi;
+            __asm__ __volatile__ ( "rdtsc" : "=a" (lo), "=d" (hi) );
+            return (tick_t)lo | ((tick_t)hi) << 32;
+        }
+    #else
+        #error Unsupported ISA
+    #endif
+} // namespace Perf
+
+#else
+    #error Unsupported OS
+#endif /* OS */
+
+__TBB_PERF_API extern int NumThreads,
+                          MaxConcurrency,
+                          NumCpus;
+
+// Functions and global variables provided by the benchmarking framework
+namespace Perf {
+
+typedef double duration_t;
+
+static const int MaxWorkloadNameLen = 64;
+
+static const char* NoHistogram = (char*)-1;
+static const char* DefaultHistogram = (char*)-2;
+
+__TBB_PERF_API void AnchorFunc ( void* );
+__TBB_PERF_API void AnchorFunc2 ( void*, void*  );
+
+//! Helper that can be used in the preprocess handler to clean caches
+/** Cleaning caches is necessary to obtain reproducible results when a test
+    accesses significant ranges of memory. **/
+__TBB_PERF_API void WipeCaches ();
+
+//! Specifies the name to be used to designate the current workload in output
+/** Should be used from Test::SetWorkload(). If necessary, the workload name will be
+    truncated to MaxWorkloadNameLen characters. **/
+__TBB_PERF_API void SetWorkloadName( const char* format, ... );
+
+class __TBB_PERF_API Test {
+public:
+    virtual ~Test () {}
+
+    //! Struct used by tests running in multiple masters mode
+    struct ThreadInfo {
+        //! Zero based thread ID
+        int     tid;
+        //! Pointer to test specific data
+        /** If used by the test, should be initialized by OnStart(), and
+            finalized by OnFinish(). **/
+        void*   data;
+    };
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // Mandatory methods (a minimal implementation sketch follows the class declaration)
+    
+    //! Returns the number of workloads supported
+    virtual int NumWorkloads () = 0;
+
+    //! Set workload info for the subsequent calls to Run() and RunSerial()
+    /** This method can use global helper function Perf::SetWorkloadName() in order
+        to specify the name of the current workload, which will be used in output
+        to designate the workload. If SetWorkloadName is not called, workloadIndex
+        will be used for this purpose.
+
+        When testing the task scheduler, make sure that this method does not trigger
+        its automatic initialization. **/
+    virtual void SetWorkload ( int workloadIndex ) = 0;
+
+    //! Test implementation
+    /** Called by the timing framework several times in a loop so that the total loop time
+        is approximately RunDuration; the loop is timed NumRuns times to collect statistics.
+        Argument ti specifies information about the master thread calling this method. **/
+    virtual void Run ( ThreadInfo& ti ) = 0;
+
+    ////////////////////////////////////////////////////////////////////////////////
+    // Optional methods
+
+    //! Returns a short title string to be used in the regular output to identify the test
+    /** Should uniquely identify the test among other ones in the given benchmark suite.
+        If not implemented, the test implementation class' RTTI name is used. **/
+    virtual const char* Name () { return NULL; };
+
+    //! Returns the minimal number of master threads
+    /** Used for task scheduler tests only (when the UseTaskScheduler option is specified
+        in the session settings). **/
+    virtual int MinNumMasters () { return 1; }
+
+    //! Returns the maximal number of master threads
+    /** Used for task scheduler tests only (when the UseTaskScheduler option is specified
+        in the session settings). **/
+    virtual int MaxNumMasters () { return 1; }
+
+    //! Executes a serial workload equivalent to the one processed by Run()
+    /** Called by the timing framework several times in a loop to collect statistics. **/
+    virtual void RunSerial ( ThreadInfo& ti );
+
+    //! Invoked before each call to Run() 
+    /** Can be used to preinitialize data necessary for the test, clean up 
+        caches (see Perf::WipeCaches), etc.
+        In multiple masters mode this method is called on each thread. **/
+    virtual void OnStart ( ThreadInfo& ti );
+
+    //! Invoked after each call to Run() 
+    /** Can be used to free resources allocated by OnStart().
+        Note that this method must work correctly independently of whether Run(),
+        RunSerial() or nothing is called between OnStart() and OnFinish().
+        In multiple masters mode this method is called on each thread. **/
+    virtual void OnFinish ( ThreadInfo& ti );
+
+    //! Functionality whose cost has to be factored out of the timing results
+    /** Applies to both parallel and serial versions. **/
+    virtual void Baseline ( ThreadInfo& );
+
+    //! Returns description string to be used in the benchmark info/summary output
+    virtual const char* Description () { return NULL; }
+
+    //! Specifies the output destination for the histogram of individual run times in a series
+    /** If the method is not overridden, the histogram setting from the session settings is used. **/
+    virtual const char* HistogramName () { return DefaultHistogram; }
+}; // class Test
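+
+// A minimal implementation sketch (illustrative only; the class name and the workload
+// name below are hypothetical, not part of the framework):
+//
+//     class MyTest : public Perf::Test {
+//         int NumWorkloads () { return 1; }
+//         void SetWorkload ( int /*idx*/ ) { Perf::SetWorkloadName( "default" ); }
+//         void Run ( ThreadInfo& ) { /* the parallel work to be measured */ }
+//     };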
+
+namespace internal {
+    __TBB_PERF_API void RegisterTest ( Test*, const char* testClassName, bool takeOwnership );
+}
+
+template<class T>
+void RegisterTest() { internal::RegisterTest( new T, __TBB_PERF_TEST_CLASS_NAME(T), true ); }
+
+template<class T>
+void RegisterTest( T& t ) { internal::RegisterTest( &t, __TBB_PERF_TEST_CLASS_NAME(T), false ); }
+
+enum SessionOptions {
+    //! Use Test::Baseline to factor out the timing overhead
+    UseBaseline = 0x01,
+    //! Use Test::RunSerial if present
+    UseSerialBaseline = 0x02,
+    UseBaselines = UseBaseline | UseSerialBaseline,
+    UseTaskScheduler = 0x10,
+    UseAffinityModes = 0x20
+};
+
+struct SessionSettings {
+    //! A combination of SessionOptions flags
+    uintptr_t my_opts;
+
+    //! Name of a file to store performance results
+    /** These results are duplicates of what is printed on the console. **/
+    const char* my_resFile;
+
+    //! Output destination for the histogram of individual run times in a series
+    /** If it is a string, the histogram is stored in a file with that name.
+        If it is NULL, the histogram is printed on the console. By default histograms
+        are suppressed.
+
+        The histogram is formatted as a two-column table:
+        "time bucket start" "number of tests in this bucket"
+
+        When this setting enables histogram generation, an individual test
+        can override it by implementing the HistogramName() method. **/
+    const char* my_histogramName;
+
+    SessionSettings ( uintptr_t opts = 0, const char* resFile = NULL, const char* histogram = NoHistogram )
+        : my_opts(opts)
+        , my_resFile(resFile)
+        , my_histogramName(histogram)
+    {}
+}; // struct SessionSettings
+
+//! Benchmarking session entry point
+/** Executes all the individual tests registered previously by means of 
+    RegisterTest() (see the usage sketch below). **/
+__TBB_PERF_API int TestMain( int argc, char* argv[],
+                             const SessionSettings* defaultSettings = NULL );
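+//
+// Typical wiring in a benchmark's main() (sketch; MyTest is the hypothetical test
+// class from the example above):
+//
+//     int main ( int argc, char* argv[] ) {
+//         Perf::SessionSettings opts( Perf::UseBaseline | Perf::UseSerialBaseline );
+//         Perf::RegisterTest<MyTest>();
+//         return Perf::TestMain( argc, argv, &opts );
+//     }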
+
+
+} // namespace Perf
+
+#endif /* __tbb_perf_h__ */
+
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/perf/perf_sched.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/perf/perf_sched.cpp
new file mode 100644 (file)
index 0000000..5cf2286
--- /dev/null
@@ -0,0 +1,423 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "perf.h"
+
+#include <cmath>
+
+#include "tbb/blocked_range.h"
+#include "tbb/parallel_for.h"
+#include "tbb/parallel_reduce.h"
+
+#define NUM_CHILD_TASKS     2096
+#define NUM_ROOT_TASKS      256
+
+#define N               100000000
+#define FINEST_GRAIN    10
+#define FINE_GRAIN      50
+#define MED_GRAIN       200
+#define COARSE_GRAIN    1000
+
+
+typedef int count_t;
+
+const count_t N_finest = (count_t)(N/log((double)N)/10);
+const count_t N_fine = N_finest * 20;
+const count_t N_med = N_fine * (count_t)log((double)N) / 5;
+
+class StaticTaskHolder {
+public:
+    tbb::task *my_SimpleLeafTaskPtr;
+    StaticTaskHolder ();
+};
+
+static StaticTaskHolder s_tasks;
+
+static count_t NumIterations;
+static count_t NumLeafTasks;
+static count_t NumRootTasks;
+
+class SimpleLeafTask : public tbb::task {
+    task* execute () {
+        volatile count_t anchor = 0;
+        for ( count_t i=0; i < NumIterations; ++i )
+            anchor += i;
+        return NULL;
+    }
+public:
+    SimpleLeafTask ( count_t ) {}
+};
+
+StaticTaskHolder::StaticTaskHolder () {
+    static SimpleLeafTask s_t1(0);
+    my_SimpleLeafTaskPtr = &s_t1;
+}
+
+class Test_SPMC : public Perf::Test {
+protected:
+    static const int numWorkloads = 4;
+    static const count_t workloads[numWorkloads];
+
+    const char* Name () { return "SPMC"; }
+
+    int NumWorkloads () { return numWorkloads; }
+
+    void SetWorkload ( int idx ) {
+        NumRootTasks = 1;
+        NumIterations = workloads[idx];
+        NumLeafTasks = NUM_CHILD_TASKS * NUM_ROOT_TASKS / (NumIterations > 1000 ? 32 : 8);
+        Perf::SetWorkloadName( "%d x %d", NumLeafTasks, NumIterations );
+    }
+    
+    void Run ( ThreadInfo& ) {
+        tbb::empty_task &r = *new( tbb::task::allocate_root() ) tbb::empty_task;
+        r.set_ref_count( NumLeafTasks + 1 );
+        for ( count_t i = 0; i < NumLeafTasks; ++i )
+            r.spawn( *new(r.allocate_child()) SimpleLeafTask(0) );
+        r.wait_for_all();
+        tbb::task::destroy(r);
+    }
+
+    void RunSerial ( ThreadInfo& ) {
+        const count_t n = NumLeafTasks * NumRootTasks;
+        for ( count_t i=0; i < n; ++i )
+            s_tasks.my_SimpleLeafTaskPtr->execute();
+    }
+}; // class Test_SPMC
+
+const count_t Test_SPMC::workloads[Test_SPMC::numWorkloads] = { 1, 50, 500, 5000 };
+
+template<class LeafTask>
+class LeavesLauncherTask : public tbb::task {
+    count_t my_groupId;
+
+    task* execute () {
+        count_t base = my_groupId * NumLeafTasks;
+        set_ref_count(NumLeafTasks + 1);
+        for ( count_t i = 0; i < NumLeafTasks; ++i )
+            spawn( *new(allocate_child()) LeafTask(base + i) );
+        wait_for_all();
+        return NULL;
+    }
+public:
+    LeavesLauncherTask ( count_t groupId ) : my_groupId(groupId) {}
+};
+
+template<class LeafTask>
+void RunShallowTree () {
+    tbb::empty_task &r = *new( tbb::task::allocate_root() ) tbb::empty_task;
+    r.set_ref_count( NumRootTasks + 1 );
+    for ( count_t i = 0; i < NumRootTasks; ++i )
+        r.spawn( *new(r.allocate_child()) LeavesLauncherTask<LeafTask>(i) );
+    r.wait_for_all();
+    tbb::task::destroy(r);
+}
+
+class Test_ShallowTree : public Test_SPMC {
+    const char* Name () { return "ShallowTree"; }
+
+    void SetWorkload ( int idx ) {
+        NumRootTasks = NUM_ROOT_TASKS;
+        NumIterations = workloads[idx];
+        NumLeafTasks = NumIterations > 200 ? NUM_CHILD_TASKS / 10 : 
+                            (NumIterations > 50 ? NUM_CHILD_TASKS / 2 : NUM_CHILD_TASKS * 2);
+        Perf::SetWorkloadName( "%d x %d", NumRootTasks * NumLeafTasks, NumIterations );
+    }
+
+    void Run ( ThreadInfo& ) {
+        RunShallowTree<SimpleLeafTask>();
+    }
+}; // class Test_ShallowTree
+
+class LeafTaskSkewed : public tbb::task {
+    count_t my_ID;
+
+    task* execute () {
+        volatile count_t anchor = 0;
+        double K = (double)NumRootTasks * NumLeafTasks;
+        count_t n = count_t(sqrt(double(my_ID)) * double(my_ID) * my_ID / (4 * K * K));
+        for ( count_t i = 0; i < n; ++i )
+            anchor += i;
+        return NULL;
+    }
+public:
+    LeafTaskSkewed ( count_t id ) : my_ID(id) {}
+};
+
+class Test_ShallowTree_Skewed : public Perf::Test {
+    const char* Name () { return "ShallowTree_Skewed"; }
+
+    int NumWorkloads () { return 1; }
+
+    void SetWorkload ( int ) {
+        NumRootTasks = NUM_ROOT_TASKS;
+        NumLeafTasks = NUM_CHILD_TASKS;
+        Perf::SetWorkloadName( "%d", NumRootTasks * NumLeafTasks );
+    }
+
+    void Run ( ThreadInfo& ) {
+        RunShallowTree<LeafTaskSkewed>();
+    }
+}; // class Test_ShallowTree_Skewed
+
+typedef tbb::blocked_range<count_t> range_t;
+
+static count_t  IterRange = N,
+                IterGrain = 1;
+
+enum PartitionerType {
+    SimplePartitioner = 0,
+    AutoPartitioner = 1
+};
+
+class Test_Algs : public Perf::Test {
+protected:
+    static const int numWorkloads = 4;
+    static const count_t algRanges[numWorkloads];
+    static const count_t algGrains[numWorkloads];
+
+    tbb::simple_partitioner    my_simplePartitioner;
+    tbb::auto_partitioner    my_autoPartitioner;
+    PartitionerType my_partitionerType;
+
+    bool UseAutoPartitioner () const { return my_partitionerType == AutoPartitioner; }
+
+    int NumWorkloads () { return UseAutoPartitioner() ? 3 : numWorkloads; }
+
+    void SetWorkload ( int idx ) {
+        if ( UseAutoPartitioner() ) {
+            IterRange = algRanges[idx ? numWorkloads - 1 : 0];
+            IterGrain = idx > 1 ? algGrains[numWorkloads - 1] : 1;
+        }
+        else {
+            IterRange = algRanges[idx];
+            IterGrain = algGrains[idx];
+        }
+        Perf::SetWorkloadName( "%d / %d", IterRange, IterGrain );
+    }
+public:
+    Test_Algs ( PartitionerType pt = SimplePartitioner ) : my_partitionerType(pt) {}
+}; // class Test_Algs
+
+const count_t Test_Algs::algRanges[] = {N_finest, N_fine, N_med, N};
+const count_t Test_Algs::algGrains[] = {1, FINE_GRAIN, MED_GRAIN, COARSE_GRAIN};
+
+template <typename Body>
+class Test_PFor : public Test_Algs {
+protected:
+    void Run ( ThreadInfo& ) {
+        if ( UseAutoPartitioner() )
+            tbb::parallel_for( range_t(0, IterRange, IterGrain), Body(), my_autoPartitioner );
+        else
+            tbb::parallel_for( range_t(0, IterRange, IterGrain), Body(), my_simplePartitioner );
+    }
+
+    void RunSerial ( ThreadInfo& ) {
+        Body body;
+        body( range_t(0, IterRange, IterGrain) );
+    }
+public:
+    Test_PFor ( PartitionerType pt = SimplePartitioner ) : Test_Algs(pt) {}
+}; // class Test_PFor
+
+class SimpleForBody {
+public:
+    void operator()( const range_t& r ) const {
+        count_t end = r.end();
+        volatile count_t anchor = 0;
+        for( count_t i = r.begin(); i < end; ++i )
+            anchor += i;
+    }
+}; // class SimpleForBody
+
+class Test_PFor_Simple : public Test_PFor<SimpleForBody> {
+protected:
+    const char* Name () { return UseAutoPartitioner() ? "PFor-AP" : "PFor"; }
+public:
+    Test_PFor_Simple ( PartitionerType pt = SimplePartitioner ) : Test_PFor<SimpleForBody>(pt) {}
+}; // class Test_PFor_Simple
+
+class SkewedForBody {
+public:
+    void operator()( const range_t& r ) const {
+        count_t end = r.end() * r.end();
+        volatile count_t anchor = 0;
+        for( count_t i = r.begin() * r.begin(); i < end; ++i )
+            anchor += i;
+    }
+}; // class SkewedForBody
+
+class Test_PFor_Skewed : public Test_PFor<SkewedForBody> {
+    typedef Test_PFor<SkewedForBody> base_type;
+protected:
+    const char* Name () { return UseAutoPartitioner() ? "PFor-Skewed-AP" : "PFor-Skewed"; }
+
+    void SetWorkload ( int idx ) {
+        base_type::SetWorkload(idx);
+        IterRange = (count_t)(sqrt((double)IterRange) * sqrt(sqrt((double)N / IterRange)));
+        Perf::SetWorkloadName( "%d", IterRange );
+    }
+
+public:
+    Test_PFor_Skewed ( PartitionerType pt = SimplePartitioner ) : base_type(pt) {}
+}; // class Test_PFor_Skewed
+
+PartitionerType gPartitionerType;
+count_t NestingRange;
+count_t NestingGrain;
+
+class NestingForBody {
+    count_t my_depth;
+    tbb::simple_partitioner my_simplePartitioner;
+    tbb::auto_partitioner my_autoPartitioner;
+    
+    template<class Partitioner>
+    void run ( const range_t& r, Partitioner& p ) const {
+        count_t end = r.end();
+        if ( my_depth > 1 )
+            for ( count_t i = r.begin(); i < end; ++i )
+                tbb::parallel_for( range_t(0, IterRange, IterGrain), NestingForBody(my_depth - 1), p );
+        else
+            for ( count_t i = r.begin(); i < end; ++i )
+                tbb::parallel_for( range_t(0, IterRange, IterGrain), SimpleForBody(), p );
+    }
+public:
+    void operator()( const range_t& r ) const {
+        if ( gPartitionerType == AutoPartitioner )
+            run( r, my_autoPartitioner );
+        else
+            run( r, my_simplePartitioner );
+    }
+    NestingForBody ( count_t depth = 1 ) : my_depth(depth) {}
+}; // class NestingForBody
+
+enum NestingType {
+    HollowNesting,
+    ShallowNesting,
+    DeepNesting
+};
+
+class Test_PFor_Nested : public Test_Algs {
+    typedef Test_Algs base_type;
+
+    NestingType my_nestingType;
+    count_t my_nestingDepth;
+
+protected:
+    const char* Name () {
+        static const char* names[] = { "PFor-HollowNested", "PFor-HollowNested-AP",
+                                       "PFor-ShallowNested", "PFor-ShallowNested-AP",
+                                       "PFor-DeeplyNested", "PFor-DeeplyNested-AP" };
+        return names[my_nestingType * 2 + my_partitionerType];
+    }
+
+    int NumWorkloads () { return my_nestingType == ShallowNesting ? (UseAutoPartitioner() ? 3 : 2) : 1; }
+
+    void SetWorkload ( int idx ) {
+        gPartitionerType = my_partitionerType;
+        if ( my_nestingType == DeepNesting ) {
+            NestingRange = 1024;
+            IterGrain = NestingGrain = 1;
+            IterRange = 4;
+            my_nestingDepth = 4;
+        }
+        else if ( my_nestingType == ShallowNesting ) {
+            int i = idx ? numWorkloads - 1 : 0;
+            count_t baseRange = algRanges[i];
+            count_t baseGrain = !UseAutoPartitioner() || idx > 1 ? algGrains[i] : 1;
+            NestingRange = IterRange = (count_t)sqrt((double)baseRange);
+            NestingGrain = IterGrain = (count_t)sqrt((double)baseGrain);
+        }
+        else {
+            NestingRange = N / 100;
+            NestingGrain = COARSE_GRAIN / 10;
+            IterRange = 2;
+            IterGrain = 1;
+        }
+        Perf::SetWorkloadName( "%d / %d", NestingRange, NestingGrain );
+    }
+
+    void Run ( ThreadInfo& ) {
+        if ( UseAutoPartitioner() )
+            tbb::parallel_for( range_t(0, NestingRange, NestingGrain), NestingForBody(my_nestingDepth), my_autoPartitioner );
+        else
+            tbb::parallel_for( range_t(0, NestingRange, NestingGrain), NestingForBody(my_nestingDepth), my_simplePartitioner );
+    }
+
+    void RunSerial ( ThreadInfo& ) {
+        for ( int i = 0; i < NestingRange; ++i ) {
+            SimpleForBody body;
+            body( range_t(0, IterRange, IterGrain) );
+        }
+    }
+public:
+    Test_PFor_Nested ( NestingType nt, PartitionerType pt ) : base_type(pt), my_nestingType(nt), my_nestingDepth(1) {}
+}; // class Test_PFor_Nested
+
+class SimpleReduceBody {
+public:
+    count_t my_sum;
+    SimpleReduceBody () : my_sum(0) {}
+    SimpleReduceBody ( SimpleReduceBody&, tbb::split ) : my_sum(0) {}
+    void join( SimpleReduceBody& rhs ) { my_sum += rhs.my_sum;}
+    void operator()( const range_t& r ) {
+        count_t end = r.end();
+        volatile count_t anchor = 0;
+        for( count_t i = r.begin(); i < end; ++i )
+            anchor += i;
+        my_sum = anchor;
+    }
+}; // class SimpleReduceBody
+
+class Test_PReduce : public Test_Algs {
+protected:
+    const char* Name () { return UseAutoPartitioner() ? "PReduce-AP" : "PReduce"; }
+
+    void Run ( ThreadInfo& ) {
+        SimpleReduceBody body;
+        if ( UseAutoPartitioner() )
+            tbb::parallel_reduce( range_t(0, IterRange, IterGrain), body, my_autoPartitioner );
+        else
+            tbb::parallel_reduce( range_t(0, IterRange, IterGrain), body, my_simplePartitioner );
+    }
+
+    void RunSerial ( ThreadInfo& ) {
+        SimpleReduceBody body;
+        body( range_t(0, IterRange, IterGrain) );
+    }
+public:
+    Test_PReduce ( PartitionerType pt = SimplePartitioner ) : Test_Algs(pt) {}
+}; // class Test_PReduce
+
+int main( int argc, char* argv[] ) {
+    Perf::SessionSettings opts (Perf::UseTaskScheduler);   // optionally add Perf::UseBaseline | Perf::UseSerialBaseline
+    Test_PFor_Nested pf_dn_sp(DeepNesting, SimplePartitioner), pf_dn_ap(DeepNesting, AutoPartitioner);
+    Perf::RegisterTest(pf_dn_sp);
+    Perf::RegisterTest(pf_dn_ap);
+    return Perf::TestMain(argc, argv, &opts);
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/perf/run_statistics.sh b/deal.II/contrib/tbb/tbb30_104oss/src/perf/run_statistics.sh
new file mode 100644 (file)
index 0000000..5e1ed14
--- /dev/null
@@ -0,0 +1,40 @@
+#!/bin/bash
+#
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+export LD_LIBRARY_PATH=.:$LD_LIBRARY_PATH
+# Set the output format to .csv; 'pivot' selects pivot-table mode, '++' means append
+export STAT_FORMAT=pivot-csv++
+# Check for existing files because of the append mode
+ls *.csv
+rm -i *.csv
+# Set the delimiter used in the txt or csv file
+#export STAT_DELIMITER=,
+export STAT_RUNINFO1=Host=`hostname -s`
+# Append a suffix to the output file name
+#export STAT_SUFFIX=$STAT_RUNINFO1
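+# Example invocation (the benchmark binary name below is illustrative):
+#   repeat=10 ./run_statistics.sh ./my_benchmark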
+for ((i=1;i<=${repeat:=100};++i)); do echo $i of $repeat: && STAT_RUNINFO2=Run=$i $* || break; done
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/perf/statistics.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/perf/statistics.cpp
new file mode 100644 (file)
index 0000000..830b89f
--- /dev/null
@@ -0,0 +1,452 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "statistics.h"
+#include "statistics_xml.h"
+
+#define COUNT_PARAMETERS 3
+
+#ifdef _MSC_VER
+#define snprintf _snprintf
+#endif
+
+void GetTime(char* buff,int size_buff) 
+{
+    tm *newtime;
+    time_t timer;
+    time(&timer);
+    newtime=localtime(&timer);
+    strftime(buff,size_buff,"%H:%M:%S",newtime); 
+}
+
+void GetDate(char* buff,int size_buff) 
+{
+    tm *newtime;
+    time_t timer;
+    time(&timer);  
+    newtime=localtime(&timer);
+    strftime(buff,size_buff,"%Y-%m-%d",newtime); 
+}
+
+
+StatisticsCollector::TestCase StatisticsCollector::SetTestCase(const char *name, const char *mode, int threads)
+{
+    string KeyName(name);
+    switch (SortMode)
+    {
+    case ByThreads: KeyName += Format("_%02d_%s", threads, mode); break;
+    default:
+    case ByAlg: KeyName += Format("_%s_%02d", mode, threads); break;
+    }
+    CurrentKey = Statistics[KeyName];
+    if(!CurrentKey) {
+        CurrentKey = new StatisticResults;
+        CurrentKey->Mode = mode;
+        CurrentKey->Name = name;
+        CurrentKey->Threads = threads;
+        CurrentKey->Results.reserve(RoundTitles.size());
+        Statistics[KeyName] = CurrentKey;
+    }
+    return TestCase(CurrentKey);
+}
+
+StatisticsCollector::~StatisticsCollector()
+{
+    for(Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++)
+        delete i->second;
+}
+
+void StatisticsCollector::ReserveRounds(size_t index)
+{
+    size_t i = RoundTitles.size();
+    if (i > index) return;
+    char buf[16];
+    RoundTitles.resize(index+1);
+    for(; i <= index; i++) {
+        snprintf( buf, 15, "%u", unsigned(i+1) );
+        RoundTitles[i] = buf;
+    }
+    for(Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++) {
+        if(!i->second) printf("!!!'%s' = NULL\n", i->first.c_str());
+        else i->second->Results.reserve(index+1);
+    }
+}
+
+void StatisticsCollector::AddRoundResult(const TestCase &key, value_t v)
+{
+    ReserveRounds(key.access->Results.size());
+    key.access->Results.push_back(v);
+}
+
+void StatisticsCollector::SetRoundTitle(size_t index, const char *fmt, ...)
+{
+    vargf2buff(buff, 128, fmt);
+    ReserveRounds(index);
+    RoundTitles[index] = buff;
+}
+
+void StatisticsCollector::AddStatisticValue(const TestCase &key, const char *type, const char *fmt, ...)
+{
+    vargf2buff(buff, 128, fmt);
+    AnalysisTitles.insert(type);
+    key.access->Analysis[type] = buff;
+}
+
+void StatisticsCollector::AddStatisticValue(const char *type, const char *fmt, ...)
+{
+    vargf2buff(buff, 128, fmt);
+    AnalysisTitles.insert(type);
+    CurrentKey->Analysis[type] = buff;
+}
+
+void StatisticsCollector::SetRunInfo(const char *title, const char *fmt, ...)
+{
+    vargf2buff(buff, 256, fmt);
+    RunInfo.push_back(make_pair(title, buff));
+}
+
+void StatisticsCollector::SetStatisticFormula(const char *name, const char *formula)
+{
+    Formulas[name] = formula;
+}
+
+void StatisticsCollector::SetTitle(const char *fmt, ...)
+{
+    vargf2buff(buff, 256, fmt);
+    Title = buff;
+}
+
+string ExcelFormula(const string &fmt, size_t place, size_t rounds, bool is_horizontal)
+{
+    char buff[16];
+    if(is_horizontal)
+        snprintf(buff, 15, "RC[%u]:RC[%u]", unsigned(place), unsigned(place+rounds-1));
+    else
+        snprintf(buff, 15, "R[%u]C:R[%u]C", unsigned(place+1), unsigned(place+rounds));
+    string result(fmt); size_t pos = 0;
+    while ( (pos = result.find("ROUNDS", pos, 6)) != string::npos )
+        result.replace(pos, 6, buff);
+    return result;
+}
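+// For example, ExcelFormula("=AVERAGE(ROUNDS)", 3, 5, true) yields "=AVERAGE(RC[3]:RC[7])",
+// while the vertical variant (is_horizontal==false) yields "=AVERAGE(R[4]C:R[8]C)".
+// (The formula string here is an illustrative placeholder.)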
+
+void StatisticsCollector::Print(int dataOutput, const char *ModeName)
+{
+    FILE *OutputFile;
+    const char *file_suffix = getenv("STAT_SUFFIX");
+    if( !file_suffix ) file_suffix = "";
+    const char *file_format = getenv("STAT_FORMAT");
+    if( file_format ) {
+        dataOutput = 0;
+        if( strstr(file_format, "con")||strstr(file_format, "std") ) dataOutput |= StatisticsCollector::Stdout;
+        if( strstr(file_format, "txt")||strstr(file_format, "csv") ) dataOutput |= StatisticsCollector::TextFile;
+        if( strstr(file_format, "excel")||strstr(file_format, "xml") ) dataOutput |= StatisticsCollector::ExcelXML;
+        if( strstr(file_format, "htm") ) dataOutput |= StatisticsCollector::HTMLFile;
+        if( strstr(file_format, "pivot") ) dataOutput |= StatisticsCollector::PivotMode;
+    }
+    for(int i = 1; i < 10; i++) {
+        string env = Format("STAT_RUNINFO%d", i);
+        const char *info = getenv(env.c_str());
+        if( info ) {
+            string title(info);
+            size_t pos = title.find('=');
+            if( pos != string::npos ) {
+                env = title.substr(pos+1);
+                title.resize(pos);
+            } else env = title;
+            RunInfo.push_back(make_pair(title, env));
+        }
+    }
+
+    if (dataOutput & StatisticsCollector::Stdout)
+    {
+        printf("\n-=# %s #=-\n", Title.c_str());
+        if(SortMode == ByThreads)
+            printf("    Name    |  #  | %s ", ModeName);
+        else
+            printf("    Name    | %s |  #  ", ModeName);
+        for (AnalysisTitles_t::iterator i = AnalysisTitles.begin(); i != AnalysisTitles.end(); i++)
+            printf("|%s", i->c_str()+1);
+
+        for (Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++)
+        {
+            if(SortMode == ByThreads)
+                printf("\n%12s|% 5d|%6s", i->second->Name.c_str(), i->second->Threads, i->second->Mode.c_str());
+            else
+                printf("\n%12s|%6s|% 5d", i->second->Name.c_str(), i->second->Mode.c_str(), i->second->Threads);
+            Analysis_t &analisis = i->second->Analysis;
+            AnalysisTitles_t::iterator t = AnalysisTitles.begin();
+            for (Analysis_t::iterator a = analisis.begin(); a != analisis.end(); t++)
+            {
+                char fmt[8]; snprintf(fmt, 7, "|%% %us", unsigned(max(size_t(3), t->size())));
+                if(*t != a->first)
+                    printf(fmt, "");
+                else {
+                    printf(fmt, a->second.c_str()); a++;
+                }
+            }
+        }
+        printf("\n");
+    }
+    if (dataOutput & StatisticsCollector::TextFile)
+    {
+        bool append = false;
+        const char *file_ext = ".txt";
+        if( file_format && strstr(file_format, "++") ) append = true;
+        if( file_format && strstr(file_format, "csv") ) file_ext = ".csv";
+        if ((OutputFile = fopen((Name+file_suffix+file_ext).c_str(), append?"at":"wt")) == NULL) {
+            printf("Can't open .txt file\n");
+        } else {
+            const char *delim = getenv("STAT_DELIMITER");
+            if( !delim || !delim[0] ) {
+                if( file_format && strstr(file_format, "csv") ) delim = ",";
+                else delim = "\t";
+            }
+            if( !append || !ftell(OutputFile) ) { // header needed
+                append = false;
+                if(SortMode == ByThreads) fprintf(OutputFile, "Name%s#%s%s", delim, delim, ModeName);
+                else fprintf(OutputFile, "Name%s%s%s#", delim, ModeName, delim);
+                for( size_t k = 0; k < RunInfo.size(); k++ )
+                    fprintf(OutputFile, "%s%s", delim, RunInfo[k].first.c_str());
+            }
+            if(dataOutput & StatisticsCollector::PivotMode) {
+                if( !append) fprintf(OutputFile, "%sColumn%sValue", delim, delim);
+                for (Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++)
+                {
+                    string RowHead;
+                    if(SortMode == ByThreads)
+                        RowHead = Format("\n%s%s%d%s%s%s", i->second->Name.c_str(), delim, i->second->Threads, delim, i->second->Mode.c_str(), delim);
+                    else
+                        RowHead = Format("\n%s%s%s%s%d%s", i->second->Name.c_str(), delim, i->second->Mode.c_str(), delim, i->second->Threads, delim);
+                    for( size_t k = 0; k < RunInfo.size(); k++ )
+                        RowHead.append(RunInfo[k].second + delim);
+                    Analysis_t &analisis = i->second->Analysis;
+                    for (Analysis_t::iterator a = analisis.begin(); a != analisis.end(); ++a)
+                        fprintf(OutputFile, "%s%s%s%s", RowHead.c_str(), a->first.c_str(), delim, a->second.c_str());
+                    Results_t &r = i->second->Results;
+                    for (size_t k = 0; k < r.size(); k++) {
+                        fprintf(OutputFile, "%s%s%s", RowHead.c_str(), RoundTitles[k].c_str(), delim);
+                        fprintf(OutputFile, ResultsFmt, r[k]);
+                    }
+                }
+            } else {
+                if( !append ) {
+                    for( size_t k = 0; k < RunInfo.size(); k++ )
+                        fprintf(OutputFile, "%s%s", delim, RunInfo[k].first.c_str());
+                    for (AnalysisTitles_t::iterator i = AnalysisTitles.begin(); i != AnalysisTitles.end(); i++)
+                        fprintf(OutputFile, "%s%s", delim, i->c_str()+1);
+                    for (size_t i = 0; i < RoundTitles.size(); i++)
+                        fprintf(OutputFile, "%s%s", delim, RoundTitles[i].c_str());
+                }
+                for (Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++)
+                {
+                    if(SortMode == ByThreads)
+                        fprintf(OutputFile, "\n%s%s%d%s%s", i->second->Name.c_str(), delim, i->second->Threads, delim, i->second->Mode.c_str());
+                    else
+                        fprintf(OutputFile, "\n%s%s%s%s%d", i->second->Name.c_str(), delim, i->second->Mode.c_str(), delim, i->second->Threads);
+                    for( size_t k = 0; k < RunInfo.size(); k++ )
+                        fprintf(OutputFile, "%s%s", delim, RunInfo[k].second.c_str());
+                    Analysis_t &analysis = i->second->Analysis;
+                    AnalysisTitles_t::iterator t = AnalysisTitles.begin();
+                    for (Analysis_t::iterator a = analysis.begin(); a != analysis.end(); ++t) {
+                        fprintf(OutputFile, "%s", delim);
+                        if(*t == a->first) {
+                            fprintf(OutputFile, "%s", a->second.c_str()); ++a;
+                        }
+                    }
+                    //data
+                    Results_t &r = i->second->Results;
+                    for (size_t k = 0; k < r.size(); k++)
+                    {
+                        fprintf(OutputFile, "%s", delim);
+                        fprintf(OutputFile, ResultsFmt, r[k]);
+                    }
+                }
+            }
+            fprintf(OutputFile, "\n");
+            fclose(OutputFile);
+        }
+    }
+    if (dataOutput & StatisticsCollector::HTMLFile)
+    {
+        if ((OutputFile = fopen((Name+file_suffix+".html").c_str(), "w+t")) == NULL) {
+            printf("Can't open .html file\n");
+        } else {
+            char TimerBuff[100], DateBuff[100];
+            GetTime(TimerBuff,sizeof(TimerBuff));
+            GetDate(DateBuff,sizeof(DateBuff));
+            fprintf(OutputFile, "<html><head>\n<title>%s</title>\n</head><body>\n", Title.c_str());
+            //-----------------------
+            fprintf(OutputFile, "<table id=\"h\" style=\"position:absolute;top:20\" border=1 cellspacing=0 cellpadding=2>\n");
+            fprintf(OutputFile, "<tr><td><a name=hr href=#vr onclick=\"v.style.visibility='visible';"
+                                "h.style.visibility='hidden';\">Flip[H]</a></td>"
+                                "<td>%s</td><td>%s</td><td colspan=%u>%s",
+                DateBuff, TimerBuff, unsigned(AnalysisTitles.size() + RoundTitles.size()), Title.c_str());
+            for( size_t k = 0; k < RunInfo.size(); k++ )
+                fprintf(OutputFile, "; %s: %s", RunInfo[k].first.c_str(), RunInfo[k].second.c_str());
+            fprintf(OutputFile, "</td></tr>\n<tr bgcolor=#CCFFFF><td>Name</td><td>Threads</td><td>%s</td>", ModeName);
+            for (AnalysisTitles_t::iterator i = AnalysisTitles.begin(); i != AnalysisTitles.end(); i++)
+                fprintf(OutputFile, "<td>%s</td>", i->c_str()+1);
+            for (size_t i = 0; i < RoundTitles.size(); i++)
+                fprintf(OutputFile, "<td>%s</td>", RoundTitles[i].c_str());
+            for (Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++)
+            {
+                fprintf(OutputFile, "</tr>\n<tr><td bgcolor=#CCFFCC>%s</td><td bgcolor=#CCFFCC>%d</td><td bgcolor=#CCFFCC>%4s</td>",
+                    i->second->Name.c_str(), i->second->Threads, i->second->Mode.c_str());
+                //statistics
+                AnalysisTitles_t::iterator t = AnalysisTitles.begin();
+                for (Analysis_t::iterator j = i->second->Analysis.begin(); j != i->second->Analysis.end(); t++)
+                {
+                    fprintf(OutputFile, "<td bgcolor=#FFFF99>%s</td>", (*t != j->first)?" ":(i->second->Analysis[j->first]).c_str());
+                    if(*t == j->first) j++;
+                }
+                //data
+                Results_t &r = i->second->Results;
+                for (size_t k = 0; k < r.size(); k++)
+                {
+                    fprintf(OutputFile, "<td>");
+                    fprintf(OutputFile, ResultsFmt, r[k]);
+                    fprintf(OutputFile, "</td>");
+                }
+            }
+            fprintf(OutputFile, "</tr>\n</table>\n");
+            //////////////////////////////////////////////////////
+            fprintf(OutputFile, "<table id=\"v\" style=\"visibility:hidden;position:absolute;top:20\" border=1 cellspacing=0 cellpadding=2>\n");
+            fprintf(OutputFile, "<tr><td><a name=vr href=#hr onclick=\"h.style.visibility='visible';"
+                                "v.style.visibility='hidden';\">Flip[V]</a></td>\n"
+                                "<td>%s</td><td>%s</td><td colspan=%u>%s</td>", 
+                DateBuff, TimerBuff, unsigned(max(Statistics.size()-2,size_t(1))), Title.c_str());
+
+            fprintf(OutputFile, "</tr>\n<tr bgcolor=#CCFFCC><td bgcolor=#CCFFFF>Name</td>");
+            for (Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++)
+                fprintf(OutputFile, "<td>%s</td>", i->second->Name.c_str());
+            fprintf(OutputFile, "</tr>\n<tr bgcolor=#CCFFCC><td bgcolor=#CCFFFF>Threads</td>");
+            for (Statistics_t::iterator n = Statistics.begin(); n != Statistics.end(); n++)
+                fprintf(OutputFile, "<td>%d</td>", n->second->Threads);
+            fprintf(OutputFile, "</tr>\n<tr bgcolor=#CCFFCC><td bgcolor=#CCFFFF>%s</td>", ModeName);
+            for (Statistics_t::iterator m = Statistics.begin(); m != Statistics.end(); m++)
+                fprintf(OutputFile, "<td>%s</td>", m->second->Mode.c_str());
+
+            for (AnalysisTitles_t::iterator t = AnalysisTitles.begin(); t != AnalysisTitles.end(); t++)
+            {
+                fprintf(OutputFile, "</tr>\n<tr bgcolor=#FFFF99><td bgcolor=#CCFFFF>%s</td>", t->c_str()+1);
+                for (Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++)
+                    fprintf(OutputFile, "<td>%s</td>", i->second->Analysis.count(*t)?i->second->Analysis[*t].c_str():" ");
+            }
+
+            for (size_t r = 0; r < RoundTitles.size(); r++)
+            {
+                fprintf(OutputFile, "</tr>\n<tr><td bgcolor=#CCFFFF>%s</td>", RoundTitles[r].c_str());
+                for (Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++)
+                {
+                    Results_t &result = i->second->Results;
+                    fprintf(OutputFile, "<td>");
+                    if(result.size() > r)
+                        fprintf(OutputFile, ResultsFmt, result[r]);
+                    fprintf(OutputFile, "</td>");
+                }
+            }
+            fprintf(OutputFile, "</tr>\n</table>\n</body></html>\n");
+            fclose(OutputFile);
+        }
+    }
+    if (dataOutput & StatisticsCollector::ExcelXML)
+    {
+        if ((OutputFile = fopen((Name+file_suffix+".xml").c_str(), "w+t")) == NULL) {
+            printf("Can't open .xml file\n");
+        } else {
+            // TODO:PivotMode
+            char UserName[100];
+            char TimerBuff[100], DateBuff[100];
+#if _WIN32 || _WIN64
+            const char *user_env = getenv("USERNAME");
+#else
+            const char *user_env = getenv("USER");
+#endif
+            // getenv may return NULL; guard it and avoid overflowing UserName
+            strncpy(UserName, user_env ? user_env : "unknown", sizeof(UserName)-1);
+            UserName[sizeof(UserName)-1] = 0;
+            //--------------------------------
+            GetTime(TimerBuff,sizeof(TimerBuff));
+            GetDate(DateBuff,sizeof(DateBuff));
+            //--------------------------
+            fprintf(OutputFile, XMLHead, UserName, TimerBuff);
+            fprintf(OutputFile, XMLStyles);
+            fprintf(OutputFile, XMLBeginSheet, "Horizontal");
+            fprintf(OutputFile, XMLNames,1,1,1,int(AnalysisTitles.size()+Formulas.size()+COUNT_PARAMETERS));
+            fprintf(OutputFile, XMLBeginTable, int(RoundTitles.size()+Formulas.size()+AnalysisTitles.size()+COUNT_PARAMETERS+1/*title*/), int(Statistics.size()+1));
+            fprintf(OutputFile, XMLBRow);
+            fprintf(OutputFile, XMLCellTopName);
+            fprintf(OutputFile, XMLCellTopThread);
+            fprintf(OutputFile, XMLCellTopMode, ModeName);
+            for (AnalysisTitles_t::iterator j = AnalysisTitles.begin(); j != AnalysisTitles.end(); j++)
+                fprintf(OutputFile, XMLAnalysisTitle, j->c_str()+1);
+            for (Formulas_t::iterator j = Formulas.begin(); j != Formulas.end(); j++)
+                fprintf(OutputFile, XMLAnalysisTitle, j->first.c_str()+1);
+            for (RoundTitles_t::iterator j = RoundTitles.begin(); j != RoundTitles.end(); j++)
+                fprintf(OutputFile, XMLAnalysisTitle, j->c_str());
+            string Info = Title;
+            for( size_t k = 0; k < RunInfo.size(); k++ )
+                Info.append("; " + RunInfo[k].first + "=" + RunInfo[k].second);
+            fprintf(OutputFile, XMLCellEmptyWhite, Info.c_str());
+            fprintf(OutputFile, XMLERow);
+            //------------------------
+            for (Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++)
+            {
+                fprintf(OutputFile, XMLBRow);
+                fprintf(OutputFile, XMLCellName,  i->second->Name.c_str());
+                fprintf(OutputFile, XMLCellThread,i->second->Threads);
+                fprintf(OutputFile, XMLCellMode,  i->second->Mode.c_str());
+                //statistics
+                AnalysisTitles_t::iterator at = AnalysisTitles.begin();
+                for (Analysis_t::iterator j = i->second->Analysis.begin(); j != i->second->Analysis.end(); at++)
+                {
+                    fprintf(OutputFile, XMLCellAnalysis, (*at != j->first)?"":(i->second->Analysis[j->first]).c_str());
+                    if(*at == j->first) j++;
+                }
+                //formulas
+                size_t place = 0;
+                Results_t &v = i->second->Results;
+                for (Formulas_t::iterator f = Formulas.begin(); f != Formulas.end(); f++, place++)
+                    fprintf(OutputFile, XMLCellFormula, ExcelFormula(f->second, Formulas.size()-place, v.size(), true).c_str());
+                //data
+                for (size_t k = 0; k < v.size(); k++)
+                {
+                    fprintf(OutputFile, XMLCellData, v[k]);
+                }
+                if(v.size() < RoundTitles.size())
+                    fprintf(OutputFile, XMLMergeRow, int(RoundTitles.size() - v.size()));
+                fprintf(OutputFile, XMLERow);
+            }
+            //------------------------
+            fprintf(OutputFile, XMLEndTable);
+            fprintf(OutputFile, XMLWorkSheetProperties,1,1,3,3,int(RoundTitles.size()+AnalysisTitles.size()+Formulas.size()+COUNT_PARAMETERS));
+            fprintf(OutputFile, XMLAutoFilter,1,1,1,int(AnalysisTitles.size()+Formulas.size()+COUNT_PARAMETERS));
+            fprintf(OutputFile, XMLEndWorkSheet);
+            //----------------------------------------
+            fprintf(OutputFile, XMLEndWorkbook);
+            fclose(OutputFile);
+        }
+    }
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/perf/statistics.h b/deal.II/contrib/tbb/tbb30_104oss/src/perf/statistics.h
new file mode 100644 (file)
index 0000000..5279bd8
--- /dev/null
@@ -0,0 +1,194 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Internal Intel tool
+
+#ifndef __STATISTICS_H__
+#define __STATISTICS_H__
+
+#define _CRT_SECURE_NO_DEPRECATE 1
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <vector>
+#include <map>
+#include <set>
+#include <string>
+#include <time.h>
+
+using namespace std;
+typedef double value_t;
+
+/*
+   Statistical collector class.
+  
+   Resulting table output:
+        +---------------------------------------------------------------------------+
+        | [Date] <Title>...                                                         |
+        +----------+----v----+--v---+----------------+------------+-..-+------------+
+        | TestName | Threads | Mode | Rounds results | Stat_type1 | .. | Stat_typeN |
+        +----------+---------+------+-+-+-+-..-+-+-+-+------------+-..-+------------+
+        |          |         |      | | | | .. | | | |            |    |            |
+        ..        ...       ...     ..................            ......           ..
+        |          |         |      | | | | .. | | | |            |    |            |
+        +----------+---------+------+-+-+-+-..-+-+-+-+------------+-..-+------------+
+
+   Iterating table output:
+        +---------------------------------------------------------------------------+
+        | [Date] <TestName>, Threads: <N>, Mode: <M>; for <Title>...                |
+        +----------+----v----+--v---+----------------+------------+-..-+------------+
+        
+*/
+
+class StatisticsCollector
+{
+public:
+    typedef map<string, string> Analysis_t;
+    typedef vector<value_t> Results_t;
+
+protected:
+    StatisticsCollector(const StatisticsCollector &);
+
+    struct StatisticResults
+    {
+        string              Name;
+        string              Mode;
+        int                 Threads;
+        Results_t           Results;
+        Analysis_t          Analysis;
+    };
+
+    // internal members
+       //bool OpenFile;
+    StatisticResults *CurrentKey;
+    string Title;
+    const char /**Name,*/ *ResultsFmt;
+       string Name;
+    //! Data
+    typedef map<string, StatisticResults*> Statistics_t;
+    Statistics_t Statistics;
+    typedef vector<string> RoundTitles_t;
+    RoundTitles_t RoundTitles;
+    //TODO: merge those into one structure
+    typedef map<string, string> Formulas_t;
+    Formulas_t   Formulas;
+    typedef set<string> AnalysisTitles_t;
+    AnalysisTitles_t AnalysisTitles;
+    typedef vector<pair<string, string> > RunInfo_t;
+    RunInfo_t RunInfo;
+
+public:
+    struct TestCase {
+        StatisticResults *access;
+        TestCase() : access(0) {}
+        TestCase(StatisticResults *link) : access(link) {}
+        const char *getName() const { return access->Name.c_str(); }
+        const char *getMode() const { return access->Mode.c_str(); }
+        int getThreads()       const { return access->Threads; }
+        const Results_t &getResults() const { return access->Results; }
+        const Analysis_t &getAnalysis() const { return access->Analysis; }
+    };
+
+    enum Sorting {
+        ByThreads, ByAlg
+    };
+
+    //! Data and output types
+    enum DataOutput {
+        // Verbosity level enumeration
+        Statistic = 1,     //< Analytical data - computed after all iterations and rounds passed
+        Result    = 2,     //< Testing data    - collected after all iterations passed
+        Iteration = 3,     //< Verbose data    - collected at each iteration (for each size - in case of containers)
+        // ExtraVerbose is not applicable yet :) be happy, but flexibility is always welcome
+
+        // Next constants are bit-fields
+        Stdout   = 1<<8,    //< Output to the console
+        TextFile = 1<<9,    //< Output to plain text file "name.txt" (delimiter is TAB by default)
+        ExcelXML = 1<<10,   //< Output to Excel-readable XML-file "name.xml"
+        HTMLFile = 1<<11,   //< Output to HTML file "name.html"
+        PivotMode= 1<<15    //< Puts all the rounds into one column to better fit a pivot table in Excel
+    };
+
+    //! Constructor. Specify the test set name, which is used as the name of the output files
+    StatisticsCollector(const char *name, Sorting mode = ByThreads, const char *fmt = "%g")
+        :  CurrentKey(NULL), ResultsFmt(fmt), Name(name), SortMode(mode) {}
+
+    ~StatisticsCollector();
+
+    //! Set the test set title, supporting printf-like arguments
+    void SetTitle(const char *fmt, ...);
+
+    //! Specify next test key
+    TestCase SetTestCase(const char *name, const char *mode, int threads);
+    //! Specify next test key
+    void SetTestCase(const TestCase &t) { SetTestCase(t.getName(), t.getMode(), t.getThreads()); }
+    //! Reserve the specified number of rounds. Use for efficiency. Used mostly internally
+    void ReserveRounds(size_t index);
+    //! Add result of the measure
+    void AddRoundResult(const TestCase &, value_t v);
+    //! Add result of the current measure
+    void AddRoundResult(value_t v) { if(CurrentKey) AddRoundResult(TestCase(CurrentKey), v); }
+    //! Add title of round
+    void SetRoundTitle(size_t index, const char *fmt, ...);
+    //! Add numbered title of round
+    void SetRoundTitle(size_t index, int num) { SetRoundTitle(index, "%d", num); }
+    //! Get number of rounds
+    size_t GetRoundsCount() const { return RoundTitles.size(); }
+    // Set statistic value for the test
+    void AddStatisticValue(const TestCase &, const char *type, const char *fmt, ...);
+    // Set statistic value for the current test
+    void AddStatisticValue(const char *type, const char *fmt, ...);
+    //! Add Excel-processing formulas. @arg formula can contain more than one instance of
+    //! the ROUNDS template, which expands into the range of cells holding the result values
+    //TODO://! #1 .. #n templates represent data cells from the first to the last
+    //TODO: merge with Analysis
+    void SetStatisticFormula(const char *name, const char *formula);
+    //! Add information about run or compile parameters
+    void SetRunInfo(const char *title, const char *fmt, ...);
+    void SetRunInfo(const char *title, int num) { SetRunInfo(title, "%d", num); }
+
+    //! Data output
+    void Print(int dataOutput, const char *ModeName = "Mode");
+
+private:
+    Sorting SortMode;
+};
+
+//! using: Func(const char *fmt, ...) { vargf2buff(buff, 128, fmt);...
+#define vargf2buff(name, size, fmt) char name[size]; memset(name, 0, size); va_list args; va_start(args, fmt); vsnprintf( name, size-1, fmt, args)
+
+inline std::string Format(const char *fmt, ...) {
+    vargf2buff(buf, 1024, fmt); // from statistics.h
+    return std::string(buf);
+}
+
+#ifdef STATISTICS_INLINE
+#include "statistics.cpp"
+#endif
+#endif //__STATISTICS_H__
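
The interface above is enough to drive the collector by hand. The following sketch is hypothetical (suite name, test case, and values are invented) and assumes statistics.cpp is compiled in through the STATISTICS_INLINE hook the header provides; it records two rounds for a single test case and writes the plain-text and Excel XML reports:

    // Hypothetical driver for StatisticsCollector; not part of the TBB sources.
    #define STATISTICS_INLINE
    #include "statistics.h"

    int main() {
        // The collector name is used to build the output file names.
        StatisticsCollector stats("demo_suite", StatisticsCollector::ByThreads, "%g");
        stats.SetTitle("Toy run with %d test case", 1);
        stats.SetRunInfo("Host", "example-machine");        // free-form run parameter
        // Results are keyed by (test name, mode, thread count).
        StatisticsCollector::TestCase tc = stats.SetTestCase("copy", "Serial", 1);
        stats.SetRoundTitle(0, 1024);                       // label round 0 with a problem size
        stats.AddRoundResult(tc, 1.5);
        stats.SetRoundTitle(1, 2048);
        stats.AddRoundResult(tc, 3.1);
        // Mirrors the "1AVG per size" convention used by the perf tests below.
        stats.SetStatisticFormula("1Avg", "=AVERAGE(ROUNDS)");
        stats.Print(StatisticsCollector::TextFile | StatisticsCollector::ExcelXML);
        return 0;
    }
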
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/perf/statistics_xml.h b/deal.II/contrib/tbb/tbb30_104oss/src/perf/statistics_xml.h
new file mode 100644 (file)
index 0000000..7be259e
--- /dev/null
@@ -0,0 +1,208 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+const char XMLBRow[]=
+"   <Row>\n";
+
+const char XMLERow[]=
+"   </Row>\n";
+
+const char XMLHead[]=
+"<?xml version=\"1.0\"?>\n"
+"<?mso-application progid=\"Excel.Sheet\"?>\n\
+<Workbook xmlns=\"urn:schemas-microsoft-com:office:spreadsheet\"\n\
+ xmlns:o=\"urn:schemas-microsoft-com:office:office\"\n\
+ xmlns:x=\"urn:schemas-microsoft-com:office:excel\"\n\
+ xmlns:ss=\"urn:schemas-microsoft-com:office:spreadsheet\"\n\
+ xmlns:html=\"http://www.w3.org/TR/REC-html40\">\n\
+ <DocumentProperties xmlns=\"urn:schemas-microsoft-com:office:office\">\n\
+  <Author>%s</Author>\n\
+  <Created>%s</Created>\n\
+  <Company>Intel Corporation</Company>\n\
+ </DocumentProperties>\n\
+ <ExcelWorkbook xmlns=\"urn:schemas-microsoft-com:office:excel\">\n\
+  <RefModeR1C1/>\n\
+ </ExcelWorkbook>\n";
+ const char XMLStyles[]=
+ " <Styles>\n\
+  <Style ss:ID=\"Default\" ss:Name=\"Normal\">\n\
+   <Alignment ss:Vertical=\"Bottom\" ss:Horizontal=\"Left\" ss:WrapText=\"0\"/>\n\
+  </Style>\n\
+  <Style ss:ID=\"s26\">\n\
+   <Alignment ss:Vertical=\"Top\"  ss:Horizontal=\"Left\" ss:WrapText=\"0\"/>\n\
+   <Borders>\n\
+    <Border ss:Position=\"Bottom\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\
+    <Border ss:Position=\"Left\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\
+    <Border ss:Position=\"Right\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\
+    <Border ss:Position=\"Top\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\
+   </Borders>\n\
+   <Interior ss:Color=\"#FFFF99\" ss:Pattern=\"Solid\"/>\n\
+  </Style>\n\
+  <Style ss:ID=\"s25\">\n\
+   <Alignment ss:Vertical=\"Top\"  ss:Horizontal=\"Left\" ss:WrapText=\"0\"/>\n\
+   <Borders>\n\
+    <Border ss:Position=\"Bottom\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\
+    <Border ss:Position=\"Left\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\
+    <Border ss:Position=\"Right\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\
+    <Border ss:Position=\"Top\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\
+   </Borders>\n\
+   <Interior ss:Color=\"#CCFFFF\" ss:Pattern=\"Solid\"/>\n\
+  </Style>\n\
+  <Style ss:ID=\"s24\">\n\
+   <Alignment ss:Vertical=\"Top\"  ss:Horizontal=\"Left\" ss:WrapText=\"0\"/>\n\
+   <Borders>\n\
+    <Border ss:Position=\"Bottom\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\
+    <Border ss:Position=\"Left\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\
+    <Border ss:Position=\"Right\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\
+    <Border ss:Position=\"Top\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\
+   </Borders>\n\
+   <Interior ss:Color=\"#CCFFCC\" ss:Pattern=\"Solid\"/>\n\
+  </Style>\n\
+  <Style ss:ID=\"s23\">\n\
+   <Alignment ss:Vertical=\"Top\"  ss:Horizontal=\"Left\" ss:WrapText=\"0\"/>\n\
+   <Borders>\n\
+    <Border ss:Position=\"Bottom\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\
+    <Border ss:Position=\"Left\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\
+    <Border ss:Position=\"Right\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\
+    <Border ss:Position=\"Top\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\
+   </Borders>\n\
+  </Style>\n\
+ </Styles>\n";
+
+const char XMLBeginSheet[]=
+" <Worksheet ss:Name=\"%s\">\n";
+
+const char XMLNames[]=
+"  <Names>\n\
+   <NamedRange ss:Name=\"_FilterDatabase\" ss:RefersTo=\"R%dC%d:R%dC%d\" ss:Hidden=\"1\"/>\n\
+  </Names>\n";
+
+const char XMLBeginTable[]=
+"  <Table ss:ExpandedColumnCount=\"%d\" ss:ExpandedRowCount=\"%d\" x:FullColumns=\"1\"\n\
+   x:FullRows=\"1\">\n";
+   
+const char XMLColumsHorizontalTable[]=
+"   <Column ss:Index=\"1\" ss:Width=\"108.75\"/>\n\
+   <Column ss:Index=\"%d\" ss:Width=\"77.25\" ss:Span=\"%d\"/>\n";
+const char XMLColumsVerticalTable[]= 
+"   <Column ss:Index=\"1\" ss:Width=\"77.25\" ss:Span=\"%d\"/>\n";
+
+const char XMLNameAndTime[]=
+"    <Cell><Data ss:Type=\"String\">%s</Data></Cell>\n\
+    <Cell><Data ss:Type=\"String\">%s</Data></Cell>\n\
+    <Cell><Data ss:Type=\"String\">%s</Data></Cell>\n";
+
+const char XMLTableParamAndTitle[]=
+"    <Cell><Data ss:Type=\"Number\">%d</Data></Cell>\n\
+    <Cell><Data ss:Type=\"Number\">%d</Data></Cell>\n\
+    <Cell><Data ss:Type=\"Number\">%d</Data></Cell>\n\
+    <Cell><Data ss:Type=\"String\">%s</Data></Cell>\n";
+
+//--------------
+const char XMLCellTopName[]=
+"   <Cell ss:StyleID=\"s25\"><Data ss:Type=\"String\">Name</Data></Cell>\n";
+const char XMLCellTopThread[]=
+"   <Cell ss:StyleID=\"s25\"><Data ss:Type=\"String\">Threads</Data></Cell>\n";
+const char XMLCellTopMode[]=
+"   <Cell ss:StyleID=\"s25\"><Data ss:Type=\"String\">%s</Data></Cell>\n";
+//---------------------
+const char XMLAnalysisTitle[]=
+"   <Cell ss:StyleID=\"s25\"><Data ss:Type=\"String\">%s</Data></Cell>\n";
+
+const char XMLCellName[]=
+"    <Cell ss:StyleID=\"s24\"><Data ss:Type=\"String\">%s</Data></Cell>\n";
+
+const char XMLCellThread[]=
+"    <Cell ss:StyleID=\"s24\"><Data ss:Type=\"Number\">%d</Data></Cell>\n";
+
+const char XMLCellMode[]=
+"    <Cell ss:StyleID=\"s24\"><Data ss:Type=\"String\">%s</Data></Cell>\n";
+
+const char XMLCellAnalysis[]=
+"    <Cell ss:StyleID=\"s26\"><Data ss:Type=\"String\">%s</Data></Cell>\n";
+
+const char XMLCellFormula[]=
+"    <Cell ss:StyleID=\"s26\" ss:Formula=\"%s\"><Data ss:Type=\"Number\"></Data></Cell>\n";
+
+const char XMLCellData[]=
+"    <Cell ss:StyleID=\"s23\"><Data ss:Type=\"Number\">%g</Data></Cell>\n";
+
+const char XMLMergeRow[]=
+"   <Cell ss:StyleID=\"s23\" ss:MergeAcross=\"%d\" ><Data ss:Type=\"String\"></Data></Cell>\n";
+
+const char XMLCellEmptyWhite[]=
+"    <Cell><Data ss:Type=\"String\">%s</Data></Cell>\n";
+
+const char XMLCellEmptyTitle[]=
+"    <Cell ss:StyleID=\"s25\"><Data ss:Type=\"String\"></Data></Cell>\n";
+
+const char XMLEndTable[]=
+"  </Table>\n";
+
+const char XMLAutoFilter[]=
+"  <AutoFilter x:Range=\"R%dC%d:R%dC%d\" xmlns=\"urn:schemas-microsoft-com:office:excel\">\n\
+  </AutoFilter>\n";
+
+const char XMLEndWorkSheet[]=
+ " </Worksheet>\n";
+
+const char XMLWorkSheetProperties[]=
+"  <WorksheetOptions xmlns=\"urn:schemas-microsoft-com:office:excel\">\n\
+   <Unsynced/>\n\
+   <Selected/>\n\
+   <FreezePanes/>\n\
+   <FrozenNoSplit/>\n\
+   <SplitHorizontal>%d</SplitHorizontal>\n\
+   <TopRowBottomPane>%d</TopRowBottomPane>\n\
+   <SplitVertical>%d</SplitVertical>\n\
+   <LeftColumnRightPane>%d</LeftColumnRightPane>\n\
+   <ActivePane>0</ActivePane>\n\
+   <Panes>\n\
+    <Pane>\n\
+     <Number>3</Number>\n\
+    </Pane>\n\
+    <Pane>\n\
+     <Number>1</Number>\n\
+    </Pane>\n\
+    <Pane>\n\
+     <Number>2</Number>\n\
+    </Pane>\n\
+    <Pane>\n\
+     <Number>0</Number>\n\
+     <ActiveRow>0</ActiveRow>\n\
+     <ActiveCol>%d</ActiveCol>\n\
+    </Pane>\n\
+   </Panes>\n\
+   <ProtectObjects>False</ProtectObjects>\n\
+   <ProtectScenarios>False</ProtectScenarios>\n\
+  </WorksheetOptions>\n";
+
+const char XMLEndWorkbook[]=
+ "</Workbook>\n";
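
These fragments are plain fprintf templates; StatisticsCollector::Print strings them together as shown in statistics.cpp above. As a rough, hypothetical illustration (file name, author, and cell values invented), a minimal one-data-row sheet could be emitted like this:

    // Hypothetical illustration of the template strings; not part of the TBB sources.
    #include <cstdio>
    #include "statistics_xml.h"

    int main() {
        FILE *f = fopen("demo.xml", "wt");
        if (!f) return 1;
        fprintf(f, XMLHead, "someuser", "2010-11-19");   // author, creation time
        fprintf(f, XMLStyles);
        fprintf(f, XMLBeginSheet, "Horizontal");
        fprintf(f, XMLBeginTable, 4 /*columns*/, 2 /*rows*/);
        fprintf(f, XMLBRow);                             // header row
        fprintf(f, XMLCellTopName);
        fprintf(f, XMLCellTopThread);
        fprintf(f, XMLCellTopMode, "Mode");
        fprintf(f, XMLAnalysisTitle, "Result");
        fprintf(f, XMLERow);
        fprintf(f, XMLBRow);                             // one data row
        fprintf(f, XMLCellName, "demo test");
        fprintf(f, XMLCellThread, 4);
        fprintf(f, XMLCellMode, "Serial");
        fprintf(f, XMLCellData, 3.14);
        fprintf(f, XMLERow);
        fprintf(f, XMLEndTable);
        fprintf(f, XMLEndWorkSheet);
        fprintf(f, XMLEndWorkbook);
        fclose(f);
        return 0;
    }
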
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/perf/time_framework.h b/deal.II/contrib/tbb/tbb30_104oss/src/perf/time_framework.h
new file mode 100644 (file)
index 0000000..adc9b79
--- /dev/null
@@ -0,0 +1,359 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TIME_FRAMEWORK_H__
+#define __TIME_FRAMEWORK_H__
+
+#include <cstdlib>
+#include <math.h>
+#include <vector>
+#include <string>
+#include <sstream>
+#include "tbb/tbb_stddef.h"
+#include "tbb/task_scheduler_init.h"
+#include "tbb/tick_count.h"
+#define HARNESS_CUSTOM_MAIN 1
+#include "../test/harness.h"
+#include "../test/harness_barrier.h"
+#define STATISTICS_INLINE
+#include "statistics.h"
+
+#ifndef ARG_TYPE
+typedef intptr_t arg_t;
+#else
+typedef ARG_TYPE arg_t;
+#endif
+
+class Timer {
+    tbb::tick_count tick;
+public:
+    Timer() { tick = tbb::tick_count::now(); }
+    double get_time()  { return (tbb::tick_count::now() - tick).seconds(); }
+    double diff_time(const Timer &newer) { return (newer.tick - tick).seconds(); }
+    double mark_time() { tbb::tick_count t1(tbb::tick_count::now()), t2(tick); tick = t1; return (t1 - t2).seconds(); }
+    double mark_time(const Timer &newer) { tbb::tick_count t(tick); tick = newer.tick; return (tick - t).seconds(); }
+};
+
+class TesterBase /*: public tbb::internal::no_copy*/ {
+protected:
+    friend class TestProcessor;
+    friend class TestRunner;
+
+    //! barrier for synchronizing the threads
+    Harness::SpinBarrier *barrier;
+    
+    //! number of tests provided by this tester
+    const int tests_count;
+    
+    //! number of threads to operate
+    int threads_count;
+
+    //! some value for tester
+    arg_t value;
+
+    //! tester name
+    const char *tester_name;
+
+    // avoid false sharing
+    char pad[128 - sizeof(arg_t) - sizeof(int)*2 - sizeof(void*)*2 ];
+
+public:
+    //! Init tester base. @arg ntests is the number of embedded tests in this tester.
+    TesterBase(int ntests)
+        : barrier(NULL), tests_count(ntests)
+    {}
+    virtual ~TesterBase() {}
+
+    //! internal function
+    void base_init(arg_t v, int t, Harness::SpinBarrier &b) {
+        threads_count = t;
+        barrier = &b;
+        value = v;
+        init();
+    }
+
+    //! Optionally override to initialize after the value and thread count have been set.
+    virtual void init() { }
+
+    //! Override to provide your names
+    virtual std::string get_name(int testn) {
+        return Format("test %d", testn);
+    }
+
+    //! Optionally override to initialize the test mode just before execution for a given thread number.
+    virtual void test_prefix(int testn, int threadn) { }
+
+    //! Override to provide the main test entry function; it returns a value to record
+    virtual value_t test(int testn, int threadn) = 0;
+
+    //! Type of aggregation from results of threads
+    enum result_t {
+        SUM, AVG, MIN, MAX
+    };
+
+    //! Override to change the result type for the test. Return a postfix for the test name, or 0 if the result type is not needed.
+    virtual const char *get_result_type(int /*testn*/, result_t type) const {
+        return type == AVG ? "" : 0; // only average result by default
+    }
+};
+
+/*****
+a user's tester concept:
+
+class tester: public TesterBase {
+public:
+    //! init tester with known amount of work
+    tester() : TesterBase(<user-specified tests count>) { ... }
+
+    //! run the test with sequential number @arg test_number for @arg thread.
+    / *override* / value_t test(int test_number, int thread);
+};
+
+******/
+
+template<typename Tester, int scale = 1>
+class TimeTest : public Tester {
+    /*override*/ value_t test(int testn, int threadn) {
+        Timer timer;
+        Tester::test(testn, threadn);
+        return timer.get_time() * double(scale);
+    }
+};
+
+template<typename Tester>
+class NanosecPerValue : public Tester {
+    /*override*/ value_t test(int testn, int threadn) {
+        Timer timer;
+        Tester::test(testn, threadn);
+        // return time (ns) per value
+        return timer.get_time()*1000000.0/double(Tester::value);
+    }
+};
+
+template<typename Tester, int scale = 1>
+class ValuePerSecond : public Tester {
+    /*override*/ value_t test(int testn, int threadn) {
+        Timer timer;
+        Tester::test(testn, threadn);
+        // return the number of values processed per (seconds * scale)
+        return double(Tester::value)/(timer.get_time()*scale);
+    }
+};
+
+template<typename Tester, int scale = 1>
+class NumberPerSecond : public Tester {
+    /*override*/ value_t test(int testn, int threadn) {
+        Timer timer;
+        Tester::test(testn, threadn);
+        // return scale units per second
+        return double(scale)/timer.get_time();
+    }
+};
+
+// operate with single tester
+class TestRunner {
+    friend class TestProcessor;
+    friend struct RunArgsBody;
+    TestRunner(const TestRunner &); // don't copy
+
+    const char *tester_name;
+    StatisticsCollector *stat;
+    std::vector<std::vector<StatisticsCollector::TestCase> > keys;
+
+public:
+    TesterBase &tester;
+
+    template<typename Test>
+    TestRunner(const char *name, Test *test)
+        : tester_name(name), tester(*static_cast<TesterBase*>(test))
+    {
+        test->tester_name = name;
+    }
+    
+    ~TestRunner() { delete &tester; }
+
+    void init(arg_t value, int threads, Harness::SpinBarrier &barrier, StatisticsCollector *s) {
+        tester.base_init(value, threads, barrier);
+        stat = s;
+        keys.resize(tester.tests_count);
+        for(int testn = 0; testn < tester.tests_count; testn++) {
+            keys[testn].resize(threads);
+            std::string test_name(tester.get_name(testn));
+            for(int threadn = 0; threadn < threads; threadn++)
+                keys[testn][threadn] = stat->SetTestCase(tester_name, test_name.c_str(), threadn);
+        }
+    }
+
+    void run_test(int threadn) {
+        for(int testn = 0; testn < tester.tests_count; testn++) {
+            tester.test_prefix(testn, threadn);
+            tester.barrier->wait();                                 // <<<<<<<<<<<<<<<<< Barrier before running test mode
+            value_t result = tester.test(testn, threadn);
+            stat->AddRoundResult(keys[testn][threadn], result);
+        }
+    }
+
+    void post_process(StatisticsCollector &report) {
+        const int threads = tester.threads_count;
+        for(int testn = 0; testn < tester.tests_count; testn++) {
+            size_t coln = keys[testn][0].getResults().size()-1;
+            value_t rsum = keys[testn][0].getResults()[coln];
+            value_t rmin = rsum, rmax = rsum;
+            for(int threadn = 1; threadn < threads; threadn++) {
+                value_t result = keys[testn][threadn].getResults()[coln];
+                rsum += result; // for both SUM or AVG
+                if(rmin > result) rmin = result;
+                if(rmax < result) rmax = result;
+            }
+            std::string test_name(tester.get_name(testn));
+            const char *rname = tester.get_result_type(testn, TesterBase::SUM);
+            if( rname ) {
+                report.SetTestCase(tester_name, (test_name+rname).c_str(), threads);
+                report.AddRoundResult(rsum);
+            }
+            rname = tester.get_result_type(testn, TesterBase::MIN);
+            if( rname ) {
+                report.SetTestCase(tester_name, (test_name+rname).c_str(), threads);
+                report.AddRoundResult(rmin);
+            }
+            rname = tester.get_result_type(testn, TesterBase::AVG);
+            if( rname ) {
+                report.SetTestCase(tester_name, (test_name+rname).c_str(), threads);
+                report.AddRoundResult(rsum / threads);
+            }
+            rname = tester.get_result_type(testn, TesterBase::MAX);
+            if( rname ) {
+                report.SetTestCase(tester_name, (test_name+rname).c_str(), threads);
+                report.AddRoundResult(rmax);
+            }
+        }
+    }
+};
+
+struct RunArgsBody {
+    const vector<TestRunner*> &run_list;
+    RunArgsBody(const vector<TestRunner*> &a) : run_list(a) { }
+#ifndef __TBB_parallel_for_H
+    void operator()(int thread) const {
+#else
+    void operator()(const tbb::blocked_range<int> &r) const {
+        ASSERT( r.begin() + 1 == r.end(), 0);
+        int thread = r.begin();
+#endif
+        for(size_t i = 0; i < run_list.size(); i++)
+            run_list[i]->run_test(thread);
+    }
+};
+
+//! Main test processor.
+/** Override or use like this:
+ class MyTestCollection : public TestProcessor {
+    void factory(arg_t value, int threads) {
+        process( value, threads,
+            run("my1", new tester<my1>() ),
+            run("my2", new tester<my2>() ),
+        end );
+        if(value == threads)
+            stat->Print();
+    }
+};
+*/
+
+class TestProcessor {
+    friend class TesterBase;
+
+    // <threads, collector>
+    typedef std::map<int, StatisticsCollector *> statistics_collection;
+    statistics_collection stat_by_threads;
+
+protected:
+    // Members
+    const char *collection_name;
+    // current stat
+    StatisticsCollector *stat;
+    // token
+    size_t end;
+
+public:
+    StatisticsCollector report;
+
+    // token of tests list
+    template<typename Test>
+    TestRunner *run(const char *name, Test *test) {
+        return new TestRunner(name, test);
+    }
+
+    // iteration processing
+    void process(arg_t value, int threads, ...) {
+        // prepare items
+        stat = stat_by_threads[threads];
+        if(!stat) {
+            stat_by_threads[threads] = stat = new StatisticsCollector((collection_name + Format("@%d", threads)).c_str(), StatisticsCollector::ByAlg);
+            stat->SetTitle("Detailed log of %s running with %d threads.", collection_name, threads);
+        }
+        Harness::SpinBarrier barrier(threads);
+        // init args
+        va_list args; va_start(args, threads);
+        vector<TestRunner*> run_list; run_list.reserve(16);
+        while(true) {
+            TestRunner *item = va_arg(args, TestRunner*);
+            if( !item ) break;
+            item->init(value, threads, barrier, stat);
+            run_list.push_back(item);
+        }
+        va_end(args);
+        std::ostringstream buf;
+        buf << value;
+        const size_t round_number = stat->GetRoundsCount();
+        stat->SetRoundTitle(round_number, buf.str().c_str());
+        report.SetRoundTitle(round_number, buf.str().c_str());
+        // run them
+#ifndef __TBB_parallel_for_H
+        NativeParallelFor(threads, RunArgsBody(run_list));
+#else
+        tbb::parallel_for(tbb::blocked_range<int>(0,threads,1), RunArgsBody(run_list));
+#endif
+        // destroy args
+        for(size_t i = 0; i < run_list.size(); i++) {
+            run_list[i]->post_process(report);
+            delete run_list[i];
+        }
+    }
+
+public:
+    TestProcessor(const char *name, StatisticsCollector::Sorting sort_by = StatisticsCollector::ByAlg)
+        : collection_name(name), stat(NULL), end(0), report(collection_name, sort_by)
+    { }
+
+    ~TestProcessor() {
+        for(statistics_collection::iterator i = stat_by_threads.begin(); i != stat_by_threads.end(); i++)
+            delete i->second;
+    }
+};
+
+#endif// __TIME_FRAMEWORK_H__
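
To make the moving parts above concrete, here is a hypothetical tester/suite pair written against the concept documented in this header (the workload, names, and constants are invented): it derives a tester from TesterBase, wraps it in the TimeTest adaptor, and drives it through a TestProcessor subclass the same way the perf tests below do.

    // Hypothetical example following the tester concept above; not part of the TBB sources.
    #include "time_framework.h"

    struct busy_loop : TesterBase {
        busy_loop() : TesterBase(1) {}                    // a single test mode
        std::string get_name(int) { return "spin"; }
        value_t test(int /*testn*/, int /*threadn*/) {
            volatile arg_t sink = 0;
            for (arg_t i = 0; i < value; ++i) sink += i;  // "value" is supplied by the framework
            return 0;                                     // ignored: TimeTest records the elapsed time
        }
    };

    class demo_suite : public TestProcessor {
    public:
        demo_suite() : TestProcessor("demo_suite") {}
        void factory(arg_t value, int threads) {
            process( value, threads,
                run("busy", new TimeTest<busy_loop, 1000000>() ),   // report microseconds
            end );
        }
    };

    int main(int argc, char *argv[]) {
        MinThread = 1; MaxThread = tbb::task_scheduler_init::default_num_threads();
        ParseCommandLine(argc, argv);
        demo_suite suite;
        for (int t = MinThread; t <= MaxThread; t++)
            suite.factory(100000, t);
        suite.report.Print(StatisticsCollector::HTMLFile | StatisticsCollector::ExcelXML);
        return 0;
    }
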
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/perf/time_hash_map.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/perf/time_hash_map.cpp
new file mode 100644 (file)
index 0000000..10205d5
--- /dev/null
@@ -0,0 +1,268 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// configuration:
+#define TBB_USE_THREADING_TOOLS 0
+
+//! enable/disable std::map tests
+#define STDTABLE 0
+
+//! enable/disable old implementation tests (correct include file also)
+#define OLDTABLE 0
+#define OLDTABLEHEADER "tbb/concurrent_hash_map-5468.h"//-4329
+
+//! enable/disable experimental implementation tests (correct include file also)
+#define TESTTABLE 1
+#define TESTTABLEHEADER "tbb/concurrent_unordered_map.h"
+
+//! avoid erase()
+#define TEST_ERASE 0
+
+//////////////////////////////////////////////////////////////////////////////////
+
+#include <cstdlib>
+#include <math.h>
+#include "tbb/tbb_stddef.h"
+#include <vector>
+#include <map>
+// needed by hash_maps
+#include <stdexcept>
+#include <iterator>
+#include <algorithm>                 // std::swap
+#include <utility>      // Need std::pair from here
+#include "tbb/cache_aligned_allocator.h"
+#include "tbb/tbb_allocator.h"
+#include "tbb/spin_rw_mutex.h"
+#include "tbb/aligned_space.h"
+#include "tbb/atomic.h"
+#include "tbb/_concurrent_unordered_internal.h"
+// for test
+#include "tbb/spin_mutex.h"
+#include "time_framework.h"
+
+
+using namespace tbb;
+using namespace tbb::internal;
+
+struct IntHashCompare {
+    size_t operator() ( int x ) const { return x; }
+    bool operator() ( int x, int y ) const { return x==y; }
+    static long hash( int x ) { return x; }
+    bool equal( int x, int y ) const { return x==y; }
+};
+
+namespace version_current {
+    namespace tbb { using namespace ::tbb; namespace internal { using namespace ::tbb::internal; } }
+    namespace tbb { namespace interface5 { using namespace ::tbb::interface5; namespace internal { using namespace ::tbb::interface5::internal; } } }
+    #include "tbb/concurrent_hash_map.h"
+}
+typedef version_current::tbb::concurrent_hash_map<int,int> IntTable;
+
+#if OLDTABLE
+#undef __TBB_concurrent_hash_map_H
+namespace version_base {
+    namespace tbb { using namespace ::tbb; namespace internal { using namespace ::tbb::internal; } }
+    namespace tbb { namespace interface5 { using namespace ::tbb::interface5; namespace internal { using namespace ::tbb::interface5::internal; } } }
+    #include OLDTABLEHEADER
+}
+typedef version_base::tbb::concurrent_hash_map<int,int> OldTable;
+#endif
+
+#if TESTTABLE
+#undef __TBB_concurrent_hash_map_H
+namespace version_new {
+    namespace tbb { using namespace ::tbb; namespace internal { using namespace ::tbb::internal; } }
+    namespace tbb { namespace interface5 { using namespace ::tbb::interface5; namespace internal { using namespace ::tbb::interface5::internal; } } }
+    #include TESTTABLEHEADER
+}
+typedef version_new::tbb::concurrent_unordered_map<int,int> TestTable;
+#define TESTTABLE 1
+#endif
+
+///////////////////////////////////////
+
+static const char *map_testnames[] = {
+    "1.insert", "2.count1st", "3.count2nd", "4.insert existing", "5.erase"
+};
+
+template<typename TableType>
+struct TestTBBMap : TesterBase {
+    TableType Table;
+    int n_items;
+
+    TestTBBMap() : TesterBase(4+TEST_ERASE), Table(MaxThread*4) {}
+    void init() { n_items = value/threads_count; }
+
+    std::string get_name(int testn) {
+        return std::string(map_testnames[testn]);
+    }
+
+    double test(int test, int t)
+    {
+        switch(test) {
+          case 0: // fill
+            for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) {
+                Table.insert( std::make_pair(i,i) );
+            }
+            break;
+          case 1: // work1
+            for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) {
+                size_t c = Table.count( i );
+                ASSERT( c == 1, NULL);
+            }
+            break;
+          case 2: // work2
+            for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) {
+                Table.count( i );
+            }
+            break;
+          case 3: // work3
+            for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) {
+                Table.insert( std::make_pair(i,i) );
+            }
+            break;
+#if TEST_ERASE
+          case 4: // clean
+            for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) {
+                ASSERT( Table.erase( i ), NULL);
+            }
+#endif
+        }
+        return 0;
+    }
+};
+
+template<typename M>
+struct TestSTLMap : TesterBase {
+    std::map<int, int> Table;
+    M mutex;
+
+    int n_items;
+    TestSTLMap() : TesterBase(4+TEST_ERASE) {}
+    void init() { n_items = value/threads_count; }
+
+    std::string get_name(int testn) {
+        return std::string(map_testnames[testn]);
+    }
+
+    double test(int test, int t)
+    {
+        switch(test) {
+          case 0: // fill
+            for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) {
+                typename M::scoped_lock with(mutex);
+                Table[i] = 0;
+            }
+            break;
+          case 1: // work1
+            for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) {
+                typename M::scoped_lock with(mutex);
+                size_t c = Table.count(i);
+                ASSERT( c == 1, NULL);
+            }
+            break;
+          case 2: // work2
+            for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) {
+                typename M::scoped_lock with(mutex);
+                Table.count(i);
+            }
+            break;
+          case 3: // work3
+            for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) {
+                typename M::scoped_lock with(mutex);
+                Table.insert(std::make_pair(i,i));
+            }
+            break;
+          case 4: // clean
+            for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) {
+                typename M::scoped_lock with(mutex);
+                Table.erase(i);
+            }
+        }
+        return 0;
+    }
+};
+
+class fake_mutex {
+public:
+    class scoped_lock {
+        fake_mutex *p;
+
+    public:
+        scoped_lock() {}
+        scoped_lock( fake_mutex &m ) { p = &m; }
+        ~scoped_lock() { }
+        void acquire( fake_mutex &m ) { p = &m; }
+        void release() { }
+    };
+};
+
+class test_hash_map : public TestProcessor {
+public:
+    test_hash_map() : TestProcessor("test_hash_map") {}
+    void factory(int value, int threads) {
+        if(Verbose) printf("Processing with %d threads: %d...\n", threads, value);
+        process( value, threads,
+#if STDTABLE
+            run("std::map ", new NanosecPerValue<TestSTLMap<spin_mutex> >() ),
+#endif
+#if OLDTABLE
+            run("old::hmap", new NanosecPerValue<TestTBBMap<OldTable> >() ),
+#endif
+            run("tbb::hmap", new NanosecPerValue<TestTBBMap<IntTable> >() ),
+#if TESTTABLE
+            run("new::hmap", new NanosecPerValue<TestTBBMap<TestTable> >() ),
+#endif
+        end );
+        //stat->Print(StatisticsCollector::Stdout);
+        //if(value >= 2097152) stat->Print(StatisticsCollector::HTMLFile);
+    }
+};
+
+/////////////////////////////////////////////////////////////////////////////////////////
+
+int main(int argc, char* argv[]) {
+    if(argc>1) Verbose = true;
+    //if(argc>2) ExtraVerbose = true;
+    MinThread = 1; MaxThread = task_scheduler_init::default_num_threads();
+    ParseCommandLine( argc, argv );
+
+    ASSERT(tbb_allocator<int>::allocator_type() == tbb_allocator<int>::scalable, "expecting scalable allocator library to be loaded. Please build it by:\n\t\tmake tbbmalloc");
+
+    {
+        test_hash_map the_test;
+        for( int t=MinThread; t <= MaxThread; t++)
+            for( int o=/*2048*/(1<<8)*8; o<2200000; o*=2 )
+                the_test.factory(o, t);
+        the_test.report.SetTitle("Nanoseconds per operation of (Mode) for N items in container (Name)");
+        the_test.report.SetStatisticFormula("1AVG per size", "=AVERAGE(ROUNDS)");
+        the_test.report.Print(StatisticsCollector::HTMLFile|StatisticsCollector::ExcelXML);
+    }
+    return 0;
+}
+
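
A side note on the helpers above: fake_mutex exposes the same scoped_lock interface as the TBB mutexes but performs no locking, so it can be dropped into TestSTLMap to time the unprotected std::map as a baseline. A hypothetical variant of test_hash_map::factory using it could read:

    // Hypothetical factory variant adding an unlocked std::map baseline; names are invented.
    void factory(int value, int threads) {
        process( value, threads,
            run("std::map/nolock", new NanosecPerValue<TestSTLMap<fake_mutex> >() ),
            run("tbb::hmap",       new NanosecPerValue<TestTBBMap<IntTable> >() ),
        end );
    }
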
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/perf/time_hash_map_fill.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/perf/time_hash_map_fill.cpp
new file mode 100644 (file)
index 0000000..3ffcd80
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// configuration:
+
+// Size of final table (must be multiple of STEP_*)
+int MAX_TABLE_SIZE = 2000000;
+
+// Specify list of unique percents (5-30,100) to test against. Max 10 values
+#define UNIQUE_PERCENTS PERCENT(5); PERCENT(10); PERCENT(20); PERCENT(30); PERCENT(100)
+
+// enable/disable tests for:
+#define BOX1 "CHMap"
+#define BOX1TEST ValuePerSecond<Uniques<tbb::concurrent_hash_map<int,int> >, 1000000/*ns*/>
+#define BOX1HEADER "tbb/concurrent_hash_map.h"
+
+// enable/disable tests for:
+#define BOX2 "CUMap"
+#define BOX2TEST ValuePerSecond<Uniques<tbb::concurrent_unordered_map<int,int> >, 1000000/*ns*/>
+#define BOX2HEADER "tbb/concurrent_unordered_map.h"
+
+// enable/disable tests for:
+//#define BOX3 "OLD"
+#define BOX3TEST ValuePerSecond<Uniques<tbb::concurrent_hash_map<int,int> >, 1000000/*ns*/>
+#define BOX3HEADER "tbb/concurrent_hash_map-5468.h"
+
+#define TBB_USE_THREADING_TOOLS 0
+//////////////////////////////////////////////////////////////////////////////////
+
+#include <cstdlib>
+#include <math.h>
+#include "tbb/tbb_stddef.h"
+#include <vector>
+#include <map>
+// needed by hash_maps
+#include <stdexcept>
+#include <iterator>
+#include <algorithm>                 // std::swap
+#include <utility>      // Need std::pair
+#include <cstring>      // Need std::memset
+#include <typeinfo>
+#include "tbb/cache_aligned_allocator.h"
+#include "tbb/tbb_allocator.h"
+#include "tbb/spin_rw_mutex.h"
+#include "tbb/aligned_space.h"
+#include "tbb/atomic.h"
+#include "tbb/_concurrent_unordered_internal.h"
+// for test
+#include "tbb/spin_mutex.h"
+#include "time_framework.h"
+
+
+using namespace tbb;
+using namespace tbb::internal;
+
+/////////////////////////////////////////////////////////////////////////////////////////
+// Input data built for test
+int *Data;
+
+// Main test class used to run the timing tests. All overridden methods are called by the framework
+template<typename TableType>
+struct Uniques : TesterBase {
+    TableType Table;
+    int n_items;
+
+    // Initializes base class with number of test modes
+    Uniques() : TesterBase(2), Table(MaxThread*16) {
+        //Table->max_load_factor(1); // add stub into hash_map to uncomment it
+    }
+    ~Uniques() {}
+    
+    // Returns name of test mode specified by number
+    /*override*/ std::string get_name(int testn) {
+        if(testn == 1) return "find";
+        return "insert";
+    }
+
+    // Informs the class that the value and the number of threads are now known
+    /*override*/ void init() {
+        n_items = value/threads_count; // operations
+    }
+
+    // Informs the class that the test mode for specified thread is about to start
+    /*override*/ void test_prefix(int testn, int t) {
+        barrier->wait();
+        if(Verbose && !t && testn) printf("%s: inserted %u, %g%% of operations\n", tester_name, unsigned(Table.size()), 100.0*Table.size()/(value*testn));
+    }
+
+    // Executes test mode for a given thread. Return value is ignored when used with timing wrappers.
+    /*override*/ double test(int testn, int t)
+    {
+        if( testn != 1 ) { // do insertions
+            for(int i = testn*value+t*n_items, e = testn*value+(t+1)*n_items; i < e; i++) {
+                Table.insert( std::make_pair(Data[i],t) );
+            }
+        } else { // do last finds
+            for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) {
+                size_t c =
+                    Table.count( Data[i] );
+                ASSERT( c == 1, NULL ); // must exist
+            }
+        }
+        return 0;
+    }
+};
+
+/////////////////////////////////////////////////////////////////////////////////////////
+#include <limits>
+
+// Using BOX declarations from configuration
+#include "time_sandbox.h"
+
+int rounds = 0;
+// Prepares the input data for given unique percent
+void execute_percent(test_sandbox &the_test, int p) {
+    int input_size = MAX_TABLE_SIZE*100/p;
+    Data = new int[input_size];
+    int uniques = p==100?std::numeric_limits<int>::max() : MAX_TABLE_SIZE;
+    ASSERT(p==100 || p <= 30, "Function is broken for %% > 30 except for 100%%");
+    for(int i = 0; i < input_size; i++)
+        Data[i] = rand()%uniques;
+    for(int t = MinThread; t <= MaxThread; t++)
+        the_test.factory(input_size, t); // executes the tests specified in BOX-es for given 'value' and threads
+    the_test.report.SetRoundTitle(rounds++, "%d%%", p);
+}
+#define PERCENT(x) execute_percent(the_test, x)
+
+int main(int argc, char* argv[]) {
+    if(argc>1) Verbose = true;
+    //if(argc>2) ExtraVerbose = true;
+    MinThread = 1; MaxThread = task_scheduler_init::default_num_threads();
+    ParseCommandLine( argc, argv );
+    if(getenv("TABLE_SIZE"))
+        MAX_TABLE_SIZE = atoi(getenv("TABLE_SIZE"));
+
+    ASSERT(tbb_allocator<int>::allocator_type() == tbb_allocator<int>::scalable, "expecting scalable allocator library to be loaded. Please build it by:\n\t\tmake tbbmalloc");
+    // Declares test processor
+    test_sandbox the_test("time_hash_map_fill"/*, StatisticsCollector::ByThreads*/);
+    srand(10101);
+    UNIQUE_PERCENTS; // test the percents
+    the_test.report.SetTitle("Operations per nanosecond");
+    the_test.report.SetRunInfo("Items", MAX_TABLE_SIZE);
+    the_test.report.Print(StatisticsCollector::HTMLFile|StatisticsCollector::ExcelXML); // Write files
+    return 0;
+}
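
For readability: UNIQUE_PERCENTS combined with the PERCENT macro defined above is a purely textual expansion, so the UNIQUE_PERCENTS; statement in main() is equivalent to:

    execute_percent(the_test, 5);
    execute_percent(the_test, 10);
    execute_percent(the_test, 20);
    execute_percent(the_test, 30);
    execute_percent(the_test, 100);
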
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/perf/time_locked_work.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/perf/time_locked_work.cpp
new file mode 100644 (file)
index 0000000..13a9391
--- /dev/null
@@ -0,0 +1,174 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+////// Test configuration ////////////////////////////////////////////////////
+#define SECONDS_RATIO 1000000 // microseconds
+
+#ifndef REPEAT_K
+#define REPEAT_K 50 // repeat coefficient
+#endif
+
+int outer_work[] = {/*256,*/ 64, 16, 4, 0};
+int inner_work[] = {32, 8, 0 };
+
+// kept to calibrate the time of the work without synchronization
+#define BOX1 "baseline"
+#define BOX1TEST TimeTest< TBB_Mutex<tbb::null_mutex>, SECONDS_RATIO >
+
+// enable/disable tests for:
+#define BOX2 "spin_mutex"
+#define BOX2TEST TimeTest< TBB_Mutex<tbb::spin_mutex>, SECONDS_RATIO >
+
+// enable/disable tests for:
+#define BOX3 "spin_rw_mutex"
+#define BOX3TEST TimeTest< TBB_Mutex<tbb::spin_rw_mutex>, SECONDS_RATIO >
+
+// enable/disable tests for:
+#define BOX4 "queuing_mutex"
+#define BOX4TEST TimeTest< TBB_Mutex<tbb::queuing_mutex>, SECONDS_RATIO >
+
+// enable/disable tests for:
+//#define BOX5 "queuing_rw_mutex"
+#define BOX5TEST TimeTest< TBB_Mutex<tbb::queuing_rw_mutex>, SECONDS_RATIO >
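+
+// A further mutex flavor could be timed the same way; for instance, plain tbb::mutex
+// (whose header is included below) could occupy BOX6 -- shown here only as a sketch
+// and left disabled:
+//#define BOX6 "mutex"
+//#define BOX6TEST TimeTest< TBB_Mutex<tbb::mutex>, SECONDS_RATIO >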
+
+//////////////////////////////////////////////////////////////////////////////
+
+#include <cstdlib>
+#include <math.h>
+#include <algorithm>                 // std::swap
+#include <utility>      // Need std::pair from here
+#include <sstream>
+#include "tbb/tbb_stddef.h"
+#include "tbb/null_mutex.h"
+#include "tbb/spin_rw_mutex.h"
+#include "tbb/spin_mutex.h"
+#include "tbb/queuing_mutex.h"
+#include "tbb/queuing_rw_mutex.h"
+#include "tbb/mutex.h"
+
+#if INTEL_TRIAL==2
+#include "tbb/parallel_for.h" // enable threading by TBB scheduler
+#include "tbb/task_scheduler_init.h"
+#include "tbb/blocked_range.h" 
+#endif
+// for test
+#include "time_framework.h"
+
+using namespace tbb;
+using namespace tbb::internal;
+
+/////////////////////////////////////////////////////////////////////////////////////////
+
+//! base class for a family of tests
+struct TestLocks : TesterBase {
+    // Inherits "value", "threads_count", and other variables
+    TestLocks() : TesterBase(/*number of modes*/sizeof(outer_work)/sizeof(int)) {}
+    //! returns the name of a test part/mode
+    /*override*/std::string get_name(int testn) {
+        std::ostringstream buf;
+        buf.width(4); buf.fill('0');
+        buf << outer_work[testn]; // mode number
+        return buf.str();
+    }
+    //! enables result types and returns their suffixes
+    /*override*/const char *get_result_type(int, result_t type) const {
+        switch(type) {
+            case MIN: return " min";
+            case MAX: return " max";
+            default: return 0;
+        }
+    }
+    //! repeat count
+    int repeat_until(int /*test_n*/) const {
+        return REPEAT_K*100;//TODO: suggest better?
+    }
+    //! fake work
+    void do_work(int work) volatile {
+        for(int i = 0; i < work; i++) {
+            volatile int x = i;
+            __TBB_Pause(0); // just to call inline assembler
+            x *= work/threads_count;
+        }
+    }
+};
+
+//! template test unit for any of TBB mutexes
+template<typename M>
+struct TBB_Mutex : TestLocks {
+    M mutex;
+
+    double test(int testn, int /*threadn*/)
+    {
+        for(int r = 0; r < repeat_until(testn); ++r) {
+            do_work(outer_work[testn]);
+            {
+                typename M::scoped_lock with(mutex);
+                do_work(/*inner work*/value);
+            }
+        }
+        return 0;
+    }
+};
+
+/////////////////////////////////////////////////////////////////////////////////////////
+
+// Using BOX declarations
+#include "time_sandbox.h"
+
+// run tests for each inner work value
+void RunLoops(test_sandbox &the_test, int thread) {
+    for( unsigned i=0; i<sizeof(inner_work)/sizeof(int); ++i )
+        the_test.factory(inner_work[i], thread);
+}
+
+int main(int argc, char* argv[]) {
+    if(argc>1) Verbose = true;
+    int DefThread = task_scheduler_init::default_num_threads();
+    MinThread = 1; MaxThread = DefThread+1;
+    ParseCommandLine( argc, argv );
+    ASSERT(MinThread <= MaxThread, 0);
+#if INTEL_TRIAL && defined(__TBB_parallel_for_H)
+    task_scheduler_init me(MaxThread);
+#endif
+    {
+        test_sandbox the_test("time_locked_work", StatisticsCollector::ByThreads);
+        //TODO: refactor this out as RunThreads(test&)
+        for( int t = MinThread; t < DefThread && t <= MaxThread; t *= 2)
+            RunLoops( the_test, t ); // execute undersubscribed threads
+        if( DefThread > MinThread && DefThread <= MaxThread )
+            RunLoops( the_test, DefThread ); // execute on all hw threads
+        if( DefThread < MaxThread)
+            RunLoops( the_test, MaxThread ); // execute requested oversubscribed threads
+
+        the_test.report.SetTitle("Time of lock/unlock for mutex Name with Outer and Inner work");
+        //the_test.report.SetStatisticFormula("1AVG per size", "=AVERAGE(ROUNDS)");
+        the_test.report.Print(StatisticsCollector::HTMLFile|StatisticsCollector::ExcelXML, /*ModeName*/ "Outer work");
+    }
+    return 0;
+}
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/perf/time_sandbox.h b/deal.II/contrib/tbb/tbb30_104oss/src/perf/time_sandbox.h
new file mode 100644 (file)
index 0000000..950e674
--- /dev/null
@@ -0,0 +1,179 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TIME_FRAMEWORK_H__
+#error time_framework.h must be included
+#endif
+
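+// Each sandboxN namespace below gets a nested "tbb" namespace that re-exports the real
+// ::tbb (and, where needed, ::tbb::interface5) names, so that code pulled in through a
+// BOXnHEADER compiles unchanged inside its sandbox.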
+#define INJECT_TBB namespace tbb { using namespace ::tbb; namespace internal { using namespace ::tbb::internal; } }
+#define INJECT_TBB5 namespace tbb { namespace interface5 { using namespace ::tbb::interface5; namespace internal { using namespace ::tbb::interface5::internal; } } }
+
+#ifndef INJECT_BOX_NAMES
+#if defined(__TBB_task_H) || defined(__TBB_concurrent_unordered_internal_H) || defined(__TBB_reader_writer_lock_H)
+#define INJECT_BOX_NAMES INJECT_TBB INJECT_TBB5
+#else
+#define INJECT_BOX_NAMES INJECT_TBB
+#endif
+#endif
+
+#ifdef BOX1
+namespace sandbox1 {
+    INJECT_BOX_NAMES
+#   ifdef BOX1HEADER
+#   include BOX1HEADER
+#   endif
+    typedef ::BOX1TEST testbox;
+}
+#endif
+#ifdef BOX2
+namespace sandbox2 {
+    INJECT_BOX_NAMES
+#   ifdef BOX2HEADER
+#   include BOX2HEADER
+#   endif
+    typedef ::BOX2TEST testbox;
+}
+#endif
+#ifdef BOX3
+namespace sandbox3 {
+    INJECT_BOX_NAMES
+#   ifdef BOX3HEADER
+#   include BOX3HEADER
+#   endif
+    typedef ::BOX3TEST testbox;
+}
+#endif
+#ifdef BOX4
+namespace sandbox4 {
+    INJECT_BOX_NAMES
+#   ifdef BOX4HEADER
+#   include BOX4HEADER
+#   endif
+    typedef ::BOX4TEST testbox;
+}
+#endif
+#ifdef BOX5
+namespace sandbox5 {
+    INJECT_BOX_NAMES
+#   ifdef BOX5HEADER
+#   include BOX5HEADER
+#   endif
+    typedef ::BOX5TEST testbox;
+}
+#endif
+#ifdef BOX6
+namespace sandbox6 {
+    INJECT_BOX_NAMES
+#   ifdef BOX6HEADER
+#   include BOX6HEADER
+#   endif
+    typedef ::BOX6TEST testbox;
+}
+#endif
+#ifdef BOX7
+namespace sandbox7 {
+    INJECT_BOX_NAMES
+#   ifdef BOX7HEADER
+#   include BOX7HEADER
+#   endif
+    typedef ::BOX7TEST testbox;
+}
+#endif
+#ifdef BOX8
+namespace sandbox8 {
+    INJECT_BOX_NAMES
+#   ifdef BOX8HEADER
+#   include BOX8HEADER
+#   endif
+    typedef ::BOX8TEST testbox;
+}
+#endif
+#ifdef BOX9
+namespace sandbox9 {
+    INJECT_BOX_NAMES
+#   ifdef BOX9HEADER
+#   include BOX9HEADER
+#   endif
+    typedef ::BOX9TEST testbox;
+}
+#endif
+
+// if harness.h is included
+#if defined(ASSERT) && !HARNESS_NO_PARSE_COMMAND_LINE
+#ifndef TEST_PREFIX
+#define TEST_PREFIX if(Verbose) printf("Processing with %d threads: %ld...\n", threads, long(value));
+#endif
+#endif//harness included
+
+#ifndef TEST_PROCESSOR_NAME
+#define TEST_PROCESSOR_NAME test_sandbox
+#endif
+
+class TEST_PROCESSOR_NAME : public TestProcessor {
+public:
+    TEST_PROCESSOR_NAME(const char *name, StatisticsCollector::Sorting sort_by = StatisticsCollector::ByAlg)
+        : TestProcessor(name, sort_by) {}
+    void factory(arg_t value, int threads) {
+#ifdef TEST_PREFIX
+        TEST_PREFIX
+#endif
+        process( value, threads,
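+// RUNBOX(n) expands to run("<n>.<BOXn>", new sandboxN::testbox()); with time_locked_work.cpp's
+// BOX2, for instance, the resulting test name is "2.spin_mutex".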
+#define RUNBOX(n) run(#n"."BOX##n, new sandbox##n::testbox() )
+#ifdef BOX1
+        RUNBOX(1),
+#endif
+#ifdef BOX2
+        RUNBOX(2),
+#endif
+#ifdef BOX3
+        RUNBOX(3),
+#endif
+#ifdef BOX4
+        RUNBOX(4),
+#endif
+#ifdef BOX5
+        RUNBOX(5),
+#endif
+#ifdef BOX6
+        RUNBOX(6),
+#endif
+#ifdef BOX7
+        RUNBOX(7),
+#endif
+#ifdef BOX8
+        RUNBOX(8),
+#endif
+#ifdef BOX9
+        RUNBOX(9),
+#endif
+        end );
+#ifdef TEST_POSTFIX
+        TEST_POSTFIX
+#endif
+    }
+};
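+
+// Typical usage (a sketch mirroring time_locked_work.cpp; "my_timing_test" is just a
+// placeholder name): define one or more BOXn / BOXnTEST pairs (and BOXnHEADER if the
+// tested type lives in its own header), include this file, then run the factory for
+// each data point:
+//
+//   #define BOX1 "spin_mutex"
+//   #define BOX1TEST TimeTest< TBB_Mutex<tbb::spin_mutex>, SECONDS_RATIO >
+//   #include "time_sandbox.h"
+//   ...
+//   test_sandbox the_test("my_timing_test");
+//   for(int t = MinThread; t <= MaxThread; ++t)
+//       the_test.factory(/*value*/ 8, t);
+//   the_test.report.Print(StatisticsCollector::HTMLFile);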
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/perf/time_vector.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/perf/time_vector.cpp
new file mode 100644 (file)
index 0000000..36442f1
--- /dev/null
@@ -0,0 +1,257 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+//#define DO_SCALABLEALLOC
+
+#include <cstdlib>
+#include <cmath>
+#include <vector>
+#include <algorithm>
+#include <functional>
+#include <numeric>
+#include "tbb/tbb_stddef.h"
+#include "tbb/spin_mutex.h"
+#ifdef DO_SCALABLEALLOC
+#include "tbb/scalable_allocator.h"
+#endif
+#include "tbb/concurrent_vector.h"
+#include "tbb/tbb_allocator.h"
+#include "tbb/cache_aligned_allocator.h"
+#include "tbb/task_scheduler_init.h"
+#include "tbb/parallel_for.h"
+#include "tbb/tick_count.h"
+#include "tbb/blocked_range.h"
+#define HARNESS_CUSTOM_MAIN 1
+#include "../test/harness.h"
+//#include "harness_barrier.h"
+#include "../test/harness_allocator.h"
+#define STATISTICS_INLINE
+#include "statistics.h"
+
+using namespace tbb;
+bool ExtraVerbose = false;
+
+class Timer {
+    tbb::tick_count tick;
+public:
+    Timer() { tick = tbb::tick_count::now(); }
+    double get_time()  { return (tbb::tick_count::now() - tick).seconds(); }
+    double diff_time(const Timer &newer) { return (newer.tick - tick).seconds(); }
+    double mark_time() { tick_count t1(tbb::tick_count::now()), t2(tick); tick = t1; return (t1 - t2).seconds(); }
+    double mark_time(const Timer &newer) { tick_count t(tick); tick = newer.tick; return (tick - t).seconds(); }
+};
+
+/************************************************************************/
+/* TEST1                                                                */
+/************************************************************************/
+#define mk_vector_test1(v, a) vector_test1<v<Timer, static_counting_allocator<a<Timer> > >, v<double, static_counting_allocator<a<double> > > >
+template<class timers_vector_t, class values_vector_t>
+class vector_test1 {
+    const char *mode;
+    StatisticsCollector &stat;
+    StatisticsCollector::TestCase key[16];
+
+public:
+    vector_test1(const char *m, StatisticsCollector &s)  :  mode(m), stat(s) {}
+
+    vector_test1 &operator()(size_t len) {
+        if(Verbose) printf("test1<%s>(%u): collecting timing statistics\n", mode, unsigned(len));
+        __TBB_ASSERT(sizeof(Timer) == sizeof(double), NULL);
+        static const char *test_names[] = {
+            "b)creation wholly",
+            "a)creation by push",
+            "c)operation time per item",
+            0 };
+        for(int i = 0; test_names[i]; ++i) key[i] = stat.SetTestCase(test_names[i], mode, len);
+
+        Timer timer0; timers_vector_t::allocator_type::init_counters();
+        timers_vector_t tv(len);
+        Timer timer1; values_vector_t::allocator_type::init_counters();
+        values_vector_t dv;
+        for (size_t i = 0; i < len; ++i)
+            dv.push_back( i );
+        Timer timer2;
+        for (size_t i = 0; i < len; ++i)
+        {
+            dv[len-i-1] = timer0.diff_time(tv[i]);
+            tv[i].mark_time();
+        }
+        stat.AddStatisticValue( key[2], "1total, ms", "%.3f", timer2.get_time()*1000.0 );
+        stat.AddStatisticValue( key[1], "1total, ms", "%.3f", timer1.diff_time(timer2)*1000.0 );
+        stat.AddStatisticValue( key[0], "1total, ms", "%.3f", timer0.diff_time(timer1)*1000.0 );
+        //allocator statistics
+        stat.AddStatisticValue( key[0], "2total allocations", "%d", int(timers_vector_t::allocator_type::allocations) );
+        stat.AddStatisticValue( key[1], "2total allocations", "%d", int(values_vector_t::allocator_type::allocations) );
+        stat.AddStatisticValue( key[2], "2total allocations", "%d",  0);
+        stat.AddStatisticValue( key[0], "3total alloc#items", "%d", int(timers_vector_t::allocator_type::items_allocated) );
+        stat.AddStatisticValue( key[1], "3total alloc#items", "%d", int(values_vector_t::allocator_type::items_allocated) );
+        stat.AddStatisticValue( key[2], "3total alloc#items", "%d",  0);
+        //remarks
+        stat.AddStatisticValue( key[0], "9note", "segment creation time, ns:");
+        stat.AddStatisticValue( key[2], "9note", "average op-time per item, ns:");
+        Timer last_timer(timer2); double last_value = 0;
+        for (size_t j = 0, i = 2; i < len; i *= 2, j++) {
+            stat.AddRoundResult( key[0], (dv[len-i-1]-last_value)*1000000.0 );
+            last_value = dv[len-i-1];
+            stat.AddRoundResult( key[2], last_timer.diff_time(tv[i])/double(i)*1000000.0 );
+            last_timer = tv[i];
+            stat.SetRoundTitle(j, i);
+        }
+        tv.clear(); dv.clear();
+        //__TBB_ASSERT(timers_vector_t::allocator_type::items_allocated == timers_vector_t::allocator_type::items_freed, NULL);
+        //__TBB_ASSERT(values_vector_t::allocator_type::items_allocated == values_vector_t::allocator_type::items_freed, NULL);
+       return *this;
+    }
+};
+
+/************************************************************************/
+/* TEST2                                                                */
+/************************************************************************/
+#define mk_vector_test2(v, a) vector_test2<v<size_t, a<size_t> > >
+template<class vector_t>
+class vector_test2 {
+    const char *mode;
+    static const int ntrial = 10;
+    StatisticsCollector &stat;
+
+public:
+    vector_test2(const char *m, StatisticsCollector &s)  :  mode(m), stat(s) {}
+
+    vector_test2 &operator()(size_t len) {
+        if(Verbose) printf("test2<%s>(%u): performing standard transformation sequence on vector\n", mode, unsigned(len));
+        StatisticsCollector::TestCase init_key = stat.SetTestCase("allocate", mode, len);
+        StatisticsCollector::TestCase fill_key = stat.SetTestCase("fill", mode, len);
+        StatisticsCollector::TestCase proc_key = stat.SetTestCase("process", mode, len);
+        StatisticsCollector::TestCase full_key = stat.SetTestCase("total time", mode, len);
+        for (int i = 0; i < ntrial; i++) {
+            Timer timer0;
+            vector_t v1(len);
+            vector_t v2(len);
+            Timer timer1;
+            std::generate(v1.begin(), v1.end(), values(0));
+            std::generate(v2.begin(), v2.end(), values(size_t(-len)));
+            Timer timer2;
+            std::reverse(v1.rbegin(), v1.rend());
+            std::inner_product(v1.begin(), v1.end(), v2.rbegin(), 1);
+            std::sort(v1.rbegin(), v1.rend());
+            std::sort(v2.rbegin(), v2.rend());
+            std::set_intersection(v1.begin(), v1.end(), v2.rbegin(), v2.rend(), v1.begin());
+            Timer timer3;
+            stat.AddRoundResult( proc_key, timer2.diff_time(timer3)*1000.0 );
+            stat.AddRoundResult( fill_key, timer1.diff_time(timer2)*1000.0 );
+            stat.AddRoundResult( init_key, timer0.diff_time(timer1)*1000.0 );
+            stat.AddRoundResult( full_key, timer0.diff_time(timer3)*1000.0 );
+        }
+        stat.SetStatisticFormula("1Average", "=AVERAGE(ROUNDS)");
+        stat.SetStatisticFormula("2+/-", "=(MAX(ROUNDS)-MIN(ROUNDS))/2");
+        return *this;
+    }
+
+    class values
+    {
+        size_t value;
+    public:
+        values(size_t i) : value(i) {}
+        size_t operator()() {
+            return value++%(1|(value^55));
+        }
+    };
+};
+
+/************************************************************************/
+/* TEST3                                                                */
+/************************************************************************/
+#define mk_vector_test3(v, a) vector_test3<v<char, local_counting_allocator<a<char>, size_t > > >
+template<class vector_t>
+class vector_test3 {
+    const char *mode;
+    StatisticsCollector &stat;
+
+public:
+    vector_test3(const char *m, StatisticsCollector &s)  :  mode(m), stat(s) {}
+
+    vector_test3 &operator()(size_t len) {
+        if(Verbose) printf("test3<%s>(%u): collecting allocator statistics\n", mode, unsigned(len));
+        static const size_t sz = 1024;
+        vector_t V[sz];
+        StatisticsCollector::TestCase vinst_key = stat.SetTestCase("instances number", mode, len);
+        StatisticsCollector::TestCase count_key = stat.SetTestCase("allocations count", mode, len);
+        StatisticsCollector::TestCase items_key = stat.SetTestCase("allocated items", mode, len);
+        //stat.ReserveRounds(sz-1);
+        for (size_t c = 0, i = 0, s = sz/2; s >= 1 && i < sz; s /= 2, c++)
+        {
+            const size_t count = c? 1<<(c-1) : 0;
+            for (size_t e = i+s; i < e; i++) {
+                //if(count >= 16) V[i].reserve(count);
+                for (size_t j = 0; j < count; j++)
+                    V[i].push_back(j);
+            }
+            stat.SetRoundTitle ( c, count );
+            stat.AddRoundResult( vinst_key, s );
+            stat.AddRoundResult( count_key, V[i-1].get_allocator().allocations );
+            stat.AddRoundResult( items_key, V[i-1].get_allocator().items_allocated );
+        }
+        return *this;
+    }
+};
+
+/************************************************************************/
+/* TYPES SET FOR TESTS                                                  */
+/************************************************************************/
+#define types_set(n, title, op) { StatisticsCollector Collector("time_vector"#n); Collector.SetTitle title; \
+    {mk_vector_test##n(tbb::concurrent_vector, tbb::cache_aligned_allocator) ("TBB:NFS", Collector)op;} \
+    {mk_vector_test##n(tbb::concurrent_vector, tbb::tbb_allocator)           ("TBB:TBB", Collector)op;} \
+    {mk_vector_test##n(tbb::concurrent_vector, std::allocator)               ("TBB:STD", Collector)op;} \
+    {mk_vector_test##n(std::vector, tbb::cache_aligned_allocator)            ("STL:NFS", Collector)op;} \
+    {mk_vector_test##n(std::vector, tbb::tbb_allocator)                      ("STL:TBB", Collector)op;} \
+    {mk_vector_test##n(std::vector, std::allocator)                          ("STL:STD", Collector)op;} \
+    Collector.Print(StatisticsCollector::Stdout|StatisticsCollector::HTMLFile|StatisticsCollector::ExcelXML); }
+
+
+/************************************************************************/
+/* MAIN DRIVER                                                          */
+/************************************************************************/
+int main(int argc, char* argv[]) {
+    if(argc>1) Verbose = true;
+    if(argc>2) ExtraVerbose = true;
+    MinThread = 0; MaxThread = 500000; // reused here with a different meaning: test number (MinThread) and problem size (MaxThread)
+    ParseCommandLine( argc, argv );
+
+    ASSERT(tbb_allocator<int>::allocator_type() == tbb_allocator<int>::scalable, "expecting scalable allocator library to be loaded");
+    
+    if(!MinThread || MinThread == 1)
+        types_set(1, ("Vectors performance test #1 for %d", MaxThread), (MaxThread) )
+    if(!MinThread || MinThread == 2)
+        types_set(2, ("Vectors performance test #2 for %d", MaxThread), (MaxThread) )
+    if(!MinThread || MinThread == 3)
+        types_set(3, ("Vectors performance test #3 for %d", MaxThread), (MaxThread) )
+
+    if(!Verbose) printf("done\n");
+    return 0;
+}
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/client/index.html b/deal.II/contrib/tbb/tbb30_104oss/src/rml/client/index.html
new file mode 100644 (file)
index 0000000..e92185c
--- /dev/null
@@ -0,0 +1,43 @@
+<HTML>
+<BODY>
+<H2>Overview</H2>
+
+This directory has source code that must be statically linked into an RML client.
+
+<H2>Files</H2>
+
+<DL>
+<DT><P><A HREF="rml_factory.h">rml_factory.h</A>
+<DD>Text shared by <A HREF="rml_omp.cpp">rml_omp.cpp</A> and <A HREF="rml_tbb.cpp">rml_tbb.cpp</A>.
+       This is not an ordinary include file, so it does not have an #ifndef guard.</P>
+</DL>
+
+<H3> Specific to client=OpenMP</H3>
+<DL>
+<DT><P><A HREF="rml_omp.cpp">rml_omp.cpp</A>
+<DD>Source file for OpenMP client.</P>
+<DT><P><A HREF="omp_dynamic_link.h">omp_dynamic_link.h</A>
+<DT><A HREF="omp_dynamic_link.cpp">omp_dynamic_link.cpp</A>
+<DD>Source files for dynamic linking support.  
+       The code is taken from the TBB source directory, but adjusted so that it 
+       appears in namespace <TT>__kmp</TT> instead of namespace <TT>tbb::internal</TT>.
+</DL>
+<H3> Specific to client=TBB</H3>
+<DL>
+<DT><P><A HREF="rml_tbb.cpp">rml_tbb.cpp</A>
+<DD>Source file for TBB client.  It uses the dynamic linking support from the TBB source directory.
+</DL>
+
+<HR>
+<A HREF="../index.html">Up to parent directory</A>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are 
+registered trademarks or trademarks of Intel Corporation or its 
+subsidiaries in the United States and other countries. 
+<p></p>
+* Other names and brands may be claimed as the property of others.
+</BODY>
+</HTML>
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/client/library_assert.h b/deal.II/contrib/tbb/tbb30_104oss/src/rml/client/library_assert.h
new file mode 100644 (file)
index 0000000..f198fa5
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef LIBRARY_ASSERT_H
+#define LIBRARY_ASSERT_H
+
+#ifndef  LIBRARY_ASSERT
+#ifdef KMP_ASSERT2
+#define LIBRARY_ASSERT(x,y) KMP_ASSERT2((x),(y))
+#else
+#include <assert.h>
+#define LIBRARY_ASSERT(x,y) assert(x)
+#endif
+#endif /* LIBRARY_ASSERT */
+
+#endif /* LIBRARY_ASSERT_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/client/omp_dynamic_link.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/rml/client/omp_dynamic_link.cpp
new file mode 100644 (file)
index 0000000..c40e941
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "omp_dynamic_link.h"
+#include "library_assert.h"
+#include "tbb/dynamic_link.cpp" // Refers to src/tbb, not include/tbb
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/client/omp_dynamic_link.h b/deal.II/contrib/tbb/tbb30_104oss/src/rml/client/omp_dynamic_link.h
new file mode 100644 (file)
index 0000000..02f306c
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __KMP_omp_dynamic_link_H
+#define __KMP_omp_dynamic_link_H
+
+#define OPEN_INTERNAL_NAMESPACE namespace __kmp {
+#define CLOSE_INTERNAL_NAMESPACE }
+
+#include "tbb/dynamic_link.h" // Refers to src/tbb, not include/tbb 
+
+#endif /* __KMP_omp_dynamic_link_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/client/rml_factory.h b/deal.II/contrib/tbb/tbb30_104oss/src/rml/client/rml_factory.h
new file mode 100644 (file)
index 0000000..e6d52a5
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// No ifndef guard because this file is not a normal include file.
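+// The including source file (rml_omp.cpp or rml_tbb.cpp) is expected to define FACTORY,
+// SERVER, CLIENT, MAKE_SERVER and GET_INFO before including this file.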
+
+#if TBB_USE_DEBUG
+#define DEBUG_SUFFIX "_debug"
+#else
+#define DEBUG_SUFFIX
+#endif /* TBB_USE_DEBUG */
+
+// RML_SERVER_NAME is the name of the RML server library.
+#if _WIN32||_WIN64
+#define RML_SERVER_NAME "irml" DEBUG_SUFFIX ".dll"
+#elif __APPLE__
+#define RML_SERVER_NAME "libirml" DEBUG_SUFFIX ".dylib"
+#elif __linux__
+#define RML_SERVER_NAME "libirml" DEBUG_SUFFIX ".so.1"
+#elif __FreeBSD__ || __sun || _AIX
+#define RML_SERVER_NAME "libirml" DEBUG_SUFFIX ".so"
+#else
+#error Unknown OS
+#endif
+
+#include "library_assert.h"
+
+const ::rml::versioned_object::version_type CLIENT_VERSION = 2;
+
+#if __TBB_WEAK_SYMBOLS
+    #pragma weak __RML_open_factory
+    #pragma weak __TBB_make_rml_server
+    #pragma weak __RML_close_factory
+    #pragma weak __TBB_call_with_my_server_info
+    extern "C" {
+        ::rml::factory::status_type __RML_open_factory ( ::rml::factory&, ::rml::versioned_object::version_type&, ::rml::versioned_object::version_type );
+        ::rml::factory::status_type __TBB_make_rml_server( tbb::internal::rml::tbb_factory& f, tbb::internal::rml::tbb_server*& server, tbb::internal::rml::tbb_client& client );
+        void __TBB_call_with_my_server_info( ::rml::server_info_callback_t cb, void* arg );
+        void __RML_close_factory( ::rml::factory& f );
+    }
+#endif /* __TBB_WEAK_SYMBOLS */
+
+::rml::factory::status_type FACTORY::open() {
+    // Failure of the following assertion indicates that the factory is already open, or was not zero-initialized.
+    LIBRARY_ASSERT( !library_handle, NULL );
+    status_type (*open_factory_routine)( factory&, version_type&, version_type );
+    dynamic_link_descriptor server_link_table[4] = {
+        DLD(__RML_open_factory,open_factory_routine),
+        MAKE_SERVER(my_make_server_routine),
+        DLD(__RML_close_factory,my_wait_to_close_routine),
+        GET_INFO(my_call_with_server_info_routine),
+    };
+    status_type result;
+    dynamic_link_handle h;
+    if( dynamic_link( RML_SERVER_NAME, server_link_table, 4, 4, &h ) ) {
+        library_handle = h; 
+        version_type server_version;
+        result = (*open_factory_routine)( *this, server_version, CLIENT_VERSION );
+        // server_version can be checked here for incompatibility if necessary.
+    } else {
+        library_handle = NULL;
+        result = st_not_found;
+    }
+    return result;
+}
+
+void FACTORY::close() {
+    if( library_handle )
+        (*my_wait_to_close_routine)(*this);
+    if( (size_t)library_handle>FACTORY::c_dont_unload ) {
+        dynamic_link_handle h = library_handle;
+        dynamic_unlink(h);
+        library_handle = NULL;
+    }
+}
+
+::rml::factory::status_type FACTORY::make_server( SERVER*& s, CLIENT& c) {
+    // Failure of the following assertion means that the factory was not successfully opened.
+    LIBRARY_ASSERT( my_make_server_routine, NULL );
+    return (*my_make_server_routine)(*this,s,c);
+}
+
+void FACTORY::call_with_server_info( ::rml::server_info_callback_t cb, void* arg ) const {
+    // Failure of the following assertion means that the factory was not successfully opened.
+    LIBRARY_ASSERT( my_call_with_server_info_routine, NULL );
+    (*my_call_with_server_info_routine)( cb, arg );
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/client/rml_omp.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/rml/client/rml_omp.cpp
new file mode 100644 (file)
index 0000000..336fd9c
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "rml_omp.h"
+#include "omp_dynamic_link.h"
+#include <assert.h>
+
+namespace __kmp {
+namespace rml {
+
+#define MAKE_SERVER(x) DLD(__KMP_make_rml_server,x)
+#define GET_INFO(x) DLD(__KMP_call_with_my_server_info,x)
+#define SERVER omp_server 
+#define CLIENT omp_client
+#define FACTORY omp_factory
+#include "rml_factory.h"
+
+} // rml
+} // __kmp
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/client/rml_tbb.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/rml/client/rml_tbb.cpp
new file mode 100644 (file)
index 0000000..d627737
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "../include/rml_tbb.h"
+#include "tbb/dynamic_link.h"
+#include <assert.h>
+
+namespace tbb {
+namespace internal {
+namespace rml {
+
+#define MAKE_SERVER(x) DLD(__TBB_make_rml_server,x)
+#define GET_INFO(x) DLD(__TBB_call_with_my_server_info,x)
+#define SERVER tbb_server 
+#define CLIENT tbb_client
+#define FACTORY tbb_factory
+#include "rml_factory.h"
+
+} // rml
+} // internal
+} // tbb
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/include/index.html b/deal.II/contrib/tbb/tbb30_104oss/src/rml/include/index.html
new file mode 100644 (file)
index 0000000..6a47794
--- /dev/null
@@ -0,0 +1,30 @@
+<HTML>
+<BODY>
+<H2>Overview</H2>
+
+This directory has the include files for the Resource Management Layer (RML).
+
+<H2>Files</H2>
+
+<DL>
+<DT><P><A HREF="rml_base.h">rml_base.h</A>
+<DD>Interfaces shared by TBB and OpenMP.</P>
+<DT><P><A HREF="rml_omp.h">rml_omp.h</A>
+<DD>Interface exclusive to OpenMP.</P>
+<DT><P><A HREF="rml_tbb.h">rml_tbb.h</A>
+<DD>Interface exclusive to TBB.</P>
+</DL>
+
+<HR>
+<A HREF="../index.html">Up to parent directory</A>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are 
+registered trademarks or trademarks of Intel Corporation or its 
+subsidiaries in the United States and other countries. 
+<p></p>
+* Other names and brands may be claimed as the property of others.
+</BODY>
+</HTML>
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/include/rml_base.h b/deal.II/contrib/tbb/tbb30_104oss/src/rml/include/rml_base.h
new file mode 100644 (file)
index 0000000..6abdaf8
--- /dev/null
@@ -0,0 +1,196 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Header guard and namespace names follow rml conventions.
+
+#ifndef __RML_rml_base_H
+#define __RML_rml_base_H
+
+#include <cstddef>
+
+#if _WIN32||_WIN64
+#include <windows.h>
+#endif /* _WIN32||_WIN64 */
+
+#ifdef RML_PURE_VIRTUAL_HANDLER
+#define RML_PURE(T) {RML_PURE_VIRTUAL_HANDLER(); return (T)0;}
+#else
+#define RML_PURE(T) = 0;
+#endif
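+// RML_PURE(T) either declares the method pure virtual or, when RML_PURE_VIRTUAL_HANDLER is
+// defined, gives it a stub body that calls the handler and returns (T)0.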
+
+namespace rml {
+
+//! Base class for denying assignment and copy constructor.
+class no_copy {
+    void operator=( no_copy& );
+    no_copy( no_copy& );
+public:
+    no_copy() {}
+};
+
+class server;
+
+class versioned_object {
+public:
+    //! A version number
+    typedef unsigned version_type;
+    
+    //! Get version of this object
+    /** The version number is incremented when an incompatible change is introduced.
+        The version number is invariant for the lifetime of the object. */
+    virtual version_type version() const RML_PURE(version_type)
+};
+
+//! Represents a client's job for an execution context.
+/** A job object is constructed by the client.
+    Not derived from versioned_object because version is same as for client. */
+class job {
+    friend class server;
+
+    //! Word for use by server
+    /** Typically the server uses it to speed up internal lookup.
+        Clients must not modify the word. */
+    void* scratch_ptr;
+};
+
+//! Information that client provides to server when asking for a server.
+/** The instance must endure at least until acknowledge_close_connection is called. */
+class client: public versioned_object {
+public:
+    //! Typedef for convenience of derived classes in other namespaces.
+    typedef ::rml::job job;
+
+    //! Index of a job in a job pool
+    typedef unsigned size_type;
+
+    //! Maximum number of threads that the client can exploit profitably if nothing else is running on the machine.
+    /** The returned value should remain invariant for the lifetime of the connection.  [idempotent] */
+    virtual size_type max_job_count() const RML_PURE(size_type)
+
+    //! Minimum stack size for each job.  0 means to use default stack size. [idempotent]
+    virtual std::size_t min_stack_size() const RML_PURE(std::size_t)
+
+    //! Server calls this routine when it needs client to create a job object.
+    virtual job* create_one_job() RML_PURE(job*)
+
+    //! Acknowledge that all jobs have been cleaned up.
+    /** Called by server in response to request_close_connection
+        after cleanup(job) has been called for each job. */
+    virtual void acknowledge_close_connection() RML_PURE(void)
+
+    enum policy_type {turnaround,throughput};
+
+    //! Inform server of desired policy. [idempotent]
+    virtual policy_type policy() const RML_PURE(policy_type)
+
+    //! Inform client that server is done with *this.   
+    /** Client should destroy the job.
+        Not necessarily called by execution context represented by *this.
+        Never called while any other thread is working on the job. */
+    virtual void cleanup( job& ) RML_PURE(void)
+
+    // In general, we should not add new virtual methods, because that would 
+    // break derived classes.  Think about reserving some vtable slots.  
+};
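+
+// Sketch of a minimal client (illustrative only; "trivial_client" and "trivial_job" are
+// hypothetical names, and real clients typically carry more state):
+//
+//   struct trivial_job: public rml::job {};
+//   class trivial_client: public rml::client {
+//       /*override*/ version_type version() const { return 1; }           // hypothetical version
+//       /*override*/ size_type max_job_count() const { return 4; }        // hypothetical limit
+//       /*override*/ std::size_t min_stack_size() const { return 0; }     // use default stack size
+//       /*override*/ job* create_one_job() { return new trivial_job; }
+//       /*override*/ void cleanup( job& j ) { delete static_cast<trivial_job*>(&j); }
+//       /*override*/ policy_type policy() const { return turnaround; }
+//       /*override*/ void acknowledge_close_connection() { /* all jobs cleaned up */ }
+//   };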
+
+// Information that server provides to client.
+// Virtual functions are routines provided by the server for the client to call. 
+class server: public versioned_object {
+public:
+    //! Typedef for convenience of derived classes.
+    typedef ::rml::job job;
+
+#if _WIN32||_WIN64
+    typedef void* execution_resource_t;
+#endif
+
+    //! Request that connection to server be closed.
+    /** Causes each job associated with the client to have its cleanup method called,
+        possibly by a thread different than the thread that created the job. 
+        This method can return before all cleanup methods return. 
+        Actions that have to wait after all cleanup methods return should be part of 
+        client::acknowledge_close_connection. 
+        Pass true as exiting if request_close_connection() is called because exit() is
+        called. In that case, it is the client's responsibility to make sure all threads
+        are terminated. In all other cases, pass false.  */
+    virtual void request_close_connection( bool exiting = false ) = 0;
+
+    //! Called by client thread when it reaches a point where it cannot make progress until other threads do.  
+    virtual void yield() = 0;
+
+    //! Called by client to indicate a change in the number of non-RML threads that are running.
+    /** This is a performance hint to the RML to adjust how many threads it should let run 
+        concurrently.  The delta is the change in the number of non-RML threads that are running.
+        For example, a value of 1 means the client has started running another thread, and a value 
+        of -1 indicates that the client has blocked or terminated one of its threads. */
+    virtual void independent_thread_number_changed( int delta ) = 0;
+
+    //! Default level of concurrency for which RML strives when there are no non-RML threads running.
+    /** Normally, the value is the hardware concurrency minus one. 
+        The "minus one" accounts for the thread created by main(). */
+    virtual unsigned default_concurrency() const = 0;
+
+protected:
+    static void*& scratch_ptr( job& j ) {return j.scratch_ptr;}
+};
+
+class factory {
+public:
+    //! status results
+    enum status_type {
+        st_success=0,
+        st_connection_exists,
+        st_not_found,
+        st_incompatible
+    };
+
+    //! Scratch pointer for use by RML.
+    void* scratch_ptr;
+
+protected:
+    //! Pointer to routine that waits for server to indicate when client can close itself.
+    status_type (*my_wait_to_close_routine)( factory& );
+
+public:
+    //! Library handle for use by RML.
+#if _WIN32||_WIN64
+    HMODULE library_handle;
+#else
+    void* library_handle;
+#endif /* _WIN32||_WIN64 */ 
+
+    //! Special marker to keep dll from being unloaded prematurely
+    static const std::size_t c_dont_unload = 1;
+};
+
+//! Typedef for callback functions to print server info
+typedef void (*server_info_callback_t)( void* arg, const char* server_info );
+
+} // namespace rml
+
+#endif /* __RML_rml_base_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/include/rml_omp.h b/deal.II/contrib/tbb/tbb30_104oss/src/rml/include/rml_omp.h
new file mode 100644 (file)
index 0000000..8202ade
--- /dev/null
@@ -0,0 +1,138 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Header guard and namespace names follow OpenMP runtime conventions.
+
+#ifndef KMP_RML_OMP_H
+#define KMP_RML_OMP_H
+
+#include "rml_base.h"
+
+namespace __kmp {
+namespace rml {
+
+class omp_client;
+
+//------------------------------------------------------------------------
+// Classes instantiated by the server
+//------------------------------------------------------------------------
+
+//! Represents a set of omp worker threads provided by the server.
+class omp_server: public ::rml::server {
+public:
+    //! A number of coins (i.e., threads)
+    typedef unsigned size_type;
+
+    //! Return the number of coins in the bank (negative if the machine is oversubscribed).
+    virtual int current_balance() const = 0;
+  
+    //! Request n coins.  Returns the number of coins granted, or the oversubscription amount if negative.
+    /** Always granted if is_strict is true.
+        - Positive or zero result indicates that the number of coins was taken from the bank.
+        - Negative result indicates that no coins were taken, that the bank has a deficit of
+          that amount, and that the caller (if being a good citizen) should return that many coins.
+     */
+    virtual int try_increase_load( size_type /*n*/, bool /*strict*/ ) = 0;
+
+    //! Return n coins into the bank.
+    virtual void decrease_load( size_type /*n*/ ) = 0;
+
+    //! Convert n coins into n threads.
+    /** When a thread returns, it is converted back into a coin and the coin is returned to the bank. */
+    virtual void get_threads( size_type /*m*/, void* /*cookie*/, job* /*array*/[] ) = 0;
+
+    /** Putting a thread to sleep - converts a thread into a coin.
+        Waking up a thread        - converts a coin into a thread.
+
+        Note: conversion between a coin and a thread does not affect the accounting.
+     */
+#if _WIN32||_WIN64
+    //! Inform server of a tbb master thread.
+    virtual void register_master( execution_resource_t& /*v*/ ) = 0;
+
+    //! Inform server that the tbb master thread is done with its work.
+    virtual void unregister_master( execution_resource_t /*v*/ ) = 0;
+    //! deactivate
+    /** give control to ConcRT RM */
+    virtual void deactivate( job* ) = 0;
+
+    //! reactivate
+    virtual void reactivate( job* ) = 0;
+#endif /* _WIN32||_WIN64 */
+};
+
+
+//------------------------------------------------------------------------
+// Classes (or base classes thereof) instantiated by the client
+//------------------------------------------------------------------------
+
+class omp_client: public ::rml::client {
+public:
+    //! Called by server thread when it delivers a thread to client
+    /** The index argument is a 0-origin index of the job for this thread within the array
+        returned by method get_threads.  The server decreases the load by 1 (i.e., returns the coin
+        to the bank) after this method returns. */
+    virtual void process( job&, void* /*cookie*/, size_type /*index*/ ) RML_PURE(void)
+};
+
+/** Client must ensure that instance is zero-inited, typically by being a file-scope object. */
+class omp_factory: public ::rml::factory {
+
+    //! Pointer to routine that creates an RML server.
+    status_type (*my_make_server_routine)( omp_factory&, omp_server*&, omp_client& );
+
+    //! Pointer to routine that calls callback function with server version info.
+    void (*my_call_with_server_info_routine)( ::rml::server_info_callback_t cb, void* arg );
+
+public:
+    typedef ::rml::versioned_object::version_type version_type;
+    typedef omp_client client_type;
+    typedef omp_server server_type;
+
+    //! Open factory.
+    /** Dynamically links against RML library. 
+        Returns st_success, st_incompatible, or st_not_found. */
+    status_type open();
+
+    //! Factory method to be called by client to create a server object.
+    /** Factory must be open. 
+        Returns st_success or st_incompatible. */
+    status_type make_server( server_type*&, client_type& );
+
+    //! Close factory.
+    void close();
+
+    //! Call the callback with the server build info.
+    void call_with_server_info( ::rml::server_info_callback_t cb, void* arg ) const;
+};
+
+} // namespace rml
+} // namespace __kmp
+
+#endif /* KMP_RML_OMP_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/include/rml_tbb.h b/deal.II/contrib/tbb/tbb30_104oss/src/rml/include/rml_tbb.h
new file mode 100644 (file)
index 0000000..6e9ea7e
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Header guard and namespace names follow TBB conventions.
+
+#ifndef __TBB_rml_tbb_H
+#define __TBB_rml_tbb_H
+
+#include "rml_base.h"
+
+namespace tbb {
+namespace internal {
+namespace rml {
+
+class tbb_client;
+
+//------------------------------------------------------------------------
+// Classes instantiated by the server
+//------------------------------------------------------------------------
+
+//! Represents a set of tbb worker threads provided by the server.
+class tbb_server: public ::rml::server {
+public:
+    //! Inform server of adjustments in the number of workers that the client can profitably use.
+    virtual void adjust_job_count_estimate( int delta ) = 0;
+
+#if _WIN32||_WIN64
+    //! Inform server of a tbb master thread.
+    virtual void register_master( execution_resource_t& v ) = 0;
+
+    //! Inform server that the tbb master thread is done with its work.
+    virtual void unregister_master( execution_resource_t v ) = 0;
+#endif /* _WIN32||_WIN64 */
+};
+
+//------------------------------------------------------------------------
+// Classes instantiated by the client
+//------------------------------------------------------------------------
+
+class tbb_client: public ::rml::client {
+public:
+    //! Defined by TBB to steal a task and execute it.  
+    /** Called by server when it wants an execution context to do some TBB work.
+        The method should return when it is okay for the thread to yield indefinitely. */
+    virtual void process( job& ) RML_PURE(void)
+};
+
+/** Client must ensure that instance is zero-inited, typically by being a file-scope object. */
+class tbb_factory: public ::rml::factory {
+
+    //! Pointer to routine that creates an RML server.
+    status_type (*my_make_server_routine)( tbb_factory&, tbb_server*&, tbb_client& );
+
+    //! Pointer to routine that calls callback function with server version info.
+    void (*my_call_with_server_info_routine)( ::rml::server_info_callback_t cb, void* arg );
+
+public:
+    typedef ::rml::versioned_object::version_type version_type;
+    typedef tbb_client client_type;
+    typedef tbb_server server_type;
+
+    //! Open factory.
+    /** Dynamically links against RML library. 
+        Returns st_success, st_incompatible, or st_not_found. */
+    status_type open();
+
+    //! Factory method to be called by client to create a server object.
+    /** Factory must be open. 
+        Returns st_success or st_incompatible. */
+    status_type make_server( server_type*&, client_type& );
+
+    //! Close factory.
+    void close();
+
+    //! Call the callback with the server build info.
+    void call_with_server_info( ::rml::server_info_callback_t cb, void* arg ) const;
+};
+
+} // namespace rml
+} // namespace internal
+} // namespace tbb
+
+#endif /*__TBB_rml_tbb_H */
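
The tbb_server interface above exposes a single portable knob, adjust_job_count_estimate(), which the client uses to tell the server how many workers it can currently put to good use; on Windows the register_master/unregister_master pair additionally brackets a master thread's participation. A small sketch of how a client might report changing demand; the helper names are illustrative, and the server reference is assumed to come from tbb_factory::make_server().

    // Sketch: report demand changes to an RML server obtained via tbb_factory::make_server().
    #include "rml_tbb.h"

    void on_arena_grew( tbb::internal::rml::tbb_server& server, int new_workers_wanted ) {
        // Positive delta: the client can now profitably use more worker threads.
        server.adjust_job_count_estimate( new_workers_wanted );
    }

    void on_arena_shrunk( tbb::internal::rml::tbb_server& server, int workers_released ) {
        // Negative delta: give the workers back so other clients (e.g. OpenMP) can use them.
        server.adjust_job_count_estimate( -workers_released );
    }
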
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/index.html b/deal.II/contrib/tbb/tbb30_104oss/src/rml/index.html
new file mode 100644 (file)
index 0000000..1582714
--- /dev/null
@@ -0,0 +1,32 @@
+<HTML>
+<BODY>
+<H2>Overview</H2>
+
+The subdirectories pertain to the Resource Management Layer (RML).
+
+<H2>Directories</H2>
+
+<DL>
+<DT><P><A HREF="include/index.html">include/</A>
+<DD>Include files used by clients of RML.</P>
+<DT><P><A HREF="client/index.html">client/</A>
+<DD>Source files for code that must be statically linked with a client.</P>
+<DT><P><A HREF="server/index.html">server/</A>
+<DD>Source files for the RML server.</P>
+<DT><P><A HREF="test">test/</A>
+<DD>Unit tests for RML server and its components.</P>
+</DL>
+
+<HR>
+<A HREF="../index.html">Up to parent directory</A>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are 
+registered trademarks or trademarks of Intel Corporation or its 
+subsidiaries in the United States and other countries. 
+<p></p>
+* Other names and brands may be claimed as the property of others.
+</BODY>
+</HTML>
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/perfor/omp_nested.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/rml/perfor/omp_nested.cpp
new file mode 100644 (file)
index 0000000..d72801b
--- /dev/null
@@ -0,0 +1,152 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include <cstddef>
+#include <cstdlib>
+#include <cstdio>
+#include <float.h>
+#include <math.h>
+#include <time.h>
+
+#include <omp.h>
+#include <assert.h>
+
+#include "thread_level.h"
+
+#if _WIN32||_WIN64
+#include <Windows.h> /* Need Sleep */
+#else
+#include <unistd.h>  /* Need usleep */
+#endif
+
+void MilliSleep( unsigned milliseconds ) {
+#if _WIN32||_WIN64
+    Sleep( milliseconds );
+#else
+    usleep( milliseconds*1000 );
+#endif /* _WIN32||_WIN64 */
+}
+
+// Algorithm parameters
+const int Max_OMP_Outer_Threads = 8;
+
+// Global variables
+int max_outer_threads = Max_OMP_Outer_Threads;
+
+// Print help on command-line arguments
+void help_message(char *prog_name) {
+  fprintf(stderr, "\n%s usage:\n", prog_name);
+  fprintf(stderr, 
+         "  Parameters:\n"
+         "    -o<num> : max # of threads OMP should use at outer level\n"
+         "\n  Help:\n"
+         "    -h : print this help message\n");
+}
+
+// Process command-line arguments
+void process_args(int argc, char *argv[], int *max_outer_t) {
+  (*max_outer_t) = omp_get_max_threads();
+  for (int i=1; i<argc; ++i) {  
+    if (argv[i][0] == '-') {
+      switch (argv[i][1]) {
+      case 'o': // set max_outer_threads
+       if (sscanf(&argv[i][2], "%d", max_outer_t) != 1 || *max_outer_t < 1) {
+         fprintf(stderr, "%s Warning: argument of -o option unacceptable: %s\n", argv[0], &argv[i][2]);
+         help_message(argv[0]);
+       }
+       break;
+      case 'h': // print help message
+       help_message(argv[0]);
+       exit(0);
+       break;
+      default:
+       fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]);
+       help_message(argv[0]);
+       break;
+      }
+    } else {
+      fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]);
+      help_message(argv[0]);
+    }
+  }
+}
+
+int main(int argc, char *argv[]) { 
+  process_args(argc, argv, &max_outer_threads);
+#ifdef LOG_THREADS
+  TotalThreadLevel.init();
+#endif
+
+  double start, end;
+  start = omp_get_wtime( );
+  
+#pragma omp parallel num_threads(max_outer_threads)
+  {
+    int omp_thread = omp_get_thread_num();
+#ifdef LOG_THREADS
+    if (omp_thread == 0)
+      TotalThreadLevel.change_level(omp_get_num_threads(), omp_outer);
+#endif
+    if (omp_thread == 0) {
+      MilliSleep(3000);
+#ifdef LOG_THREADS
+      TotalThreadLevel.change_level(-1, omp_outer);
+#endif
+#pragma omp parallel
+      {
+       int my_omp_thread = omp_get_thread_num();
+#ifdef LOG_THREADS
+       if (my_omp_thread == 0)
+         TotalThreadLevel.change_level(omp_get_num_threads(), omp_inner);
+#endif
+       printf("Inner thread %d nested inside outer thread %d\n", my_omp_thread, omp_thread);
+#ifdef LOG_THREADS
+       if (my_omp_thread == 0)
+         TotalThreadLevel.change_level(-omp_get_num_threads(), omp_inner);
+#endif
+      }
+#ifdef LOG_THREADS
+      TotalThreadLevel.change_level(1, omp_outer);
+#endif
+    }
+    else {
+      MilliSleep(6000);
+    }
+#ifdef LOG_THREADS
+    if (omp_thread == 0)
+      TotalThreadLevel.change_level(-omp_get_num_threads(), omp_outer);
+#endif
+  }
+  end = omp_get_wtime( );
+  printf("Simple test of nested OMP (%d outer threads max) took: %6.6f\n",
+        max_outer_threads, end-start);
+#ifdef LOG_THREADS
+  TotalThreadLevel.dump();
+#endif
+  return 0;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/perfor/omp_simple.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/rml/perfor/omp_simple.cpp
new file mode 100644 (file)
index 0000000..ddfcef8
--- /dev/null
@@ -0,0 +1,168 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include <cstddef>
+#include <cstdlib>
+#include <cstdio>
+#include <float.h>
+#include <math.h>
+#include <time.h>
+
+#include <omp.h>
+#include <assert.h>
+
+#include "thread_level.h"
+
+#include "tbb/task.h"
+#include "tbb/task_scheduler_init.h"
+#include "tbb/parallel_for.h"
+#include "tbb/blocked_range.h"
+
+#if _WIN32||_WIN64
+#include <Windows.h> /* Need Sleep */
+#else
+#include <unistd.h>  /* Need usleep */
+#endif
+
+void MilliSleep( unsigned milliseconds ) {
+#if _WIN32||_WIN64
+    Sleep( milliseconds );
+#else
+    usleep( milliseconds*1000 );
+#endif /* _WIN32||_WIN64 */
+}
+
+using namespace std;
+using namespace tbb;
+
+// Algorithm parameters
+const int Max_TBB_Threads = 16;
+const int Max_OMP_Threads = 16;
+
+// Global variables
+int max_tbb_threads = Max_TBB_Threads;
+int max_omp_threads = Max_OMP_Threads;
+
+// Print help on command-line arguments
+void help_message(char *prog_name) {
+  fprintf(stderr, "\n%s usage:\n", prog_name);
+  fprintf(stderr, 
+         "  Parameters:\n"
+         "    -t<num> : max # of threads TBB should use\n"
+         "    -o<num> : max # of threads OMP should use\n"
+         "\n  Help:\n"
+         "    -h : print this help message\n");
+}
+
+// Process command-line arguments
+void process_args(int argc, char *argv[], int *max_tbb_t, int *max_omp_t) {
+  for (int i=1; i<argc; ++i) {  
+    if (argv[i][0] == '-') {
+      switch (argv[i][1]) {
+      case 't': // set max_tbb_threads
+       if (sscanf(&argv[i][2], "%d", max_tbb_t) != 1 || *max_tbb_t < 1) {
+         fprintf(stderr, "%s Warning: argument of -t option unacceptable: %s\n", argv[0], &argv[i][2]);
+         help_message(argv[0]);
+       }
+       break;
+      case 'o': // set max_omp_threads
+       if (sscanf(&argv[i][2], "%d", max_omp_t) != 1 || *max_omp_t < 1) {
+         fprintf(stderr, "%s Warning: argument of -o option unacceptable: %s\n", argv[0], &argv[i][2]);
+         help_message(argv[0]);
+       }
+       break;
+      case 'h': // print help message
+       help_message(argv[0]);
+       exit(0);
+       break;
+      default:
+       fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]);
+       help_message(argv[0]);
+       break;
+      }
+    } else {
+      fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]);
+      help_message(argv[0]);
+    }
+  }
+}
+
+int main(int argc, char *argv[]) { 
+  process_args(argc, argv, &max_tbb_threads, &max_omp_threads);
+  TotalThreadLevel.init();
+
+  double start, end;
+  start = omp_get_wtime();
+  
+#pragma omp parallel num_threads(max_omp_threads)
+  {
+    int omp_thread = omp_get_thread_num();
+#ifdef LOG_THREADS
+    if (omp_thread == 0)
+      TotalThreadLevel.change_level(omp_get_num_threads(), omp_outer);
+#endif
+    task_scheduler_init phase(max_tbb_threads);
+    if (omp_thread == 0) {
+      MilliSleep(3000);
+#ifdef LOG_THREADS
+      TotalThreadLevel.change_level(-1, omp_outer);
+#endif
+      parallel_for(blocked_range<size_t>(0, 1000), 
+                  [=](const blocked_range<size_t>& range) {
+#ifdef LOG_THREADS
+       TotalThreadLevel.change_level(1, tbb_inner);
+#endif
+#pragma ivdep
+       for (size_t i=range.begin(); i!=range.end(); ++i) {
+         if (i==range.begin())
+           printf("TBB range starting at %d on OMP thread %d\n", (int)i, omp_thread);
+       }
+#ifdef LOG_THREADS
+       TotalThreadLevel.change_level(-1, tbb_inner);
+#endif
+      }, auto_partitioner());
+#ifdef LOG_THREADS
+      TotalThreadLevel.change_level(1, omp_outer);
+#endif
+    }
+    else {
+      MilliSleep(6000);
+    }
+#ifdef LOG_THREADS
+    if (omp_thread == 0)
+      TotalThreadLevel.change_level(-omp_get_num_threads(), omp_outer);
+#endif
+  }
+  end = omp_get_wtime();
+  printf("Simple test of OMP (%d threads max) with TBB (%d threads max) inside took: %6.6f\n",
+        max_omp_threads, max_tbb_threads, end-start);
+#ifdef LOG_THREADS
+  TotalThreadLevel.dump();
+#endif
+  return 0;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/perfor/tbb_multi_omp.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/rml/perfor/tbb_multi_omp.cpp
new file mode 100644 (file)
index 0000000..9f4442b
--- /dev/null
@@ -0,0 +1,194 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include <cstddef>
+#include <cstdlib>
+#include <cstdio>
+#include <float.h>
+#include <math.h>
+#include <time.h>
+
+#include <omp.h>
+#include <assert.h>
+
+#include "thread_level.h"
+
+#include "tbb/task.h"
+#include "tbb/tick_count.h"
+#include "tbb/task_scheduler_init.h"
+#include "tbb/scalable_allocator.h"
+
+#if _WIN32||_WIN64
+#include <Windows.h> /* Need Sleep */
+#else
+#include <unistd.h>  /* Need usleep */
+#endif
+
+void MilliSleep( unsigned milliseconds ) {
+#if _WIN32||_WIN64
+    Sleep( milliseconds );
+#else
+    usleep( milliseconds*1000 );
+#endif /* _WIN32||_WIN64 */
+}
+
+using namespace std;
+using namespace tbb;
+
+// Algorithm parameters
+const int Max_TBB_Threads = 16;
+const int Max_OMP_Threads = 16;
+
+// Global variables
+int max_tbb_threads = Max_TBB_Threads;
+int max_omp_threads = Max_OMP_Threads;
+
+// Print help on command-line arguments
+void help_message(char *prog_name) {
+  fprintf(stderr, "\n%s usage:\n", prog_name);
+  fprintf(stderr, 
+         "  Parameters:\n"
+         "    -t<num> : max # of threads TBB should use\n"
+         "    -o<num> : max # of threads OMP should use\n"
+         "\n  Help:\n"
+         "    -h : print this help message\n");
+}
+
+// Process command-line arguments
+void process_args(int argc, char *argv[], int *max_tbb_t, int *max_omp_t) {
+  for (int i=1; i<argc; ++i) {  
+    if (argv[i][0] == '-') {
+      switch (argv[i][1]) {
+      case 't': // set max_tbb_threads
+       if (sscanf(&argv[i][2], "%d", max_tbb_t) != 1 || *max_tbb_t < 1) {
+         fprintf(stderr, "%s Warning: argument of -t option unacceptable: %s\n", argv[0], &argv[i][2]);
+         help_message(argv[0]);
+       }
+       break;
+      case 'o': // set max_omp_threads
+       if (sscanf(&argv[i][2], "%d", max_omp_t) != 1 || *max_omp_t < 1) {
+         fprintf(stderr, "%s Warning: argument of -o option unacceptable: %s\n", argv[0], &argv[i][2]);
+         help_message(argv[0]);
+       }
+       break;
+      case 'h': // print help message
+       help_message(argv[0]);
+       exit(0);
+       break;
+      default:
+       fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]);
+       help_message(argv[0]);
+       break;
+      }
+    } else {
+      fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]);
+      help_message(argv[0]);
+    }
+  }
+}
+
+class SimpleTask : public task {
+  bool isLeaf;
+  int myId;
+public:
+  SimpleTask(bool isLeaf_, int myId_) : isLeaf(isLeaf_), myId(myId_) {}
+  task* execute() {
+#ifdef LOG_THREADS
+    TotalThreadLevel.change_level(1, tbb_outer);
+#endif
+    omp_set_num_threads(max_omp_threads);
+    if (!isLeaf) {
+      set_ref_count(65);
+      for (int i=0; i<64; ++i) {
+       SimpleTask& st = *new(allocate_child()) SimpleTask(true, i);
+       spawn(st);
+      }
+#ifdef LOG_THREADS
+      TotalThreadLevel.change_level(-1, tbb_outer);
+#endif
+      wait_for_all();
+#ifdef LOG_THREADS
+      TotalThreadLevel.change_level(1, tbb_outer);
+#endif
+    }
+    else {
+      if (myId%2 == 0) {
+       MilliSleep(3000);
+#pragma omp parallel
+       {
+#ifdef LOG_THREADS
+         if (omp_get_thread_num() == 0)
+           TotalThreadLevel.change_level(omp_get_num_threads()-1, omp_inner);
+#endif
+         //printf("In OMP parallel region on TBB task with myId=0: thread %d of %d\n", omp_get_thread_num(), omp_get_num_threads());
+#ifdef LOG_THREADS
+         if (omp_get_thread_num() == 0)
+           TotalThreadLevel.change_level(-(omp_get_num_threads()-1), omp_inner);
+#endif
+       }
+      }
+      else {
+       MilliSleep(6000);
+      }
+    }
+#ifdef LOG_THREADS
+    TotalThreadLevel.change_level(-1, tbb_outer);
+#endif
+    return NULL;
+  }
+};
+
+
+int main(int argc, char *argv[]) { 
+#ifdef LOG_THREADS
+  TotalThreadLevel.init();
+  TotalThreadLevel.change_level(1, tbb_outer);
+#endif
+  process_args(argc, argv, &max_tbb_threads, &max_omp_threads);
+
+  task_scheduler_init phase(max_tbb_threads);
+  tick_count start, end;
+  start = tick_count::now();
+  SimpleTask& st = *new(task::allocate_root()) SimpleTask(false, -1);
+#ifdef LOG_THREADS
+  TotalThreadLevel.change_level(-1, tbb_outer);
+#endif
+  task::spawn_root_and_wait(st);
+#ifdef LOG_THREADS
+  TotalThreadLevel.change_level(1, tbb_outer);
+#endif
+  end = tick_count::now();
+  printf("Simple Test of TBB (%d threads max) with OMP (%d threads max) inside took: %6.6f\n", 
+        max_tbb_threads, max_omp_threads, (end-start).seconds());
+
+#ifdef LOG_THREADS
+  TotalThreadLevel.change_level(-1, tbb_outer);
+  TotalThreadLevel.dump();
+#endif
+  return 0;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/perfor/tbb_simple.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/rml/perfor/tbb_simple.cpp
new file mode 100644 (file)
index 0000000..0b01fa9
--- /dev/null
@@ -0,0 +1,199 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include <cstddef>
+#include <cstdlib>
+#include <cstdio>
+#include <float.h>
+#include <math.h>
+#include <time.h>
+
+#include <omp.h>
+#include <assert.h>
+
+#include "thread_level.h"
+
+#include "tbb/task.h"
+#include "tbb/tick_count.h"
+#include "tbb/task_scheduler_init.h"
+
+#if _WIN32||_WIN64
+#include <Windows.h> /* Need Sleep */
+#else
+#include <unistd.h>  /* Need usleep */
+#endif
+
+void MilliSleep( unsigned milliseconds ) {
+#if _WIN32||_WIN64
+    Sleep( milliseconds );
+#else
+    usleep( milliseconds*1000 );
+#endif /* _WIN32||_WIN64 */
+}
+
+using namespace std;
+using namespace tbb;
+
+// Algorithm parameters
+const int Max_TBB_Threads = 16;
+const int Max_OMP_Threads = 16;
+
+// Global variables
+int max_tbb_threads = Max_TBB_Threads;
+int max_omp_threads = Max_OMP_Threads;
+
+// Print help on command-line arguments
+void help_message(char *prog_name) {
+  fprintf(stderr, "\n%s usage:\n", prog_name);
+  fprintf(stderr, 
+         "  Parameters:\n"
+         "    -t<num> : max # of threads TBB should use\n"
+         "    -o<num> : max # of threads OMP should use\n"
+         "\n  Help:\n"
+         "    -h : print this help message\n");
+}
+
+// Process command-line arguments
+void process_args(int argc, char *argv[], int *max_tbb_t, int *max_omp_t) {
+  for (int i=1; i<argc; ++i) {  
+    if (argv[i][0] == '-') {
+      switch (argv[i][1]) {
+      case 't': // set max_tbb_threads
+       if (sscanf(&argv[i][2], "%d", max_tbb_t) != 1 || *max_tbb_t < 1) {
+         fprintf(stderr, "%s Warning: argument of -t option unacceptable: %s\n", argv[0], &argv[i][2]);
+         help_message(argv[0]);
+       }
+       break;
+      case 'o': // set max_omp_threads
+       if (sscanf(&argv[i][2], "%d", max_omp_t) != 1 || *max_omp_t < 1) {
+         fprintf(stderr, "%s Warning: argument of -o option unacceptable: %s\n", argv[0], &argv[i][2]);
+         help_message(argv[0]);
+       }
+       break;
+      case 'h': // print help message
+       help_message(argv[0]);
+       exit(0);
+       break;
+      default:
+       fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]);
+       help_message(argv[0]);
+       break;
+      }
+    } else {
+      fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]);
+      help_message(argv[0]);
+    }
+  }
+}
+
+class SimpleTask : public task {
+  bool isLeaf;
+  int myId;
+public:
+  SimpleTask(bool isLeaf_, int myId_) : isLeaf(isLeaf_), myId(myId_) {}
+  task* execute() {
+#ifdef LOG_THREADS
+    TotalThreadLevel.change_level(1, tbb_outer);
+#endif
+    omp_set_num_threads(max_omp_threads);
+    if (!isLeaf) {
+      set_ref_count(17);
+      for (int i=0; i<16; ++i) {
+       SimpleTask& st = *new(allocate_child()) SimpleTask(true, i);
+       spawn(st);
+      }
+#ifdef LOG_THREADS
+      TotalThreadLevel.change_level(-1, tbb_outer);
+#endif
+      wait_for_all();
+#ifdef LOG_THREADS
+      TotalThreadLevel.change_level(1, tbb_outer);
+#endif
+    }
+    else {
+      if (myId == 0) {
+       MilliSleep(3000);
+#ifdef LOG_THREADS
+       TotalThreadLevel.change_level(-1, tbb_outer);
+#endif
+#pragma omp parallel
+       {
+#ifdef LOG_THREADS
+         if (omp_get_thread_num() == 0)
+           TotalThreadLevel.change_level(omp_get_num_threads(), omp_inner);
+#endif
+         printf("In OMP parallel region on TBB task with myId=0: thread %d of %d\n", 
+                omp_get_thread_num(), omp_get_num_threads());
+#ifdef LOG_THREADS
+         if (omp_get_thread_num() == 0)
+           TotalThreadLevel.change_level(-omp_get_num_threads(), omp_inner);
+#endif
+       }
+#ifdef LOG_THREADS
+       TotalThreadLevel.change_level(1, tbb_outer);
+#endif
+      }
+      else {
+       MilliSleep(6000);
+      }
+    }
+#ifdef LOG_THREADS
+    TotalThreadLevel.change_level(-1, tbb_outer);
+#endif
+    return NULL;
+  }
+};
+
+
+int main(int argc, char *argv[]) { 
+#ifdef LOG_THREADS
+  TotalThreadLevel.init();
+  TotalThreadLevel.change_level(1, tbb_outer);
+#endif
+  process_args(argc, argv, &max_tbb_threads, &max_omp_threads);
+
+  task_scheduler_init phase(max_tbb_threads);
+  tick_count start, end;
+  start = tick_count::now();
+  SimpleTask& st = *new(task::allocate_root()) SimpleTask(false, -1);
+#ifdef LOG_THREADS
+  TotalThreadLevel.change_level(-1, tbb_outer);
+#endif
+  task::spawn_root_and_wait(st);
+#ifdef LOG_THREADS
+  TotalThreadLevel.change_level(1, tbb_outer);
+#endif
+  end = tick_count::now();
+  printf("Simple Test of TBB (%d threads max) with OMP (%d threads max) inside took: %6.6f\n", 
+        max_tbb_threads, max_omp_threads, (end-start).seconds());
+#ifdef LOG_THREADS
+  TotalThreadLevel.change_level(-1, tbb_outer);
+  TotalThreadLevel.dump();
+#endif
+  return 0;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/perfor/thread_level.h b/deal.II/contrib/tbb/tbb30_104oss/src/rml/perfor/thread_level.h
new file mode 100644 (file)
index 0000000..339b72e
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Thread level recorder
+#ifndef __THREAD_LEVEL_H
+#define __THREAD_LEVEL_H
+#include <cstdio>
+#include <omp.h>
+#include <assert.h>
+#include "tbb/atomic.h"
+#include "tbb/tick_count.h"
+
+//#define LOG_THREADS // use this to ifdef out calls to this class 
+//#define NO_BAIL_OUT // continue execution after detecting oversubscription
+
+using namespace tbb;
+
+typedef enum {tbb_outer, tbb_inner, omp_outer, omp_inner} client_t;
+
+class ThreadLevelRecorder {
+  tbb::atomic<int> tbb_outer_level;
+  tbb::atomic<int> tbb_inner_level;
+  tbb::atomic<int> omp_outer_level;
+  tbb::atomic<int> omp_inner_level;
+  struct record {
+    tbb::tick_count time;
+    int n_tbb_outer_thread;
+    int n_tbb_inner_thread;
+    int n_omp_outer_thread;
+    int n_omp_inner_thread;
+  };
+  tbb::atomic<unsigned> next;
+  /** Must be power of two */
+  static const unsigned max_record_count = 1<<20;
+  record array[max_record_count];
+  int max_threads;
+  bool fail;
+ public:
+  void change_level(int delta, client_t whichClient);
+  void dump();
+  void init();
+};
+
+void ThreadLevelRecorder::change_level(int delta, client_t whichClient) {
+  int tox=tbb_outer_level, tix=tbb_inner_level, oox=omp_outer_level, oix=omp_inner_level;
+  if (whichClient == tbb_outer) {
+    tox = tbb_outer_level+=delta;
+  } else if (whichClient == tbb_inner) {
+    tix = tbb_inner_level+=delta;
+  } else if (whichClient == omp_outer) {
+    oox = omp_outer_level+=delta;
+  } else if (whichClient == omp_inner) {
+    oix = omp_inner_level+=delta;
+  } else {
+    printf("WARNING: Bad client type; ignoring.\n");
+    return;
+  }
+  // log non-negative entries
+  tbb::tick_count t = tbb::tick_count::now();
+  unsigned k = next++;
+  if (k<max_record_count) {
+    record& r = array[k];
+    r.time = t;
+    r.n_tbb_outer_thread = tox>=0?tox:0;
+    r.n_omp_outer_thread = oox>=0?oox:0;
+    r.n_tbb_inner_thread = tix>=0?tix:0;
+    r.n_omp_inner_thread = oix>=0?oix:0;
+  }
+  int tot_threads;
+  tot_threads = tox+tix+oox+oix;
+  if (tot_threads > max_threads) {
+    char errStr[128];   // format the message only when the limit is actually exceeded
+    sprintf(errStr, "ERROR: Number of threads (%d+%d+%d+%d=%d) in use exceeds maximum (%d).\n",
+           tox, tix, oox, oix, tot_threads, max_threads);
+#ifdef NO_BAIL_OUT
+    if (!fail) {
+      printf("%sContinuing...\n", errStr);
+      fail = true;
+    }
+#else
+    dump();
+    printf("%s\n", errStr);
+    assert(tot_threads <= max_threads);
+#endif
+  }
+}
+
+void ThreadLevelRecorder::dump() {
+  FILE* f = fopen("time.txt","w");
+  if (!f) {
+    perror("fopen(time.txt)\n");
+    exit(1);
+  }
+  unsigned limit = next;
+  if (limit>max_record_count) { // Clip
+    limit = max_record_count;
+  }
+  for (unsigned i=0; i<limit; ++i) {
+    fprintf(f,"%f\t%d\t%d\t%d\t%d\n",(array[i].time-array[0].time).seconds(), array[i].n_tbb_outer_thread,
+           array[i].n_tbb_inner_thread, array[i].n_omp_outer_thread, array[i].n_omp_inner_thread);
+  }
+  fclose(f);
+  int tox=tbb_outer_level, tix=tbb_inner_level, oox=omp_outer_level, oix=omp_inner_level;
+  int tot_threads;
+  tot_threads = tox+tix+oox+oix;
+  if (!fail) printf("INFO: Passed.\n");
+  else printf("INFO: Failed.\n");
+}
+
+void ThreadLevelRecorder::init() {
+  fail = false;
+  max_threads = omp_get_max_threads();
+  printf("INFO: Getting maximum hardware threads... %d.\n", max_threads);
+}
+
+ThreadLevelRecorder TotalThreadLevel;
+#endif
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/server/index.html b/deal.II/contrib/tbb/tbb30_104oss/src/rml/server/index.html
new file mode 100644 (file)
index 0000000..238b166
--- /dev/null
@@ -0,0 +1,19 @@
+<HTML>
+<BODY>
+<H2>Overview</H2>
+
+This directory has source code internal to the server.
+
+<HR>
+<A HREF="../index.html">Up to parent directory</A>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are 
+registered trademarks or trademarks of Intel Corporation or its 
+subsidiaries in the United States and other countries. 
+<p></p>
+* Other names and brands may be claimed as the property of others.
+</BODY>
+</HTML>
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/server/irml.rc b/deal.II/contrib/tbb/tbb30_104oss/src/rml/server/irml.rc
new file mode 100644 (file)
index 0000000..267c2f2
--- /dev/null
@@ -0,0 +1,126 @@
+// Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+//
+// This file is part of Threading Building Blocks.
+//
+// Threading Building Blocks is free software; you can redistribute it
+// and/or modify it under the terms of the GNU General Public License
+// version 2 as published by the Free Software Foundation.
+//
+// Threading Building Blocks is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Threading Building Blocks; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+//
+// As a special exception, you may use this file as part of a free software
+// library without restriction.  Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License.  This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
+// Microsoft Visual C++ generated resource script.
+//
+#ifdef APSTUDIO_INVOKED
+#ifndef APSTUDIO_READONLY_SYMBOLS
+#define _APS_NO_MFC                     1
+#define _APS_NEXT_RESOURCE_VALUE        102
+#define _APS_NEXT_COMMAND_VALUE         40001
+#define _APS_NEXT_CONTROL_VALUE         1001
+#define _APS_NEXT_SYMED_VALUE           101
+#endif
+#endif
+
+#define APSTUDIO_READONLY_SYMBOLS
+/////////////////////////////////////////////////////////////////////////////
+//
+// Generated from the TEXTINCLUDE 2 resource.
+//
+#include <winresrc.h>
+#define ENDL "\r\n"
+#include "tbb/tbb_version.h"
+
+/////////////////////////////////////////////////////////////////////////////
+#undef APSTUDIO_READONLY_SYMBOLS
+
+/////////////////////////////////////////////////////////////////////////////
+// Neutral resources
+
+#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_NEU)
+#ifdef _WIN32
+LANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL
+#pragma code_page(1252)
+#endif //_WIN32
+
+/////////////////////////////////////////////////////////////////////////////
+// manifest integration
+#ifdef TBB_MANIFEST
+#include "winuser.h"
+2 RT_MANIFEST tbbmanifest.exe.manifest
+#endif
+
+/////////////////////////////////////////////////////////////////////////////
+//
+// Version
+//
+
+VS_VERSION_INFO VERSIONINFO
+ FILEVERSION TBB_VERNUMBERS
+ PRODUCTVERSION TBB_VERNUMBERS
+ FILEFLAGSMASK 0x17L
+#ifdef _DEBUG
+ FILEFLAGS 0x1L
+#else
+ FILEFLAGS 0x0L
+#endif
+ FILEOS 0x40004L
+ FILETYPE 0x2L
+ FILESUBTYPE 0x0L
+BEGIN
+    BLOCK "StringFileInfo"
+    BEGIN
+        BLOCK "000004b0"
+        BEGIN
+            VALUE "CompanyName", "Intel Corporation\0"
+            VALUE "FileDescription", "Threading Building Blocks resource manager library\0"
+            VALUE "FileVersion", TBB_VERSION "\0"
+//what is it?            VALUE "InternalName", "irml\0"
+            VALUE "LegalCopyright", "Copyright 2005-2010 Intel Corporation.  All Rights Reserved.\0"
+            VALUE "LegalTrademarks", "\0"
+#ifndef TBB_USE_DEBUG
+            VALUE "OriginalFilename", "irml.dll\0"
+#else
+            VALUE "OriginalFilename", "irml_debug.dll\0"
+#endif
+            VALUE "ProductName", "Intel(R) Threading Building Blocks for Windows\0"
+            VALUE "ProductVersion", TBB_VERSION "\0"
+            VALUE "Comments", TBB_VERSION_STRINGS "\0"
+            VALUE "PrivateBuild", "\0"
+            VALUE "SpecialBuild", "\0"
+        END
+    END
+    BLOCK "VarFileInfo"
+    BEGIN
+        VALUE "Translation", 0x0, 1200
+    END
+END
+
+#endif    // Neutral resources
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef APSTUDIO_INVOKED
+/////////////////////////////////////////////////////////////////////////////
+//
+// Generated from the TEXTINCLUDE 3 resource.
+//
+
+
+/////////////////////////////////////////////////////////////////////////////
+#endif    // not APSTUDIO_INVOKED
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/server/job_automaton.h b/deal.II/contrib/tbb/tbb30_104oss/src/rml/server/job_automaton.h
new file mode 100644 (file)
index 0000000..514418e
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __RML_job_automaton_H
+#define __RML_job_automaton_H
+
+#include "rml_base.h"
+#include "tbb/atomic.h"
+
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+    // Workaround for overzealous compiler warnings 
+    #pragma warning (push)
+    #pragma warning (disable: 4244)
+#endif
+
+namespace rml {
+
+namespace internal {
+
+//! Finite state machine.   
+/**   /--------------\
+     /                V
+    0 --> 1--> ptr --> -1 
+                ^
+                |
+                |
+                V
+              ptr|1
+
+"owner" = corresponding server_thread.
+Odd states (except -1) indicate that someone is executing code on the job.
+Most transitions are driven only by the owner.
+Transition 0-->-1 is driven by a non-owner.
+Transition ptr-->-1 is driven by the owner or a non-owner.
+*/ 
+class job_automaton: no_copy {
+private:
+    tbb::atomic<intptr_t> my_job;
+public:
+    /** Created by non-owner */
+    job_automaton() {
+        my_job = 0;
+    }
+    ~job_automaton() {
+        __TBB_ASSERT( my_job==-1, "must plug before destroying" );
+    }
+
+    //! Try to transition 0-->1 or ptr-->ptr|1.
+    /** Should only be called by owner. */
+    bool try_acquire() {
+        intptr_t snapshot = my_job;
+        if( snapshot==-1 ) {
+            return false;
+        } else {
+            __TBB_ASSERT( (snapshot&1)==0, "already marked that way" );
+            intptr_t old = my_job.compare_and_swap( snapshot|1, snapshot );
+            __TBB_ASSERT( old==snapshot || old==-1, "unexpected interference" );  
+            return old==snapshot;
+        }
+    }
+    //! Transition ptr|1-->ptr
+    /** Should only be called by owner. */
+    void release() {
+        intptr_t snapshot = my_job;
+        __TBB_ASSERT( snapshot&1, NULL );
+        // Atomic store suffices here.
+        my_job = snapshot&~1;
+    }
+
+    //! Transition 1-->ptr
+    /** Should only be called by owner. */
+    void set_and_release( rml::job& job ) {
+        intptr_t value = reinterpret_cast<intptr_t>(&job);
+        __TBB_ASSERT( (value&1)==0, "job misaligned" );
+        __TBB_ASSERT( value!=0, "null job" );
+        __TBB_ASSERT( my_job==1, "already set, or not marked busy?" );
+        // Atomic store suffices here.
+        my_job = value;
+    }
+
+    //! Transition 0-->-1
+    /** If successful, return true. Called by a non-owner (used by TBB and the like). */
+    bool try_plug_null() {
+        return my_job.compare_and_swap( -1, 0 )==0;
+    }
+
+    //! Try to transition to -1.  If successful, set j to contents and return true.
+    /** Called by the owner or a non-owner (used by OpenMP and the like). */
+    bool try_plug( rml::job*&j ) {
+        for(;;) {
+            intptr_t snapshot = my_job;
+            if( snapshot&1 ) {
+                j = NULL;
+                return false;
+            } 
+            // Not busy
+            if( my_job.compare_and_swap( -1, snapshot )==snapshot ) {
+                j = reinterpret_cast<rml::job*>(snapshot);
+                return true;
+            } 
+            // Need to retry, because current thread may be non-owner that read a 0, and owner might have
+            // caused transition 0->1->ptr after we took our snapshot.
+        }
+    }
+
+    /** Called by non-owner to wait for transition to ptr. */
+    rml::job& wait_for_job() const {
+        intptr_t snapshot;
+        for(;;) {
+            snapshot = my_job;
+            if( snapshot&~1 ) break;
+            __TBB_Yield();
+        }
+        __TBB_ASSERT( snapshot!=-1, "wait on plugged job_automaton" );
+        return *reinterpret_cast<rml::job*>(snapshot&~1);
+    }
+};
+
+} // namespace internal
+} // namespace rml
+
+
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+    #pragma warning (pop)
+#endif // warning 4244 are back
+
+#endif /* __RML_job_automaton_H */
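
The job_automaton comments above spell out which side may drive each transition: the owning server thread creates and publishes the job, while a non-owner plugs the automaton at shutdown to take over cleanup. Below is a sketch of that protocol under the documented semantics; the helper functions are illustrative, and the owner is assumed to keep its own pointer to the published job, as the server code does, since the class exposes no plain getter.

    // Sketch of the documented owner / non-owner protocol for rml::internal::job_automaton.
    #include "job_automaton.h"

    // Owner: publish a freshly created job (0 --> 1 --> ptr). Returns false if a
    // non-owner already plugged the automaton, in which case the job is not published.
    bool owner_publish( rml::internal::job_automaton& ja, rml::job& fresh_job ) {
        if( !ja.try_acquire() )
            return false;
        ja.set_and_release( fresh_job );
        return true;
    }

    // Owner: mark the slot busy while client code runs on the published job (ptr --> ptr|1 --> ptr).
    void owner_run_guard( rml::internal::job_automaton& ja ) {
        if( ja.try_acquire() ) {
            // ... the owner calls into the client with its cached job pointer here ...
            ja.release();
        }
    }

    // Non-owner: block (yielding) until the owner has published the job.
    rml::job& non_owner_wait( const rml::internal::job_automaton& ja ) {
        return ja.wait_for_job();
    }

    // Non-owner: claim the job for cleanup (0 --> -1 or ptr --> -1). A false return means
    // the owner is currently busy on the job; the caller must retry later.
    bool non_owner_plug( rml::internal::job_automaton& ja, rml::job*& j ) {
        return ja.try_plug( j );
    }
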
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/server/lin-rml-export.def b/deal.II/contrib/tbb/tbb30_104oss/src/rml/server/lin-rml-export.def
new file mode 100644 (file)
index 0000000..868b65a
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+{
+global:
+__RML_open_factory;
+__RML_close_factory;
+__TBB_make_rml_server;
+__KMP_make_rml_server;
+__TBB_call_with_my_server_info;
+__KMP_call_with_my_server_info;
+local:*;
+};
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/server/rml_server.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/rml/server/rml_server.cpp
new file mode 100644 (file)
index 0000000..774b6eb
--- /dev/null
@@ -0,0 +1,3330 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "rml_tbb.h"
+#define private public /* Sleazy trick to avoid publishing internal names in public header. */
+#include "rml_omp.h"
+#undef private
+
+#include "tbb/tbb_allocator.h"
+#include "tbb/cache_aligned_allocator.h"
+#include "tbb/aligned_space.h"
+#include "tbb/atomic.h"
+#include "tbb/spin_mutex.h"
+#include "tbb/tbb_misc.h"           // Get DetectNumberOfWorkers() from here.
+#if _MSC_VER==1500 && !defined(__INTEL_COMPILER)
+// VS2008/VC9 seems to have an issue; suppress warning C4985 around this include.
+#pragma warning( push )
+#pragma warning( disable: 4985 )
+#endif
+#include "tbb/concurrent_vector.h"
+#if _MSC_VER==1500 && !defined(__INTEL_COMPILER)
+#pragma warning( pop )
+#endif
+#if _MSC_VER && defined(_Wp64)
+// Workaround for overzealous compiler warnings
+#pragma warning (push)
+#pragma warning (disable: 4244)
+#endif
+
+#include "job_automaton.h"
+#include "wait_counter.h"
+#include "thread_monitor.h"
+
+#if RML_USE_WCRM
+#include <concrt.h>
+#include <concrtrm.h>
+using namespace Concurrency;
+#include <vector>
+#include <hash_map>
+#define __RML_REMOVE_VIRTUAL_PROCESSORS_DISABLED 0
+#endif /* RML_USE_WCRM */
+
+#define STRINGIFY(x) #x
+#define TOSTRING(x) STRINGIFY(x)
+
+namespace rml {
+namespace internal {
+
+//! Number of hardware contexts
+static inline unsigned hardware_concurrency() {
+    static unsigned DefaultNumberOfThreads = 0;
+    unsigned n = DefaultNumberOfThreads;
+    if( !n ) DefaultNumberOfThreads = n = tbb::internal::DetectNumberOfWorkers();
+    return n;
+}
+
+using tbb::internal::rml::tbb_client;
+using tbb::internal::rml::tbb_server;
+
+using __kmp::rml::omp_client;
+using __kmp::rml::omp_server;
+
+typedef versioned_object::version_type version_type;
+
+#define SERVER_VERSION 2
+#define EARLIEST_COMPATIBLE_CLIENT_VERSION 2
+
+static const size_t cache_line_size = tbb::internal::NFS_MaxLineSize;
+
+template<typename Server, typename Client> class generic_connection;
+class tbb_connection_v2;
+class omp_connection_v2;
+
+#if RML_USE_WCRM
+//! State of a server_thread
+/** Below are diagrams of legal state transitions.
+
+                          ts_busy         
+                          ^      ^
+                         /        \
+                        /          V
+    ts_done <----- ts_asleep <------> ts_idle 
+*/
+
+enum thread_state_t {
+    ts_idle,
+    ts_asleep,
+    ts_busy,
+    ts_done
+};
+
+//! Extra state of an omp server thread
+enum thread_extra_state_t {
+    ts_none,
+    ts_removed,
+    ts_lent
+};
+
+//! Results from try_grab_for()
+enum thread_grab_t {
+    wk_failed,
+    wk_from_asleep,
+    wk_from_idle
+};
+
+#else /* !RML_USE_WCRM */
+
+//! State of a server_thread
+/** Below are diagrams of legal state transitions.
+
+    OMP
+              ts_omp_busy               
+              ^          ^       
+             /            \       
+            /              V       
+    ts_asleep <-----------> ts_idle 
+
+
+              ts_deactivated
+             ^            ^
+            /              \
+           V                \
+    ts_none  <--------------> ts_reactivated
+
+    TBB 
+              ts_tbb_busy               
+              ^          ^       
+             /            \       
+            /              V       
+    ts_asleep <-----------> ts_idle --> ts_done
+
+    For TBB only. Extra state transition.
+
+    ts_created -> ts_started -> ts_visited
+ */
+enum thread_state_t {
+    //! Thread not doing anything useful, but running and looking for work. 
+    ts_idle,
+    //! Thread not doing anything useful and is asleep.
+    ts_asleep,
+    //! Thread is enlisted into OpenMP team
+    ts_omp_busy,
+    //! Thread is busy doing TBB work.
+    ts_tbb_busy,
+    //! For tbb threads only
+    ts_done,
+    ts_created,
+    ts_started,
+    ts_visited,
+    //! For omp threads only
+    ts_none,
+    ts_deactivated,
+    ts_reactivated
+};
+#endif /* RML_USE_WCRM */
+
+#if TBB_USE_ASSERT
+#define PRODUCE_ARG(x) ,x
+#else
+#define PRODUCE_ARG(x) 
+#endif /* TBB_USE_ASSERT */
+
+//! Synchronizes dispatch of OpenMP work.
+class omp_dispatch_type {
+    typedef ::rml::job job_type;
+    omp_client* client;
+    void* cookie;
+    omp_client::size_type index;
+    tbb::atomic<job_type*> job;
+#if TBB_USE_ASSERT
+    omp_connection_v2* server;
+#endif /* TBB_USE_ASSERT */
+public:
+    omp_dispatch_type() {job=NULL;}
+    void consume();
+    void produce( omp_client& c, job_type& j, void* cookie_, omp_client::size_type index_ PRODUCE_ARG( omp_connection_v2& s )) {
+        __TBB_ASSERT( &j, NULL );
+        __TBB_ASSERT( !job, "job already set" );
+        client = &c;
+#if TBB_USE_ASSERT
+        server = &s;
+#endif /* TBB_USE_ASSERT */
+        cookie = cookie_;
+        index = index_;
+        // Must be last
+        job = &j;
+    }
+};
+
+//! A reference count.
+/** No default constructor, because users of ref_count must be very careful about whether the 
+    initial reference count is 0 or 1. */
+class ref_count: no_copy {
+    friend class thread_map;
+    tbb::atomic<int> my_ref_count;
+public:
+    ref_count(int k ) {my_ref_count=k;}
+    ~ref_count() {__TBB_ASSERT( !my_ref_count, "premature destruction of refcounted object" );}
+    //! Add one and return new value.
+    int add_ref() {
+        int k = ++my_ref_count;
+        __TBB_ASSERT(k>=1,"reference count underflowed before add_ref");
+        return k;
+    }
+    //! Subtract one and return new value.
+    int remove_ref() {
+        int k = --my_ref_count; 
+        __TBB_ASSERT(k>=0,"reference count underflow");
+        return k;
+    }
+};
+
+#if RML_USE_WCRM
+
+#if USE_UMS_THREAD
+#define RML_THREAD_KIND UmsThreadDefault
+#define RML_THREAD_KIND_STRING "UmsThread"
+#else
+#define RML_THREAD_KIND ThreadScheduler
+#define RML_THREAD_KIND_STRING "WinThread"
+#endif
+
+// Forward declaration
+class thread_map;
+
+static const IExecutionResource* c_remove_prepare = (IExecutionResource*)0;
+static const IExecutionResource* c_remove_returned = (IExecutionResource*)1;
+
+//! Server thread representation
+class server_thread_rep : no_copy {
+    friend class thread_map;
+    friend class omp_connection_v2;
+    friend class server_thread;
+    friend class tbb_server_thread;
+    friend class omp_server_thread;
+    template<typename Connection> friend void make_job( Connection& c, typename Connection::server_thread_type& t );
+    typedef int thread_state_rep_t;
+public:
+    //! Ctor
+    server_thread_rep( bool assigned, IScheduler* s, IExecutionResource* r, thread_map& map, rml::client& cl ) :
+        uid( GetExecutionContextId() ), my_scheduler(s), my_proxy(NULL), 
+        my_thread_map(map), my_client(cl), my_job(NULL)
+    {
+        my_state = assigned ? ts_busy : ts_idle;
+        my_extra_state = ts_none;
+        terminate = false;
+        my_execution_resource = r;
+    }
+    //! Dtor
+    ~server_thread_rep() {}
+
+    //! Synchronization routine
+    inline rml::job* wait_for_job() {
+        if( !my_job ) my_job = &my_job_automaton.wait_for_job();
+        return my_job;
+    }
+
+    // Getters and setters
+    inline thread_state_t read_state() const { thread_state_rep_t s = my_state; return static_cast<thread_state_t>(s); }
+    inline void set_state( thread_state_t to ) {my_state = to;}
+    inline void set_removed() { __TBB_ASSERT( my_extra_state==ts_none, NULL ); my_extra_state = ts_removed; }
+    inline bool is_removed() const { return my_extra_state==ts_removed; }
+    inline bool is_lent() const {return my_extra_state==ts_lent;}
+    inline void set_lent() { my_extra_state=ts_lent; }
+    inline void set_returned() { my_extra_state=ts_none; }
+    inline IExecutionResource* get_execution_resource() { return my_execution_resource; }
+    inline IVirtualProcessorRoot* get_virtual_processor() { return (IVirtualProcessorRoot*)get_execution_resource(); }
+
+    //! Enlist the thread for work
+    inline bool wakeup( thread_state_t to, thread_state_t from ) {
+        __TBB_ASSERT( from==ts_asleep && (to==ts_idle||to==ts_busy||to==ts_done), NULL );
+        return my_state.compare_and_swap( to, from )==from;
+    }
+
+    //! Try to enlist the thread for work.
+    thread_grab_t try_grab_for();
+
+    //! Destroy the client job associated with the thread
+    template<typename Connection> bool destroy_job( Connection* c );
+
+    //! Try to re-use the thread
+    void revive( IScheduler* s, IExecutionResource* r, rml::client& c ) {
+        // the variables may not have been set before a thread was told to quit
+        __TBB_ASSERT( my_scheduler==s, "my_scheduler has been altered?\n" );
+        my_scheduler = s;
+        __TBB_ASSERT( &my_client==&c, "my_client has been altered?\n" );
+        if( r ) my_execution_resource = r;
+        my_client = c;
+        my_state = ts_idle;
+        __TBB_ASSERT( my_extra_state==ts_removed, NULL );
+        my_extra_state = ts_none;
+    }
+
+protected:
+    const int uid;
+    IScheduler* my_scheduler;
+    IThreadProxy* my_proxy;
+    tbb::atomic<IExecutionResource*> my_execution_resource; /* for non-masters, it is IVirtualProcessorRoot */
+    thread_map& my_thread_map;
+    rml::client& my_client;
+    job* my_job;
+    job_automaton my_job_automaton;
+    tbb::atomic<bool> terminate;
+    tbb::atomic<thread_state_rep_t> my_state;
+    tbb::atomic<thread_extra_state_t> my_extra_state;
+};
+
+//! Class that implements IExecutionContext
+class server_thread : public IExecutionContext, public server_thread_rep {
+    friend class tbb_connection_v2;
+    friend class omp_connection_v2;
+    friend class tbb_server_thread;
+    friend class omp_server_thread;
+    friend class thread_map;
+    template<typename Connection> friend void make_job( Connection& c, typename Connection::server_thread_type& t );
+protected:
+    server_thread( bool is_tbb, bool assigned, IScheduler* s, IExecutionResource* r, thread_map& map, rml::client& cl ) : server_thread_rep(assigned,s,r,map,cl), tbb_thread(is_tbb) {}
+    ~server_thread() {}
+    /*override*/ unsigned int GetId() const { return uid; }
+    /*override*/ IScheduler* GetScheduler() { return my_scheduler; }
+    /*override*/ IThreadProxy* GetProxy()   { return my_proxy; }
+    /*override*/ void SetProxy( IThreadProxy* thr_proxy ) { my_proxy = thr_proxy; }
+
+private:
+    bool tbb_thread;
+};
+
+// Forward declaration
+class tbb_connection_v2;
+class omp_connection_v2;
+
+//! TBB server thread
+class tbb_server_thread : public server_thread {
+    friend class tbb_connection_v2;
+public:
+    tbb_server_thread( bool assigned, IScheduler* s, IExecutionResource* r, tbb_connection_v2* con, thread_map& map, rml::client& cl ) : server_thread(true,assigned,s,r,map,cl), my_conn(con) {
+        activation_count = 0;
+    }
+    ~tbb_server_thread() {}
+    /*override*/ void Dispatch( DispatchState* );
+    inline bool initiate_termination();
+    bool sleep_perhaps();
+    //! Switch out this thread
+    bool switch_out();
+private:
+    tbb_connection_v2* my_conn;
+public:
+    tbb::atomic<int> activation_count;
+};
+
+//! OMP server thread
+class omp_server_thread : public server_thread {
+    friend class omp_connection_v2;
+public:
+    omp_server_thread( bool assigned, IScheduler* s, IExecutionResource* r, omp_connection_v2* con, thread_map& map, rml::client& cl ) : 
+        server_thread(false,assigned,s,r,map,cl), my_conn(con), my_cookie(NULL), my_index(UINT_MAX) {}
+    ~omp_server_thread() {}
+    /*override*/ void Dispatch( DispatchState* );
+    inline void* get_cookie() {return my_cookie;}
+    inline ::__kmp::rml::omp_client::size_type get_index() {return my_index;}
+
+    inline IExecutionResource* get_execution_resource() { return server_thread_rep::get_execution_resource(); }
+    inline bool initiate_termination() { return destroy_job( (omp_connection_v2*) my_conn ); }
+    void sleep_perhaps();
+private:
+    omp_connection_v2* my_conn;
+    void* my_cookie;
+    ::__kmp::rml::omp_client::size_type my_index;
+    omp_dispatch_type omp_data;
+};
+
+//! Class that implements IScheduler
+template<typename Connection>
+class scheduler : no_copy, public IScheduler {
+public:
+    /*override*/ unsigned int GetId() const {return uid;}
+    /*override*/ void Statistics( unsigned int* /*pTaskCompletionRate*/, unsigned int* /*pTaskArrivalRate*/, unsigned int* /*pNumberOfTaskEnqueued*/) {}
+    /*override*/ SchedulerPolicy GetPolicy() const { __TBB_ASSERT(my_policy,NULL); return *my_policy; }
+    /*override*/ void AddVirtualProcessors( IVirtualProcessorRoot** vproots, unsigned int count ) { if( !my_conn.is_closing() ) my_conn.add_virtual_processors( vproots, count); }
+    /*override*/ void RemoveVirtualProcessors( IVirtualProcessorRoot** vproots, unsigned int count );
+    /*override*/ void NotifyResourcesExternallyIdle( IVirtualProcessorRoot** vproots, unsigned int count ) { __TBB_ASSERT( false, "This call is not allowed for TBB" ); }
+    /*override*/ void NotifyResourcesExternallyBusy( IVirtualProcessorRoot** vproots, unsigned int count ) { __TBB_ASSERT( false, "This call is not allowed for TBB" ); }
+protected:
+    scheduler( Connection& conn );
+    virtual ~scheduler() { __TBB_ASSERT( my_policy, NULL ); delete my_policy; }
+
+public:
+    static scheduler* create( Connection& conn ) {return new scheduler( conn );}
+
+private:
+    const int uid;
+    Connection& my_conn;
+    SchedulerPolicy* my_policy;
+};
+
+
+/*
+ * thread_scavenger_thread state transitions: (created) --> ts_busy --> ts_done
+ */
+class thread_scavenger_thread : public IExecutionContext, no_copy {
+public:
+    thread_scavenger_thread( IScheduler* s, IVirtualProcessorRoot* r, thread_map& map ) :
+        uid( GetExecutionContextId() ), my_scheduler(s), my_virtual_processor_root(r), my_proxy(NULL), my_thread_map(map)
+    {
+        my_state = ts_busy;
+#if TBB_USE_ASSERT
+        activation_count = 0;
+#endif
+    }
+    ~thread_scavenger_thread() {}
+    /*override*/ unsigned int GetId() const { return uid; }
+    /*override*/ IScheduler* GetScheduler() { return my_scheduler; }
+    /*override*/ IThreadProxy* GetProxy()   { return my_proxy; }
+    /*override*/ void SetProxy( IThreadProxy* thr_proxy ) { my_proxy = thr_proxy; }
+    /*override*/ void Dispatch( DispatchState* );
+    inline thread_state_t read_state() { return my_state; }
+    inline void set_state( thread_state_t s ) { my_state = s; }
+    inline IVirtualProcessorRoot* get_virtual_processor() { return my_virtual_processor_root; }
+private:
+    const int uid;
+    IScheduler* my_scheduler;
+    IVirtualProcessorRoot* my_virtual_processor_root;
+    IThreadProxy* my_proxy;
+    thread_map& my_thread_map;
+    tbb::atomic<thread_state_t> my_state;
+#if TBB_USE_ASSERT
+public:
+    tbb::atomic<int> activation_count;
+#endif
+};
+
+static const thread_scavenger_thread* c_claimed = reinterpret_cast<thread_scavenger_thread*>(1);
+
+struct garbage_connection_queue {
+    tbb::atomic<uintptr_t> head;
+    tbb::atomic<uintptr_t> tail;
+    static const uintptr_t empty = 0; // connection scavenger thread empty list
+    static const uintptr_t plugged = 1;  // end of use of the list
+    static const uintptr_t plugged_acked = 2;  // connection scavenger saw the plugged flag, and it freed all connections
+};
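+
+/* A short restatement of how these values are used (inferred from the comments above and
+   from request_close_connection() below): during normal operation head and tail delimit the
+   list of closed connections awaiting reclamation; at process shutdown the tail is plugged
+   so that no further connections can be queued, and plugged_acked records that the scavenger
+   has seen the plug and freed everything that was queued. */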
+
+//! Connection scavenger
+/** It collects closed connection objects, waits for the worker threads belonging to a connection to return to the ConcRT RM,
+ *  and then returns the object to the memory manager.
+ */
+class connection_scavenger_thread {
+    friend void assist_cleanup_connections();
+    /*
+     * connection_scavenger_thread's state alternates between the two:
+     * ts_busy <----> ts_asleep (initial)
+     */
+    tbb::atomic<thread_state_t> state;
+
+    /* We steal two bits from a connection pointer to encode 
+     * whether the connection is for TBB or for OMP.
+     *
+     * ----------------------------------
+     * |                          |  |  |
+     * ----------------------------------
+     *                              ^  ^ 
+     *                             /   |
+     *            1 : tbb, 0 : omp     |
+     *                  if set, terminate
+     */
+    // FIXME: pad these?
+    thread_monitor monitor;
+    int default_concurrency;
+    HANDLE thr_handle;
+#if TBB_USE_ASSERT
+    tbb::atomic<int> n_scavenger_threads;
+#endif
+
+public:
+    connection_scavenger_thread() : thr_handle(NULL) {
+        state = ts_asleep;
+#if TBB_USE_ASSERT
+        n_scavenger_threads = 0; 
+#endif
+    }
+
+    ~connection_scavenger_thread() {}
+
+    void wakeup() {
+        if( state.compare_and_swap( ts_busy, ts_asleep )==ts_asleep )
+            monitor.notify();
+    }
+
+    void sleep_perhaps();
+
+    void process_requests( uintptr_t conn_ex );
+
+    static __RML_DECL_THREAD_ROUTINE thread_routine( void* arg );
+
+    void launch( int dc ) { 
+        default_concurrency = dc; 
+        thread_monitor::launch( connection_scavenger_thread::thread_routine, this, NULL ); 
+    }
+
+    template<typename Server, typename Client>
+    void add_request( generic_connection<Server,Client>* conn_to_close );
+
+    template<typename Server, typename Client>
+    uintptr_t grab_and_prepend( generic_connection<Server,Client>* last_conn_to_close );
+};
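+
+/* Minimal sketch (helper names are ours, purely illustrative) of the two-bit tagging scheme
+   described inside connection_scavenger_thread above: connection objects are assumed to be at
+   least 4-byte aligned, so the two low-order bits of a connection pointer are free to carry
+   the tbb/omp flag and the terminate flag.  generic_connection::get_addr() below strips the
+   same two bits. */
+static inline uintptr_t pack_connection_ex( void* conn, uintptr_t tag_bits /* 0..3 */ ) {
+    __TBB_ASSERT( (reinterpret_cast<uintptr_t>(conn)&3)==0, "connection object must be at least 4-byte aligned" );
+    return reinterpret_cast<uintptr_t>(conn) | (tag_bits&3);
+}
+static inline void* unpack_connection_ex( uintptr_t conn_ex ) {
+    return reinterpret_cast<void*>( conn_ex & ~(uintptr_t)3 );
+}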
+
+void free_all_connections( uintptr_t );
+
+#endif /* RML_USE_WCRM */
+
+#if !RML_USE_WCRM
+class server_thread;
+
+//! Base of thread_map; defined separately so that the iterator type is available to server_thread
+struct thread_map_base {
+    //! A value in the map
+    class value_type {
+    public:
+        server_thread& thread() {
+            __TBB_ASSERT( my_thread, "thread_map::value_type::thread() called when !my_thread" );
+            return *my_thread;
+        }
+        rml::job& job() {
+            __TBB_ASSERT( my_job, "thread_map::value_type::job() called when !my_job" );
+            return *my_job;
+        }
+        value_type() : my_thread(NULL), my_job(NULL) {}
+        server_thread& wait_for_thread() const {
+            for(;;) {
+                server_thread* ptr=const_cast<server_thread*volatile&>(my_thread);
+                if( ptr ) 
+                    return *ptr;
+                __TBB_Yield();
+            } 
+        }
+        /** Shortly after a connection is established, it is possible for the server
+            to grab a server_thread that has not yet created a job object for that server. */
+        rml::job& wait_for_job() const {
+            if( !my_job ) {
+                my_job = &my_automaton.wait_for_job();
+            }
+            return *my_job;
+        }
+    private:
+        server_thread* my_thread;
+        /** Marked mutable because, though it is physically modified, conceptually it is just a duplicate of
+            the job held by the job_automaton. */
+        mutable rml::job* my_job;
+        job_automaton my_automaton;
+        // FIXME - pad out to cache line, because my_automaton is hit hard by thread()
+        friend class thread_map;
+    };
+    typedef tbb::concurrent_vector<value_type,tbb::zero_allocator<value_type,tbb::cache_aligned_allocator> > array_type;
+};
+#endif /* !RML_USE_WCRM */
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // Suppress overzealous compiler warnings about an uninstantiable class
+    #pragma warning(push)
+    #pragma warning(disable:4510 4610)
+#endif
+
+template<typename T>
+class padded: public T {
+    char pad[cache_line_size - sizeof(T)%cache_line_size];
+};
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    #pragma warning(pop)
+#endif
+
+// FIXME - should we pad out memory to avoid false sharing of our global variables?
+static tbb::atomic<int> the_balance;
+static tbb::atomic<int> the_balance_inited;
+
+#if !RML_USE_WCRM
+//! Per thread information 
+/** ref_count holds number of clients that are using this, 
+    plus 1 if a host thread owns this instance. */
+class server_thread: public ref_count {
+    friend class thread_map;
+    template<typename Server, typename Client> friend class generic_connection;
+    friend class tbb_connection_v2;
+    friend class omp_connection_v2;
+    //! Integral type that can hold a thread_state_t
+    typedef int thread_state_rep_t;
+    tbb::atomic<thread_state_rep_t> state;
+public:
+    thread_monitor monitor;
+private:
+    bool    is_omp_thread;
+    tbb::atomic<thread_state_rep_t> my_extra_state;
+    server_thread* link;
+    thread_map_base::array_type::iterator my_map_pos;
+    rml::server *my_conn;
+    rml::job* my_job;
+    job_automaton* my_ja;
+    size_t my_index;
+    tbb::atomic<bool> terminate;
+    omp_dispatch_type omp_dispatch;
+
+#if TBB_USE_ASSERT
+    //! Flag used to check if thread is still using *this.
+    bool has_active_thread;
+#endif /* TBB_USE_ASSERT */
+
+    //! Volunteer to sleep. 
+    void sleep_perhaps( thread_state_t asleep );
+
+    //! Destroy job corresponding to given client
+    /** Return true if thread must quit. */
+    template<typename Connection>
+    bool destroy_job( Connection& c );
+
+    //! Do terminate the thread
+    /** Return true if thread must quit. */
+    bool do_termination();
+
+    void loop();
+    static __RML_DECL_THREAD_ROUTINE thread_routine( void* arg ); 
+
+public:
+    server_thread();
+
+    ~server_thread();
+
+    //! Read the thread state
+    thread_state_t read_state() const {
+        thread_state_rep_t s = state;
+        __TBB_ASSERT( unsigned(s)<=unsigned(ts_done), "corrupted server thread?" );
+        return thread_state_t(s);
+    }
+
+    //! Read the tbb-specific extra thread state
+    thread_state_t read_extra_state() const {
+        thread_state_rep_t s = my_extra_state;
+        return thread_state_t(s);
+    }
+
+    //! Launch a thread that is bound to *this.
+    void launch( size_t stack_size );
+
+    //! Attempt to wake up a thread
+    /** The value "to" is the new state for the thread, if it was woken up.
+        Returns true if thread was woken up, false otherwise. */
+    bool wakeup( thread_state_t to, thread_state_t from );
+
+    //! Attempt to enslave a thread for OpenMP/TBB.
+    /** Returns true if state is successfully changed.  's' takes either ts_omp_busy or ts_tbb_busy */
+    bool try_grab_for( thread_state_t s );
+
+#if _WIN32||_WIN64
+    //! Send the worker thread to sleep temporarily
+    void deactivate();
+
+    //! Wake the worker thread up
+    void reactivate();
+#endif /* _WIN32||_WIN64 */
+};
+
+//! Bag of threads that are private to a client.
+class private_thread_bag {
+    struct list_thread: server_thread {
+       list_thread* next;
+    };
+    //! Root of atomic linked list of list_thread
+    /** ABA problem is avoided because items are only atomically pushed, never popped. */
+    tbb::atomic<list_thread*> my_root; 
+    tbb::cache_aligned_allocator<padded<list_thread> > my_allocator; 
+public:
+    //! Construct empty bag
+    private_thread_bag() {my_root=NULL;}
+
+    //! Create a fresh server_thread object.
+    server_thread& add_one_thread() {
+        list_thread* t = my_allocator.allocate(1);
+        new( t ) list_thread;
+        // Atomically add to list
+        list_thread* old_root;
+        do {
+            old_root = my_root;
+            t->next = old_root;
+        } while( my_root.compare_and_swap( t, old_root )!=old_root );
+        return *t;  
+    }
+
+    //! Destroy the bag and threads in it. 
+    ~private_thread_bag() {
+        while( my_root ) {
+            // Unlink thread from list.
+            list_thread* t = my_root;
+            my_root = t->next;
+            // Destroy and deallocate the thread.
+            t->~list_thread();
+            my_allocator.deallocate(static_cast<padded<list_thread>*>(t),1);    
+        }
+    }
+};
+
+//! Forward declaration
+void wakeup_some_tbb_threads();
+
+//! Type-independent part of class generic_connection. 
+/** One to one map from server threads to jobs, and associated reference counting. */
+class thread_map : public thread_map_base {
+public:
+    typedef rml::client::size_type size_type;
+    //! ctor
+    thread_map( wait_counter& fc, ::rml::client& client ) : 
+        all_visited_at_least_once(false), my_min_stack_size(0), my_server_ref_count(1),
+        my_client_ref_count(1), my_client(client), my_factory_counter(fc)
+    { my_unrealized_threads = 0; }
+    //! dtor
+    ~thread_map() {}
+    typedef array_type::iterator iterator;
+    iterator begin() {return my_array.begin();}
+    iterator end() {return my_array.end();}
+    void bind();
+    void unbind();
+    void assist_cleanup( bool assist_null_only );
+
+    /** Returns number of unrealized threads to create. */
+    size_type wakeup_tbb_threads( size_type n );
+    bool wakeup_next_thread( iterator i, tbb_connection_v2& conn );
+    void release_tbb_threads( server_thread* t );
+    void adjust_balance( int delta );
+
+    //! Add a server_thread object to the map, but do not bind it.
+    /** Return NULL if out of unrealized threads. */
+    value_type* add_one_thread( bool is_omp_thread_ );
+
+    void bind_one_thread( rml::server& server, value_type& x );
+
+    void remove_client_ref();
+    int add_server_ref() {return my_server_ref_count.add_ref();}
+    int remove_server_ref() {return my_server_ref_count.remove_ref();}
+
+    ::rml::client& client() const {return my_client;}
+
+    size_type get_unrealized_threads() { return my_unrealized_threads; }
+
+private:
+    private_thread_bag my_private_threads;
+    bool all_visited_at_least_once;
+    array_type my_array;
+    size_t my_min_stack_size;
+    tbb::atomic<size_type> my_unrealized_threads;
+
+    //! Number of threads referencing *this, plus one extra.
+    /** When it becomes zero, the containing server object can be safely deleted. */
+    ref_count my_server_ref_count;
+
+    //! Number of jobs that need cleanup, plus one extra.
+    /** When it becomes zero, acknowledge_close_connection is called. */
+    ref_count my_client_ref_count;
+
+    ::rml::client& my_client;
+    //! Counter owned by factory that produced this thread_map.
+    wait_counter& my_factory_counter;
+};
+
+void thread_map::bind_one_thread( rml::server& server, value_type& x ) {
+    // Add one to account for the thread referencing this map henceforth.
+    server_thread& t = x.thread();
+    my_server_ref_count.add_ref();
+    my_client_ref_count.add_ref();
+#if TBB_USE_ASSERT
+    __TBB_ASSERT( t.add_ref()==1, NULL );
+#else
+    t.add_ref();
+#endif
+    // Have responsibility to start the thread.
+    t.my_conn = &server;
+    t.my_ja = &x.my_automaton;
+    t.launch( my_min_stack_size );
+    /* Must wake thread up so it can fill in its "my_job" field in *this.
+       Otherwise deadlock can occur where wait_for_job spins on thread that is sleeping. */
+    __TBB_ASSERT( t.state!=ts_tbb_busy, NULL );
+    t.wakeup( ts_idle, ts_asleep );
+}
+
+thread_map::value_type* thread_map::add_one_thread( bool is_omp_thread_ ) {
+    size_type u;
+    do {
+        u = my_unrealized_threads;
+        if( !u ) return NULL;
+    } while( my_unrealized_threads.compare_and_swap(u-1,u)!=u );
+    server_thread& t = my_private_threads.add_one_thread();
+    t.is_omp_thread = is_omp_thread_;
+    __TBB_ASSERT( u>=1, NULL );
+    t.my_index = u - 1;
+    __TBB_ASSERT( t.state!=ts_tbb_busy, NULL );
+    t.my_extra_state = t.is_omp_thread ? ts_none : ts_created;
+
+    iterator i = t.my_map_pos = my_array.grow_by(1);
+    value_type& v = *i;
+    v.my_thread = &t;
+    return &v;
+}
+
+void thread_map::bind() { 
+    ++my_factory_counter;
+    my_min_stack_size = my_client.min_stack_size();
+    __TBB_ASSERT( my_unrealized_threads==0, "already called bind?" );
+    my_unrealized_threads = my_client.max_job_count();
+}
+
+void thread_map::unbind() {
+    // Ask each server_thread to cleanup its job for this server.
+    for( iterator i=begin(); i!=end(); ++i ) {
+        server_thread& t = i->thread();
+        t.terminate = true;
+        t.wakeup( ts_idle, ts_asleep );
+    }
+    // Remove extra ref to client.
+    remove_client_ref();
+}
+
+void thread_map::assist_cleanup( bool assist_null_only ) {
+    // To avoid deadlock, the current thread *must* help out with cleanups that have not started,
+    // because the thread that created the job may be busy for a long time.
+    for( iterator i = begin(); i!=end(); ++i ) {
+        rml::job* j=0;
+        job_automaton& ja = i->my_automaton;
+        if( assist_null_only ? ja.try_plug_null() : ja.try_plug(j) ) {
+            if( j ) {
+                my_client.cleanup(*j);
+            } else {
+                // server thread did not get a chance to create a job.
+            }
+            remove_client_ref();
+        } 
+    }
+}
+
+thread_map::size_type thread_map::wakeup_tbb_threads( size_type n ) {
+    __TBB_ASSERT(n>0,"must specify positive number of threads to wake up");
+    iterator e = end();
+    for( iterator k=begin(); k!=e; ++k ) {
+        // If another thread added *k, there is a tiny timing window where thread() is invalid.
+        server_thread& t = k->wait_for_thread();
+        thread_state_t thr_s = t.read_state();
+        if( t.read_extra_state()==ts_created || thr_s==ts_tbb_busy || thr_s==ts_done )
+            continue;
+        if( --the_balance>=0 ) { // try to withdraw a coin from the deposit
+            while( !t.try_grab_for( ts_tbb_busy ) ) {
+                thr_s = t.read_state();
+                if( thr_s==ts_tbb_busy || thr_s==ts_done ) {
+                    // we lost; move on to the next.
+                    ++the_balance;
+                    goto skip;
+                }
+            }
+            if( --n==0 ) 
+                return 0;
+        } else {
+            // overdraft.
+            ++the_balance;
+            break;
+        }
+skip:
+        ;
+    }
+    return n<my_unrealized_threads ? n : my_unrealized_threads;
+}
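+
+/* Minimal sketch (helper name is ours, purely illustrative) of the "coin bank" protocol that
+   wakeup_tbb_threads() above and tbb_connection_v2::adjust_job_count_estimate() below apply to
+   the_balance: a worker slot is claimed by decrementing the balance and checking that it stayed
+   non-negative; on overdraft the coin is returned immediately. */
+static inline bool try_withdraw_coin_sketch() {
+    if( --the_balance>=0 )
+        return true;   // claimed a worker slot
+    ++the_balance;     // overdraft: give the coin back
+    return false;
+}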
+#else /* RML_USE_WCRM */
+
+class thread_map : no_copy {
+    friend class omp_connection_v2;
+    typedef ::std::hash_map<uintptr_t,server_thread*> hash_map_type;
+    size_t my_min_stack_size;
+    size_t my_unrealized_threads;
+    ::rml::client& my_client;
+    //! Counter owned by factory that produced this thread_map.
+    wait_counter& my_factory_counter;
+    //! Ref counters
+    ref_count my_server_ref_count;
+    ref_count my_client_ref_count;
+    // FIXME: pad this?
+    hash_map_type my_map;
+    bool shutdown_in_progress;
+    std::vector<IExecutionResource*> original_exec_resources;
+    tbb::cache_aligned_allocator<padded<tbb_server_thread> > my_tbb_allocator; 
+    tbb::cache_aligned_allocator<padded<omp_server_thread> > my_omp_allocator; 
+    tbb::cache_aligned_allocator<padded<thread_scavenger_thread> > my_scavenger_allocator;
+    IResourceManager* my_concrt_resource_manager;
+    IScheduler* my_scheduler;
+    ISchedulerProxy* my_scheduler_proxy;
+    tbb::atomic<thread_scavenger_thread*> my_thread_scavenger_thread;
+#if TBB_USE_ASSERT
+    tbb::atomic<int> n_add_vp_requests;
+    tbb::atomic<int> n_thread_scavengers_created;
+#endif
+public:
+    thread_map( wait_counter& fc, ::rml::client& client ) : 
+        my_min_stack_size(0), my_client(client), my_factory_counter(fc),
+        my_server_ref_count(1), my_client_ref_count(1), shutdown_in_progress(false),
+        my_concrt_resource_manager(NULL), my_scheduler(NULL), my_scheduler_proxy(NULL)
+    { 
+        my_thread_scavenger_thread = NULL; 
+#if TBB_USE_ASSERT
+        n_add_vp_requests = 0;
+        n_thread_scavengers_created = 0;
+#endif
+    }
+
+    ~thread_map() {
+        __TBB_ASSERT( n_thread_scavengers_created<=1, "too many scavenger threads created" );
+        // if thread_scavenger_thread is launched, wait for it to complete
+        if( my_thread_scavenger_thread ) {
+            __TBB_ASSERT( my_thread_scavenger_thread!=c_claimed, NULL );
+            while( my_thread_scavenger_thread->read_state()==ts_busy )
+                __TBB_Yield();
+            thread_scavenger_thread* tst = my_thread_scavenger_thread;
+            my_scavenger_allocator.deallocate(static_cast<padded<thread_scavenger_thread>*>(tst),1);
+        }
+        // deallocate thread contexts
+        for( hash_map_type::const_iterator hi=my_map.begin(); hi!=my_map.end(); ++hi ) {
+            server_thread* thr = hi->second;
+            if( thr->tbb_thread ) {
+                while( ((tbb_server_thread*)thr)->activation_count>1 )
+                    __TBB_Yield();
+                ((tbb_server_thread*)thr)->~tbb_server_thread();
+                my_tbb_allocator.deallocate(static_cast<padded<tbb_server_thread>*>(thr),1);    
+            } else {
+                ((omp_server_thread*)thr)->~omp_server_thread();
+                my_omp_allocator.deallocate(static_cast<padded<omp_server_thread>*>(thr),1);    
+            }
+        }
+        if( my_scheduler_proxy ) {
+            my_scheduler_proxy->Shutdown(); 
+            my_concrt_resource_manager->Release();
+            __TBB_ASSERT( my_scheduler, NULL );
+            delete my_scheduler;
+        } else {
+            __TBB_ASSERT( !my_scheduler, NULL );
+        }
+    }
+    typedef hash_map_type::key_type key_type;
+    typedef hash_map_type::value_type value_type;
+    typedef hash_map_type::iterator iterator;
+    iterator begin() {return my_map.begin();}
+    iterator end() {return my_map.end();}
+    iterator find( key_type k ) {return my_map.find( k );}
+    iterator insert( key_type k, server_thread* v ) { 
+        std::pair<iterator,bool> res = my_map.insert( value_type(k,v) );
+        return res.first;
+    }
+    void bind( IScheduler* s ) {
+        ++my_factory_counter;
+        if( s ) {
+            my_unrealized_threads = s->GetPolicy().GetPolicyValue( MaxConcurrency );
+            __TBB_ASSERT( my_unrealized_threads>0, NULL );
+            my_scheduler = s;
+            my_concrt_resource_manager = CreateResourceManager(); // reference count==3 when first created.
+            my_scheduler_proxy = my_concrt_resource_manager->RegisterScheduler( s, CONCRT_RM_VERSION_1 );
+            my_scheduler_proxy->RequestInitialVirtualProcessors( false );
+        }
+    }
+    bool is_closing() { return shutdown_in_progress; }
+    void unbind( rml::server& server, ::tbb::spin_mutex& mtx );
+    void add_client_ref() { my_client_ref_count.add_ref(); }
+    void remove_client_ref();
+    void add_server_ref() {my_server_ref_count.add_ref();}
+    int remove_server_ref() {return my_server_ref_count.remove_ref();}
+    int get_server_ref_count() { int k = my_server_ref_count.my_ref_count; return k; }
+    void assist_cleanup( bool assist_null_only );
+    void adjust_balance( int delta );
+    int current_balance() const {int k = the_balance; return k;}
+    ::rml::client& client() const {return my_client;}
+    void register_as_master( server::execution_resource_t& v ) const { (IExecutionResource*&)v = my_scheduler_proxy ? my_scheduler_proxy->SubscribeCurrentThread() : NULL; }
+    // Remove() should be called from the same thread that subscribed the current h/w thread (i.e., the one that
+    // called register_as_master() ).
+    void unregister( server::execution_resource_t v ) const {if( v ) ((IExecutionResource*)v)->Remove( my_scheduler );}
+    void add_virtual_processors( IVirtualProcessorRoot** vprocs, unsigned int count, tbb_connection_v2& conn, ::tbb::spin_mutex& mtx );
+    void add_virtual_processors( IVirtualProcessorRoot** vprocs, unsigned int count, omp_connection_v2& conn, ::tbb::spin_mutex& mtx );
+    void remove_virtual_processors( IVirtualProcessorRoot** vproots, unsigned count, ::tbb::spin_mutex& mtx );
+    void mark_virtual_processors_as_lent( IVirtualProcessorRoot** vproots, unsigned count, ::tbb::spin_mutex& mtx );
+    void create_oversubscribers( unsigned n, std::vector<server_thread*>& thr_vec, omp_connection_v2& conn, ::tbb::spin_mutex& mtx );
+    void wakeup_tbb_threads( int c, ::tbb::spin_mutex& mtx );
+    void mark_virtual_processors_as_returned( IVirtualProcessorRoot** vprocs, unsigned int count, tbb::spin_mutex& mtx );
+    inline void addto_original_exec_resources( IExecutionResource* r, ::tbb::spin_mutex& mtx ) { 
+        ::tbb::spin_mutex::scoped_lock lck(mtx);
+        __TBB_ASSERT( !is_closing(), "try to regster master while connection is being shutdown?" );
+        original_exec_resources.push_back( r ); 
+    }
+#if !__RML_REMOVE_VIRTUAL_PROCESSORS_DISABLED
+    void allocate_thread_scavenger( IExecutionResource* v );
+#endif
+    inline thread_scavenger_thread* get_thread_scavenger() { return my_thread_scavenger_thread; }
+};
+
+garbage_connection_queue connections_to_reclaim;
+connection_scavenger_thread connection_scavenger;
+
+#endif /* !RML_USE_WCRM */
+
+//------------------------------------------------------------------------
+// generic_connection
+//------------------------------------------------------------------------
+
+template<typename Server, typename Client>
+struct connection_traits {};
+
+// head of the active tbb connections
+static tbb::atomic<uintptr_t> active_tbb_connections;
+static tbb::atomic<int> current_tbb_conn_readers;
+static size_t current_tbb_conn_reader_epoch;
+static tbb::atomic<size_t> close_tbb_connection_event_count;
+
+#if RML_USE_WCRM
+template<typename Connection>
+void make_job( Connection& c, server_thread& t );
+#endif
+
+template<typename Server, typename Client>
+class generic_connection: public Server, no_copy {
+    /*override*/ version_type version() const {return SERVER_VERSION;}
+    /*override*/ void yield() {thread_monitor::yield();}
+    /*override*/ void independent_thread_number_changed( int delta ) { my_thread_map.adjust_balance( -delta ); }
+    /*override*/ unsigned default_concurrency() const {return hardware_concurrency()-1;}
+    friend void wakeup_some_tbb_threads();
+    friend class connection_scavenger_thread;
+
+protected:
+    thread_map my_thread_map;
+    generic_connection* next_conn;
+    size_t my_ec;
+#if RML_USE_WCRM 
+    // FIXME: pad it?
+    tbb::spin_mutex map_mtx;
+    IScheduler* my_scheduler;
+    void do_open( IScheduler* s ) { 
+        my_scheduler = s; 
+        my_thread_map.bind( s );
+    }
+    bool is_closing() { return my_thread_map.is_closing(); }
+    void request_close_connection( bool existing );
+#else
+    void do_open() {my_thread_map.bind();}
+    void request_close_connection( bool );
+#endif /* RML_USE_WCRM */
+    //! Make destructor virtual
+    virtual ~generic_connection() {}
+#if !RML_USE_WCRM
+    generic_connection( wait_counter& fc, Client& c ) : my_thread_map(fc,c), next_conn(NULL), my_ec(0) {}
+#else
+    generic_connection( wait_counter& fc, Client& c ) : 
+            my_thread_map(fc,c), next_conn(NULL), my_ec(0), map_mtx(), my_scheduler(NULL) {}
+    void add_virtual_processors( IVirtualProcessorRoot** vprocs, unsigned int count );
+    void remove_virtual_processors( IVirtualProcessorRoot** vprocs, unsigned int count );
+    void notify_resources_externally_busy( IVirtualProcessorRoot** vprocs, unsigned int count ) { my_thread_map.mark_virtual_processors_as_lent( vprocs, count, map_mtx ); }
+    void notify_resources_externally_idle( IVirtualProcessorRoot** vprocs, unsigned int count ) {
+        my_thread_map.mark_virtual_processors_as_returned( vprocs, count, map_mtx );
+    }
+#endif /* !RML_USE_WCRM */
+
+public:
+    typedef Server server_type;
+    typedef Client client_type;
+    Client& client() const {return static_cast<Client&>(my_thread_map.client());}
+    void set_scratch_ptr( job& j, void* ptr ) { ::rml::server::scratch_ptr(j) = ptr; }
+#if RML_USE_WCRM
+    template<typename Connection>
+    friend void make_job( Connection& c, server_thread& t );
+    void add_server_ref ()   {my_thread_map.add_server_ref();}
+    void remove_server_ref() {if( my_thread_map.remove_server_ref()==0 ) delete this;}
+    void add_client_ref ()   {my_thread_map.add_client_ref();}
+    void remove_client_ref() {my_thread_map.remove_client_ref();}
+#else /* !RML_USE_WCRM */
+    int  add_server_ref ()   {return my_thread_map.add_server_ref();}
+    void remove_server_ref() {if( my_thread_map.remove_server_ref()==0 ) delete this;}
+    void remove_client_ref() {my_thread_map.remove_client_ref();}
+    void make_job( server_thread& t, job_automaton& ja );
+#endif /* RML_USE_WCRM */
+    static generic_connection* get_addr( uintptr_t addr_ex ) {
+        return reinterpret_cast<generic_connection*>( addr_ex&~(uintptr_t)3 );
+    }
+};
+
+//------------------------------------------------------------------------
+// TBB server
+//------------------------------------------------------------------------
+
+template<>
+struct connection_traits<tbb_server,tbb_client> {
+    static const bool assist_null_only = true;
+    static const bool is_tbb = true;
+};
+
+//! Represents a server/client binding.
+/** The internal representation uses inheritance for the server part and a pointer for the client part. */
+class tbb_connection_v2: public generic_connection<tbb_server,tbb_client> {
+    /*override*/ void adjust_job_count_estimate( int delta );
+#if !RML_USE_WCRM
+#if _WIN32||_WIN64
+    /*override*/ void register_master ( rml::server::execution_resource_t& /*v*/ ) {}
+    /*override*/ void unregister_master ( rml::server::execution_resource_t /*v*/ ) {}
+#endif
+#else 
+    /*override*/ void register_master ( rml::server::execution_resource_t& v ) {
+        my_thread_map.register_as_master(v);
+        if( v ) ++nesting;
+    }
+    /*override*/ void unregister_master ( rml::server::execution_resource_t v ) {
+        if( v ) {
+            __TBB_ASSERT( nesting>0, NULL );
+            if( --nesting==0 ) {
+#if !__RML_REMOVE_VIRTUAL_PROCESSORS_DISABLED
+                my_thread_map.allocate_thread_scavenger( (IExecutionResource*)v );
+#endif
+            }
+        }
+        my_thread_map.unregister(v); 
+    }
+    IScheduler* create_scheduler() {return( scheduler<tbb_connection_v2>::create( *this ) );}
+    friend void  free_all_connections( uintptr_t );
+    friend class scheduler<tbb_connection_v2>;
+    friend class execution_context;
+    friend class connection_scavenger_thread;
+#endif /* RML_USE_WCRM */
+    friend void wakeup_some_tbb_threads();
+    //! Estimate on number of jobs without threads working on them.
+    tbb::atomic<int> my_slack;
+    friend class dummy_class_to_shut_up_gratuitous_warning_from_gcc_3_2_3;
+#if TBB_USE_ASSERT
+    tbb::atomic<int> my_job_count_estimate;
+#endif /* TBB_USE_ASSERT */
+
+    tbb::atomic<int> n_adjust_job_count_requests;
+#if RML_USE_WCRM
+    tbb::atomic<int> nesting;
+#endif
+
+    // dtor
+    ~tbb_connection_v2();
+
+public:
+#if RML_USE_WCRM
+    typedef tbb_server_thread server_thread_type;
+#endif
+    //! True if there is slack that try_process can use.
+    bool has_slack() const {return my_slack>0;}
+
+#if RML_USE_WCRM
+    bool try_process( job& job )
+#else
+    bool try_process( server_thread& t, job& job )
+#endif
+    {
+        bool visited = false;
+        // No check for my_slack>0 here because caller is expected to do that check.
+        int k = --my_slack;
+        if( k>=0 ) {
+#if !RML_USE_WCRM
+            t.my_extra_state = ts_visited; // remember the thread paid a trip to process() at least once
+#endif
+            client().process(job);
+            visited = true;
+        }
+        ++my_slack; 
+        return visited;
+    }
+
+    tbb_connection_v2( wait_counter& fc, tbb_client& client ) : generic_connection<tbb_server,tbb_client>(fc,client)
+    {
+        my_slack = 0;
+#if RML_USE_WCRM
+        nesting = 0;
+#endif
+#if TBB_USE_ASSERT
+        my_job_count_estimate = 0;
+#endif /* TBB_USE_ASSERT */
+        __TBB_ASSERT( !my_slack, NULL );
+
+#if RML_USE_WCRM
+        do_open( client.max_job_count()>0 ? create_scheduler() : NULL );
+#else
+        do_open();
+#endif /* !RML_USE_WCRM */
+        n_adjust_job_count_requests = 0;
+
+        // Acquire head of active_tbb_connections & push the connection into the list
+        uintptr_t conn;
+        do {
+            for( ; (conn=active_tbb_connections)&1; )
+                __TBB_Yield();
+        } while( active_tbb_connections.compare_and_swap( conn|1, conn )!=conn );
+
+        this->next_conn = generic_connection<tbb_server,tbb_client>::get_addr(conn);
+        // Update and release head of active_tbb_connections
+        active_tbb_connections = (uintptr_t) this; // set and release
+    }
+    inline void wakeup_tbb_threads( unsigned n ) {
+        my_thread_map.wakeup_tbb_threads( n
+#if RML_USE_WCRM
+                , map_mtx
+#endif
+                );
+    }
+#if RML_USE_WCRM
+    inline int get_nesting_level() { return nesting; }
+#else
+    inline bool wakeup_next_thread( thread_map::iterator i ) {return my_thread_map.wakeup_next_thread( i, *this );}
+    inline thread_map::size_type get_unrealized_threads () {return my_thread_map.get_unrealized_threads();}
+#endif /* !RML_USE_WCRM */
+};
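+
+/* Minimal sketch (helper name is ours, purely illustrative) of the list-head protocol used on
+   active_tbb_connections by the constructor above and by request_close_connection() below: bit 0
+   of the head doubles as a lock bit, so the head is acquired by waiting for the bit to clear and
+   then CAS-ing it back in with the bit set; publishing a new head value with bit 0 clear releases
+   the lock again. */
+static inline uintptr_t acquire_active_tbb_connections_sketch() {
+    uintptr_t conn;
+    do {
+        while( (conn=active_tbb_connections)&1 )   // wait until the lock bit is clear
+            __TBB_Yield();
+    } while( active_tbb_connections.compare_and_swap( conn|1, conn )!=conn );
+    return conn;   // caller stores a new head (bit 0 clear) to release the lock
+}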
+
+//------------------------------------------------------------------------
+// OpenMP server
+//------------------------------------------------------------------------
+
+template<>
+struct connection_traits<omp_server,omp_client> {
+    static const bool assist_null_only = false;
+    static const bool is_tbb = false;
+};
+
+class omp_connection_v2: public generic_connection<omp_server,omp_client> {
+#if !RML_USE_WCRM
+    /*override*/ int  current_balance() const {return the_balance;}
+#else
+    friend void  free_all_connections( uintptr_t );
+    friend class scheduler<omp_connection_v2>;
+    /*override*/ int current_balance() const {return my_thread_map.current_balance();}
+#endif /* !RML_USE_WCRM */
+    /*override*/ int  try_increase_load( size_type n, bool strict ); 
+    /*override*/ void decrease_load( size_type n );
+    /*override*/ void get_threads( size_type request_size, void* cookie, job* array[] );
+#if !RML_USE_WCRM
+#if _WIN32||_WIN64
+    /*override*/ void register_master ( rml::server::execution_resource_t& /*v*/ ) {}
+    /*override*/ void unregister_master ( rml::server::execution_resource_t /*v*/ ) {}
+#endif
+#else
+    /*override*/ void register_master ( rml::server::execution_resource_t& v ) {
+        my_thread_map.register_as_master( v );
+        my_thread_map.addto_original_exec_resources( (IExecutionResource*)v, map_mtx );
+    }
+    /*override*/ void unregister_master ( rml::server::execution_resource_t v ) { my_thread_map.unregister(v); }
+#endif /* !RML_USE_WCRM */
+#if _WIN32||_WIN64
+    /*override*/ void deactivate( rml::job* j );
+    /*override*/ void reactivate( rml::job* j );
+#endif /* _WIN32||_WIN64 */
+#if RML_USE_WCRM
+public:
+    typedef omp_server_thread server_thread_type;
+private:
+    IScheduler* create_scheduler() {return( scheduler<omp_connection_v2>::create( *this ) );}
+#endif /* RML_USE_WCRM */
+public:
+#if TBB_USE_ASSERT
+    //! Net change in delta caused by this connection.
+    /** Should be zero when connection is broken */
+    tbb::atomic<int> net_delta;
+#endif /* TBB_USE_ASSERT */
+
+    omp_connection_v2( wait_counter& fc, omp_client& client ) : generic_connection<omp_server,omp_client>(fc,client) {
+#if TBB_USE_ASSERT
+        net_delta = 0;
+#endif /* TBB_USE_ASSERT */
+#if RML_USE_WCRM
+        do_open( create_scheduler() );
+#else
+        do_open();
+#endif /* RML_USE_WCRM */
+    }
+    ~omp_connection_v2() {__TBB_ASSERT( net_delta==0, "net increase/decrease of load is nonzero" );}
+};
+
+#if !RML_USE_WCRM
+/* To deal with cases where the machine is oversubscribed, we want each thread to make a trip to try_process() at least once. */
+/* This should not involve computing the_balance. */
+bool thread_map::wakeup_next_thread( thread_map::iterator this_thr, tbb_connection_v2& conn ) {
+    if( all_visited_at_least_once ) 
+        return false;
+
+    iterator e = end();
+retry:
+    bool exist = false;
+    iterator k=this_thr; 
+    for( ++k; k!=e; ++k ) {
+        // If another thread added *k, there is a tiny timing window where thread() is invalid.
+        server_thread& t = k->wait_for_thread();
+        if( t.my_extra_state!=ts_visited )
+            exist = true;
+        if( t.read_state()!=ts_tbb_busy && t.my_extra_state==ts_started )
+            if( t.try_grab_for( ts_tbb_busy ) )
+                return true;
+    }
+    for( k=begin(); k!=this_thr; ++k ) {
+        server_thread& t = k->wait_for_thread();
+        if( t.my_extra_state!=ts_visited )
+            exist = true;
+        if( t.read_state()!=ts_tbb_busy && t.my_extra_state==ts_started )
+            if( t.try_grab_for( ts_tbb_busy ) )
+                return true;
+    }
+
+    if( exist ) {
+        if( conn.has_slack() )
+            goto retry;
+    } else {
+        all_visited_at_least_once = true;
+    }
+    return false;
+}
+
+void thread_map::release_tbb_threads( server_thread* t ) {
+    for( ; t; t = t->link ) {
+        while( t->read_state()!=ts_asleep )
+            __TBB_Yield();
+        t->my_extra_state = ts_started;
+    }
+}
+#endif /* !RML_USE_WCRM */
+
+void thread_map::adjust_balance( int delta ) {
+    int new_balance = the_balance += delta;
+    if( new_balance>0 && 0>=new_balance-delta /*== old the_balance*/ )
+        wakeup_some_tbb_threads();
+}
+
+void thread_map::remove_client_ref() {
+    int k = my_client_ref_count.remove_ref();
+    if( k==0 ) {
+        // Notify factory that thread has crossed back into RML.
+        --my_factory_counter;
+        // Notify client that RML is done with the client object.
+        my_client.acknowledge_close_connection();
+    } 
+}
+
+#if RML_USE_WCRM
+/** Not a member of generic_connection because we need Connection to be the derived class. */
+template<typename Connection>
+void make_job( Connection& c, typename Connection::server_thread_type& t ) {
+    if( t.my_job_automaton.try_acquire() ) {
+        rml::job& j = *t.my_client.create_one_job();
+        __TBB_ASSERT( &j!=NULL, "client::create_one_job returned NULL" );
+        __TBB_ASSERT( (intptr_t(&j)&1)==0, "client::create_one_job returned misaligned job" );
+        t.my_job_automaton.set_and_release( j );
+        c.set_scratch_ptr( j, (void*) &t );
+    }
+}
+#endif /* RML_USE_WCRM */
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+// Suppress "conditional expression is constant" warning.
+#pragma warning( push )
+#pragma warning( disable: 4127 ) 
+#endif
+#if RML_USE_WCRM
+template<typename Server, typename Client>    
+void generic_connection<Server,Client>::request_close_connection( bool exiting ) {
+    // for TBB connections, exiting should always be false
+    if( connection_traits<Server,Client>::is_tbb )
+        __TBB_ASSERT( !exiting, NULL);
+#if TBB_USE_ASSERT
+    else if( exiting )
+        reinterpret_cast<omp_connection_v2*>(this)->net_delta = 0;
+#endif
+    if( exiting ) {
+        uintptr_t tail = connections_to_reclaim.tail;
+        while( connections_to_reclaim.tail.compare_and_swap( garbage_connection_queue::plugged, tail )!=tail )
+            __TBB_Yield();
+        my_thread_map.unbind( *this, map_mtx );
+        my_thread_map.assist_cleanup( connection_traits<Server,Client>::assist_null_only );
+        // It is assumed that the client waits for all other threads to terminate before
+        // calling request_close_connection with true.  Thus, it is safe to return all
+        // outstanding connection objects that are reachable. It is possible that there may
+        // be some unreachable connection objects lying somewhere.
+        free_all_connections( connection_scavenger.grab_and_prepend( this ) );
+        return;
+    }
+#else /* !RML_USE_WCRM */
+template<typename Server, typename Client>    
+void generic_connection<Server,Client>::request_close_connection( bool ) {
+#endif /* RML_USE_WCRM */
+    if( connection_traits<Server,Client>::is_tbb ) {
+        // acquire the head of active tbb connections
+        uintptr_t conn;
+        do {
+            for( ; (conn=active_tbb_connections)&1; )
+                __TBB_Yield();
+        } while( active_tbb_connections.compare_and_swap( conn|1, conn )!=conn );
+
+        // Locate the current connection
+        generic_connection* pred_conn = NULL;
+        generic_connection* curr_conn = (generic_connection*) conn;
+        for( ; curr_conn && curr_conn!=this; curr_conn=curr_conn->next_conn )
+            pred_conn = curr_conn;
+        __TBB_ASSERT( curr_conn==this, "the current connection is not in the list?" );
+
+        // Remove this from the list
+        if( pred_conn ) {
+            pred_conn->next_conn = curr_conn->next_conn;
+            active_tbb_connections = reinterpret_cast<uintptr_t>(generic_connection<tbb_server,tbb_client>::get_addr(active_tbb_connections)); // release it
+        } else
+            active_tbb_connections = (uintptr_t) curr_conn->next_conn; // update & release it
+        curr_conn->next_conn = NULL;
+        // Increment the tbb connection close event count
+        my_ec = ++close_tbb_connection_event_count;
+        // Wait happens in tbb_connection_v2::~tbb_connection_v2()
+    }
+#if RML_USE_WCRM
+    my_thread_map.unbind( *this, map_mtx );
+    my_thread_map.assist_cleanup( connection_traits<Server,Client>::assist_null_only );
+    connection_scavenger.add_request( this );
+#else
+    my_thread_map.unbind();
+    my_thread_map.assist_cleanup( connection_traits<Server,Client>::assist_null_only );
+    // Remove extra reference
+    remove_server_ref();
+#endif
+}
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+#pragma warning( pop )
+#endif
+
+#if RML_USE_WCRM
+
+template<typename Server, typename Client>    
+void generic_connection<Server,Client>::add_virtual_processors( IVirtualProcessorRoot** vproots, unsigned int count )
+{}
+
+template<>    
+void generic_connection<tbb_server,tbb_client>::add_virtual_processors( IVirtualProcessorRoot** vproots, unsigned int count )
+{
+    my_thread_map.add_virtual_processors( vproots, count, (tbb_connection_v2&)*this, map_mtx );
+}
+template<>    
+void generic_connection<omp_server,omp_client>::add_virtual_processors( IVirtualProcessorRoot** vproots, unsigned int count )
+{
+    // For OMP, since it uses a SchedulerPolicy with MinThreads==MaxThreads, this is called once, when
+    // RequestInitialVirtualProcessors() is called.
+    my_thread_map.add_virtual_processors( vproots, count, (omp_connection_v2&)*this, map_mtx );
+}
+
+template<typename Server, typename Client>
+void generic_connection<Server,Client>::remove_virtual_processors( IVirtualProcessorRoot** vproots, unsigned int count )
+{
+    __TBB_ASSERT( false, "should not be called" );
+}
+/* For OMP, RemoveVirtualProcessors() will never be called. */
+
+template<>    
+void generic_connection<tbb_server,tbb_client>::remove_virtual_processors( IVirtualProcessorRoot** vproots, unsigned int count )
+{
+    my_thread_map.remove_virtual_processors( vproots, count, map_mtx );
+}
+
+void tbb_connection_v2::adjust_job_count_estimate( int delta ) {
+#if TBB_USE_ASSERT
+    my_job_count_estimate += delta;
+#endif /* TBB_USE_ASSERT */
+    // Atomically update slack.
+    int c = my_slack+=delta;
+    if( c>0 ) {
+        ++n_adjust_job_count_requests;
+        my_thread_map.wakeup_tbb_threads( c, map_mtx );
+        --n_adjust_job_count_requests;
+    }
+}
+#endif /* RML_USE_WCRM */
+
+tbb_connection_v2::~tbb_connection_v2() {
+#if TBB_USE_ASSERT
+    if( my_job_count_estimate!=0 ) {
+        fprintf(stderr, "TBB client tried to disconnect with non-zero net job count estimate of %d\n", int(my_job_count_estimate ));
+        abort();
+    }
+    __TBB_ASSERT( !my_slack, "attempt to destroy tbb_server with nonzero slack" );
+    __TBB_ASSERT( this!=static_cast<tbb_connection_v2*>(generic_connection<tbb_server,tbb_client >::get_addr(active_tbb_connections)), "request_close_connection() must be called" );
+#endif /* TBB_USE_ASSERT */
+#if !RML_USE_WCRM
+    // If there are other threads ready for work, give them coins
+    if( the_balance>0 )
+        wakeup_some_tbb_threads();
+#endif
+    // Someone might be accessing my data members
+    while( current_tbb_conn_readers>0 && (ptrdiff_t)(my_ec-current_tbb_conn_reader_epoch)>0 )
+        __TBB_Yield();
+}
+
+#if !RML_USE_WCRM
+template<typename Server, typename Client>
+void generic_connection<Server,Client>::make_job( server_thread& t, job_automaton& ja ) {
+    if( ja.try_acquire() ) {
+        rml::job& j = *client().create_one_job();
+        __TBB_ASSERT( &j!=NULL, "client::create_one_job returned NULL" );
+        __TBB_ASSERT( (intptr_t(&j)&1)==0, "client::create_one_job returned misaligned job" );
+        ja.set_and_release( j );
+        __TBB_ASSERT( t.my_conn && t.my_ja && t.my_job==NULL, NULL );
+        t.my_job  = &j;
+        set_scratch_ptr( j, (void*) &t );
+    }
+}
+
+void tbb_connection_v2::adjust_job_count_estimate( int delta ) {
+#if TBB_USE_ASSERT
+    my_job_count_estimate += delta;
+#endif /* TBB_USE_ASSERT */
+    // Atomically update slack.
+    int c = my_slack+=delta;
+    if( c>0 ) {
+        ++n_adjust_job_count_requests;
+        // The client has work to do and there are threads available
+        thread_map::size_type n = my_thread_map.wakeup_tbb_threads(c); 
+
+        server_thread* new_threads_anchor = NULL;
+        thread_map::size_type i;
+        for( i=0; i<n; ++i ) {
+            // Obtain unrealized threads
+            thread_map::value_type* k = my_thread_map.add_one_thread( false );
+            if( !k ) 
+                // No unrealized threads left.
+                break;
+            // Eagerly start the thread off.
+            my_thread_map.bind_one_thread( *this, *k );
+            server_thread& t = k->thread();
+            __TBB_ASSERT( !t.link, NULL );
+            t.link = new_threads_anchor;
+            new_threads_anchor = &t;
+        }
+
+        thread_map::size_type j=0; 
+        for( ; the_balance>0 && j<i; ++j ) {
+            if( --the_balance>=0 ) {
+                // Withdraw a coin from the bank
+                __TBB_ASSERT( new_threads_anchor, NULL );
+
+                server_thread* t = new_threads_anchor;
+                new_threads_anchor = t->link;
+                while( !t->try_grab_for( ts_tbb_busy ) ) 
+                    __TBB_Yield();
+                t->my_extra_state = ts_started;
+            } else {
+                // Overdraft. return it to the bank
+                ++the_balance;
+                break;
+            }
+        }
+        __TBB_ASSERT( i-j!=0||new_threads_anchor==NULL, NULL );
+        // Mark the ones that did not get started as eligible for being snatched.
+        if( new_threads_anchor )
+            my_thread_map.release_tbb_threads( new_threads_anchor );
+
+        --n_adjust_job_count_requests;
+    }
+}
+#endif /* RML_USE_WCRM */
+
+#if RML_USE_WCRM
+int omp_connection_v2::try_increase_load( size_type n, bool strict ) {
+    __TBB_ASSERT(int(n)>=0,NULL);
+    if( strict ) {
+        the_balance -= int(n);
+    } else {
+        int avail, old;
+        do {
+            avail = the_balance;
+            if( avail<=0 ) {
+                // No atomic read-write-modify operation necessary.
+                return avail;
+            }
+            // Don't re-read the_balance; if it changes, compare_and_swap will fail anyway.
+            old = the_balance.compare_and_swap( int(n)<avail ? avail-n : 0, avail );
+        } while( old!=avail );
+        if( int(n)>avail ) 
+            n=avail;
+    }
+#if TBB_USE_ASSERT
+    net_delta += n;
+#endif /* TBB_USE_ASSERT */
+    return n;
+}
+
+void omp_connection_v2::decrease_load( size_type /*n*/ ) {}
+
+void omp_connection_v2::get_threads( size_type request_size, void* cookie, job* array[] ) {
+    unsigned index = 0;
+    std::vector<omp_server_thread*> enlisted(request_size);
+    std::vector<thread_grab_t> to_activate(request_size);
+
+    if( request_size==0 ) return;
+
+    {
+        tbb::spin_mutex::scoped_lock lock(map_mtx);
+
+        __TBB_ASSERT( !is_closing(), "try to get threads while connection is being shutdown?" );
+
+        for( int scan=0; scan<2; ++scan ) {
+            for( thread_map::iterator i=my_thread_map.begin(); i!=my_thread_map.end(); ++i ) {
+                omp_server_thread* thr = (omp_server_thread*) (*i).second;
+                // in the first scan, skip VPs that are lent
+                if( scan==0 && thr->is_lent() ) continue;
+                thread_grab_t res = thr->try_grab_for();
+                if( res!=wk_failed ) { // i.e., not busy with some other scheduler
+                    to_activate[index] = res;
+                    enlisted[index] = thr;
+                    if( ++index==request_size )
+                        goto activate_threads;
+                }
+            }
+        }
+    }
+
+activate_threads:
+
+    for( unsigned i=0; i<index; ++i ) {
+        omp_server_thread* thr = enlisted[i];
+        if( to_activate[i]==wk_from_asleep )
+            thr->get_virtual_processor()->Activate( thr );
+        job* j = thr->wait_for_job();
+        array[i] = j;
+        thr->omp_data.produce( client(), *j, cookie, i PRODUCE_ARG(*this) );
+    }
+
+    if( index==request_size )
+        return;
+
+    // If we reach this point, it must be because dynamic==false.
+    // Create oversubscribers.
+
+    // Note that our policy is such that MinConcurrency==MaxConcurrency.
+    // RM will deliver MaxConcurrency of VirtualProcessors and no more.
+    __TBB_ASSERT( request_size>index, NULL );
+    unsigned n = request_size - index;
+    std::vector<server_thread*> thr_vec(n);
+    typedef std::vector<server_thread*>::iterator iterator_thr;
+    my_thread_map.create_oversubscribers( n, thr_vec, *this, map_mtx );
+    for( iterator_thr ti=thr_vec.begin(); ti!=thr_vec.end(); ++ti ) {
+        omp_server_thread* thr = (omp_server_thread*) *ti;
+        __TBB_ASSERT( thr, "thread not created?" );
+        // The thread is already grabbed; since it is newly created, we need to activate it.
+        thr->get_virtual_processor()->Activate( thr );
+        job* j = thr->wait_for_job();
+        array[index] = j;
+        thr->omp_data.produce( client(), *j, cookie, index PRODUCE_ARG(*this) );
+        ++index;
+    }
+}
+
+#if _WIN32||_WIN64
+void omp_connection_v2::deactivate( rml::job* j )
+{
+    my_thread_map.adjust_balance(1);
+#if TBB_USE_ASSERT
+    net_delta -= 1;
+#endif
+    omp_server_thread* thr = (omp_server_thread*) scratch_ptr( *j );
+    (thr->get_virtual_processor())->Deactivate( thr );
+}
+
+void omp_connection_v2::reactivate( rml::job* j )
+{
+    // Should not adjust the_balance because OMP client is supposed to 
+    // do try_increase_load() to reserve the threads to use.
+    omp_server_thread* thr = (omp_server_thread*) scratch_ptr( *j );
+    (thr->get_virtual_processor())->Activate( thr );
+}
+#endif /* _WIN32||_WIN64 */
+
+#endif  /* RML_USE_WCRM */
+
+//! Wake up some available tbb threads
+void wakeup_some_tbb_threads()
+{
+    /* First, atomically grab the connection, then increase the server ref count to keep
+       it from being released prematurely.  Second, check whether the balance is available for TBB
+       and the tbb connection has slack to exploit.  If so, go ahead and
+       try to wake some threads up. */
+    if( generic_connection<tbb_server,tbb_client >::get_addr(active_tbb_connections)==0 )
+        // the next connection will see the change; return.
+        return;
+
+start_it_over:
+    int n_curr_readers = ++current_tbb_conn_readers;
+    if( n_curr_readers>1 ) // I lost
+        return;
+    // if n_curr_readers==1, I am the first one, so I will take responsibility for waking tbb threads up.
+
+    // update the current epoch
+    current_tbb_conn_reader_epoch = close_tbb_connection_event_count;
+
+    // read and clear
+    // Newly added connection will not invalidate the pointer, and it will
+    // compete with the current one to claim coins.
+    // One that is about to close the connection increments the event count
+    // after it removes the connection from the list.  But it will keep around
+    // the connection until all readers including this one catch up. So, reading 
+    // the head and clearing the lock bit should be o.k.
+    generic_connection<tbb_server,tbb_client>* next_conn_wake_up = generic_connection<tbb_server,tbb_client>::get_addr( active_tbb_connections );
+
+    for( ; next_conn_wake_up; ) {
+        /* some threads are creating tbb server threads; they may not see my changes made to the_balance */
+        /* When a thread is in adjust_job_count_estimate() to increase the slack
+           RML tries to activate worker threads on behalf of the requesting thread
+           by repeatedly drawing a coin from the bank optimistically and grabbing a 
+           thread.  If it finds the bank overdrafted, it returns the coin back to 
+           the bank and returns the control to the thread (return from the method).
+           There lies a tiny timing hole.  
+
+           When the overdraft occurs (note that multiple masters may be in
+           adjust_job_count_estimate(), so the_balance can be any negative value) and
+           a worker returns from TBB work at that moment, returning its coin
+           does not bump the_balance above 0, so the worker happily returns from
+           wakeup_some_tbb_threads() without attempting to give coins to worker threads
+           that are ready.
+        */
+        while( ((tbb_connection_v2*)next_conn_wake_up)->n_adjust_job_count_requests>0 )
+            __TBB_Yield();
+
+        int bal = the_balance;
+        n_curr_readers = current_tbb_conn_readers; // get the snapshot
+        if( bal<=0 ) break;
+        // if the connection is deleted, the following will immediately return because its slack would be 0 or less.
+
+        tbb_connection_v2* tbb_conn = (tbb_connection_v2*)next_conn_wake_up;
+        int my_slack = tbb_conn->my_slack;
+        if( my_slack>0 ) tbb_conn->wakeup_tbb_threads( my_slack );
+        next_conn_wake_up = next_conn_wake_up->next_conn; 
+    }
+
+    int delta = current_tbb_conn_readers -= n_curr_readers;
+    // if delta>0, more threads entered the routine since this one took the snapshot
+    if( delta>0 ) {
+        current_tbb_conn_readers = 0;
+        if( the_balance>0 && generic_connection<tbb_server,tbb_client >::get_addr(active_tbb_connections)!=0 )
+            goto start_it_over;
+    }
+
+    // Signal any connection that is waiting for me to complete my access that I am done.
+    current_tbb_conn_reader_epoch = close_tbb_connection_event_count;
+}
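+
+/* Summary of the reader/epoch handshake above, restated from the code: callers announce
+   themselves by incrementing current_tbb_conn_readers; the first reader walks the active
+   connection list and publishes close_tbb_connection_event_count into
+   current_tbb_conn_reader_epoch on entry and again when it is done.  A connection being
+   destroyed records its own close event count in my_ec inside request_close_connection() and
+   then (in ~tbb_connection_v2) spins until either no readers remain or the published reader
+   epoch has caught up with my_ec, so no reader can still be traversing the departing
+   connection. */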
+
+#if !RML_USE_WCRM
+int omp_connection_v2::try_increase_load( size_type n, bool strict ) {
+    __TBB_ASSERT(int(n)>=0,NULL);
+    if( strict ) {
+        the_balance -= int(n);
+    } else {
+        int avail, old;
+        do {
+            avail = the_balance;
+            if( avail<=0 ) {
+                // No atomic read-write-modify operation necessary.
+                return avail;
+            }
+            // don't read the_balance; if it changes, compare_and_swap will fail anyway.
+            old = the_balance.compare_and_swap( int(n)<avail ? avail-n : 0, avail );
+        } while( old!=avail );
+        if( int(n)>avail ) 
+            n=avail;
+    }
+#if TBB_USE_ASSERT
+    net_delta += n;
+#endif /* TBB_USE_ASSERT */
+    return n;
+}
+
+void omp_connection_v2::decrease_load( size_type n ) {
+    __TBB_ASSERT(int(n)>=0,NULL);
+    my_thread_map.adjust_balance(int(n));
+#if TBB_USE_ASSERT
+    net_delta -= n;
+#endif /* TBB_USE_ASSERT */
+}
+
+void omp_connection_v2::get_threads( size_type request_size, void* cookie, job* array[] ) {
+
+    if( !request_size ) 
+        return;
+
+    unsigned index = 0;
+    for(;;) { // don't return until all request_size threads are grabbed.
+        // Need to grab some threads
+        thread_map::iterator k_end=my_thread_map.end();
+        for( thread_map::iterator k=my_thread_map.begin(); k!=k_end; ++k ) {
+            // If another thread added *k, there is a tiny timing window where thread() is invalid.
+            server_thread& t = k->wait_for_thread();
+            if( t.try_grab_for( ts_omp_busy ) ) {
+                // The preincrement instead of post-increment of index is deliberate.
+                job& j = k->wait_for_job();
+                array[index] = &j;
+                t.omp_dispatch.produce( client(), j, cookie, index PRODUCE_ARG(*this) );
+                if( ++index==request_size ) 
+                    return;
+            } 
+        }
+        // Need to allocate more threads
+        for( unsigned i=index; i<request_size; ++i ) {
+            __TBB_ASSERT( index<request_size, NULL );
+            thread_map::value_type* k = my_thread_map.add_one_thread( true );
+#if TBB_USE_ASSERT
+            if( !k ) {
+                // Client erred
+                __TBB_ASSERT(false, "server::get_threads: exceeded job_count\n");
+            }
+#endif
+            my_thread_map.bind_one_thread( *this, *k );
+            server_thread& t = k->thread();
+            if( t.try_grab_for( ts_omp_busy ) ) {
+                job& j = k->wait_for_job();
+                array[index] = &j;
+                // The preincrement instead of post-increment of index is deliberate.
+                t.omp_dispatch.produce( client(), j, cookie, index PRODUCE_ARG(*this) );
+                if( ++index==request_size ) 
+                    return;
+            } // else someone else snatched it.
+        }
+    }
+}
+#endif /* !RML_USE_WCRM */
+
+//------------------------------------------------------------------------
+// Methods of omp_dispatch_type
+//------------------------------------------------------------------------
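+//! Run the job that the master handed to this thread via produce().
+// Spins briefly if the master has set this thread's state to ts_omp_busy but has not
+// yet published the job, then passes the job to the client's process().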
+void omp_dispatch_type::consume() {
+    job_type* j = job; 
+    // Wait out the short window between the master setting this thread's state to ts_omp_busy
+    // and the master calling produce().
+    if( !j ) {
+        tbb::internal::atomic_backoff bo;
+        do {
+            bo.pause();
+            j = job;
+        } while( !j );
+    }
+    job = static_cast<job_type*>(NULL);
+    client->process(*j,cookie,index);
+#if TBB_USE_ASSERT
+    // Return of method process implies "decrease_load" from client's viewpoint, even though
+    // the actual adjustment of the_balance only happens when this thread really goes to sleep.
+    --server->net_delta;
+#endif /* TBB_USE_ASSERT */
+}
+
+#if !RML_USE_WCRM
+#if _WIN32||_WIN64
+void omp_connection_v2::deactivate( rml::job* j )
+{
+#if TBB_USE_ASSERT
+    net_delta -= 1;
+#endif
+    __TBB_ASSERT( j, NULL );
+    server_thread* thr = (server_thread*) scratch_ptr( *j );
+    thr->deactivate();
+}
+
+void omp_connection_v2::reactivate( rml::job* j )
+{
+    // Should not adjust the_balance because OMP client is supposed to 
+    // do try_increase_load() to reserve the threads to use.
+    __TBB_ASSERT( j, NULL );
+    server_thread* thr = (server_thread*) scratch_ptr( *j );
+    thr->reactivate();
+}
+#endif /* _WIN32||_WIN64 */
+
+//------------------------------------------------------------------------
+// Methods of server_thread
+//------------------------------------------------------------------------
+
+server_thread::server_thread() : 
+    ref_count(0),
+    link(NULL),
+    my_map_pos(),
+    my_conn(NULL), my_job(NULL), my_ja(NULL)
+{
+    state = ts_idle;
+    terminate = false;
+#if TBB_USE_ASSERT
+    has_active_thread = false;
+#endif /* TBB_USE_ASSERT */
+}
+
+server_thread::~server_thread() {
+    __TBB_ASSERT( !has_active_thread, NULL );
+}
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // Suppress overzealous compiler warnings about an initialized variable 'sink_for_alloca' not referenced
+    #pragma warning(push)
+    #pragma warning(disable:4189)
+#endif
+__RML_DECL_THREAD_ROUTINE server_thread::thread_routine( void* arg ) {
+    server_thread* self = static_cast<server_thread*>(arg);
+    AVOID_64K_ALIASING( self->my_index );
+#if TBB_USE_ASSERT
+    __TBB_ASSERT( !self->has_active_thread, NULL );
+    self->has_active_thread = true;
+#endif /* TBB_USE_ASSERT */
+    self->loop();
+    return 0;
+}
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    #pragma warning(pop)
+#endif
+
+void server_thread::launch( size_t stack_size ) {
+    thread_monitor::launch( thread_routine, this, stack_size );
+}
+
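+//! Try to put the calling thread to sleep on its monitor while it is idle.
+// The ts_idle -> ts_asleep transition can lose a race against try_grab_for(), in which
+// case the wait is cancelled; spurious wakeups are resolved by swapping back to ts_idle.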
+void server_thread::sleep_perhaps( thread_state_t asleep ) {
+    if( terminate ) return;
+    __TBB_ASSERT( asleep==ts_asleep, NULL );
+    thread_monitor::cookie c; 
+    monitor.prepare_wait(c);
+    if( state.compare_and_swap( asleep, ts_idle )==ts_idle ) {
+        if( !terminate ) {
+            monitor.commit_wait(c);
+            // Someone else woke me up.  The compare_and_swap further below deals with spurious wakeups.
+        } else {
+            monitor.cancel_wait();
+        }
+        thread_state_t s = read_state();
+        if( s==ts_asleep ) {
+            state.compare_and_swap( ts_idle, ts_asleep );
+            // I woke myself up, either because I cancelled the wait or suffered a spurious wakeup.
+        } else {
+            // Someone else woke me up; in that case the_balance has already been decremented by 1. -- TBB only
+            if( !is_omp_thread ) {
+                __TBB_ASSERT( s==ts_tbb_busy||s==ts_idle, NULL );
+            }
+        }
+    } else {
+        // someone else made it busy; see try_grab_for when state==ts_idle.
+        __TBB_ASSERT( state==ts_omp_busy||state==ts_tbb_busy, NULL );
+        monitor.cancel_wait();
+    }
+    __TBB_ASSERT( read_state()!=asleep, "a thread can only put itself to sleep" );
+}
+
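+//! Wake a sleeping thread and move it from 'from' (ts_asleep) to 'to'; returns true on success.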
+bool server_thread::wakeup( thread_state_t to, thread_state_t from ) {
+    bool success = false;
+    __TBB_ASSERT( from==ts_asleep && (to==ts_idle||to==ts_omp_busy||to==ts_tbb_busy), NULL );
+    if( state.compare_and_swap( to, from )==from ) {
+        if( !is_omp_thread ) __TBB_ASSERT( to==ts_idle||to==ts_tbb_busy, NULL );
+        // There is a small timing window that permits balance to become negative,
+        // but such occurrences are probably rare enough to not worry about, since
+        // at worst the result is slight temporary oversubscription.
+        monitor.notify();
+        success = true;
+    } 
+    return success;
+}
+
+//! Attempt to change a thread's state to the requested busy state (ts_omp_busy or ts_tbb_busy), waking it up if necessary.
+bool server_thread::try_grab_for( thread_state_t target_state ) {
+    bool success = false;
+    switch( read_state() ) {
+        case ts_asleep: 
+            success = wakeup( target_state, ts_asleep );
+            break;
+        case ts_idle:
+            success = state.compare_and_swap( target_state, ts_idle )==ts_idle;
+            break;
+        default:
+            // Thread is not available to be part of an OpenMP thread team.
+            break;
+    }
+    return success;
+}
+
+#if _WIN32||_WIN64
+void server_thread::deactivate() {
+    thread_state_t es = (thread_state_t) my_extra_state.fetch_and_store( ts_deactivated );
+    __TBB_ASSERT( my_extra_state==ts_deactivated, "someone else tampered with my_extra_state?" );
+    if( es==ts_none ) 
+        state = ts_idle;
+    else
+        __TBB_ASSERT( es==ts_reactivated, "Cannot call deactivate() while in ts_deactivated" );
+        // only the thread can transition itself from ts_deactivated to ts_none
+    __TBB_ASSERT( my_extra_state==ts_deactivated, "someone else tampered with my_extra_state?" );
+    my_extra_state = ts_none; // release the critical section
+    int bal = ++the_balance;
+    if( bal>0 )
+        wakeup_some_tbb_threads();
+    if( es==ts_none )
+        sleep_perhaps( ts_asleep );
+}
+
+void server_thread::reactivate() {
+    thread_state_t es;
+    do {
+        while( (es=read_extra_state())==ts_deactivated )
+            __TBB_Yield();
+        if( es==ts_reactivated ) {
+            __TBB_ASSERT( false, "two Reactivate() calls in a row.  Should not happen" );
+            return;
+        }
+        __TBB_ASSERT( es==ts_none, NULL );
+    } while( (thread_state_t)my_extra_state.compare_and_swap( ts_reactivated, ts_none )!=ts_none );
+    if( state!=ts_omp_busy ) {
+        my_extra_state = ts_none;
+        while( !try_grab_for( ts_omp_busy ) )
+            __TBB_Yield();
+    }
+}
+#endif /* _WIN32||_WIN64 */
+
+
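+//! Final cleanup of this thread's job when its connection is shutting down.
+// A TBB thread that still holds a coin returns it to the_balance first; then the job
+// is plugged and handed to the client for cleanup, and the last references to the
+// thread and to the connection are dropped.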
+template<typename Connection>
+bool server_thread::destroy_job( Connection& c ) {
+    __TBB_ASSERT( !is_omp_thread||(state==ts_idle||state==ts_omp_busy), NULL );
+    __TBB_ASSERT(  is_omp_thread||(state==ts_idle||state==ts_tbb_busy), NULL );
+    if( !is_omp_thread ) {
+        __TBB_ASSERT( state==ts_idle||state==ts_tbb_busy, NULL );
+        if( state==ts_idle )
+            state.compare_and_swap( ts_done, ts_idle );
+        // 'state' may be set to ts_tbb_busy by another thread.
+
+        if( state==ts_tbb_busy ) { // return the coin to the deposit
+            // need to deposit first to let the next connection see the change
+            ++the_balance;
+            state = ts_done; // no other thread changes the state when it is ts_*_busy
+        }
+    }
+    if( job_automaton* ja = my_ja ) {
+        rml::job* j;
+        if( ja->try_plug(j) ) {
+            __TBB_ASSERT( j, NULL );
+            c.client().cleanup(*j);
+            c.remove_client_ref();
+        } else {
+            // Some other thread took responsibility for cleaning up the job.
+        }
+    }
+    // Must remove the client reference first, because execution of
+    // c.remove_ref() can cause *this to be destroyed.
+    int k = remove_ref();
+    __TBB_ASSERT_EX( k==0, "more than one reference?" );
+#if TBB_USE_ASSERT
+    has_active_thread = false;
+#endif /* TBB_USE_ASSERT */
+    c.remove_server_ref();
+    return true;
+}
+
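+//! Route termination to destroy_job() for the connection type this thread serves.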
+bool server_thread::do_termination() {
+    if( is_omp_thread )
+        return destroy_job( *static_cast<omp_connection_v2*>(my_conn) );
+    else
+        return destroy_job( *static_cast<tbb_connection_v2*>(my_conn) );
+}
+
+//! Loop that each thread executes
+void server_thread::loop() {
+    if( is_omp_thread )
+        static_cast<omp_connection_v2*>(my_conn)->make_job( *this, *my_ja );
+    else
+        static_cast<tbb_connection_v2*>(my_conn)->make_job( *this, *my_ja );
+    for(;;) {
+        __TBB_Yield();
+        if( state==ts_idle )
+            sleep_perhaps( ts_asleep );   
+
+        // Check whether I should quit.
+        if( terminate )
+            if( do_termination() )
+                return;     
+             
+        // read the state 
+        thread_state_t s = read_state();
+        __TBB_ASSERT( s==ts_idle||s==ts_omp_busy||s==ts_tbb_busy, NULL );
+
+        if( s==ts_omp_busy ) {
+            // Enslaved by OpenMP team.  
+            omp_dispatch.consume();
+            /* here wake tbb threads up if feasible */
+            if( ++the_balance>0 )
+                wakeup_some_tbb_threads();
+            state = ts_idle;
+        } else if( s==ts_tbb_busy ) {
+            // do some TBB work.
+            __TBB_ASSERT( my_conn && my_job, NULL );
+            tbb_connection_v2& conn = *static_cast<tbb_connection_v2*>(my_conn);
+            // give openmp higher priority
+            bool has_coin = true;
+            if( conn.has_slack() ) {
+                // it has the coin, it should trip to the scheduler at least once as long as its slack is positive
+                do {
+                    if( conn.try_process( *this, *my_job ) )
+                        if( conn.has_slack() && the_balance>=0 )
+                            has_coin = !conn.wakeup_next_thread( my_map_pos );
+                } while( has_coin && conn.has_slack() && the_balance>=0 );
+            }
+            state = ts_idle;
+            if( has_coin ) {
+                ++the_balance; // return the coin back to the deposit
+                if( conn.has_slack() ) { // a new adjust_job_count_estimate() is in progress
+                                         // it may have missed my changes to state and/or the_balance
+                    if( --the_balance>=0 ) { // try to grab the coin back
+                        // I got the coin
+                        if( state.compare_and_swap( ts_tbb_busy, ts_idle )!=ts_idle )
+                            ++the_balance; // someone else enlisted me.
+                    } else {
+                        // overdraft. return the coin
+                        ++the_balance;
+                    }
+                } // else the new request will see my changes to state & the_balance.
+            }
+            /* here wake tbb threads up if feasible */
+            if( the_balance>0 )
+                wakeup_some_tbb_threads();
+        }
+    }
+}
+#endif /* !RML_USE_WCRM */
+
+#if RML_USE_WCRM
+
+class tbb_connection_v2;
+class omp_connection_v2;
+
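+// Helper that builds a ConcRT SchedulerPolicy from the seven key/value pairs below;
+// invalid policy keys or values are reported via assertions rather than propagated.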
+#define CREATE_SCHEDULER_POLICY(policy,min_thrs,max_thrs,stack_size) \
+    try {                                                                 \
+        policy = new SchedulerPolicy (7,                                  \
+                          SchedulerKind, RML_THREAD_KIND, /*defined in _rml_server_msrt.h*/ \
+                          MinConcurrency, min_thrs,                       \
+                          MaxConcurrency, max_thrs,                       \
+                          TargetOversubscriptionFactor, 1,                \
+                          ContextStackSize, stack_size/1000, /*ConcRT:kB, iRML:bytes*/ \
+                          ContextPriority, THREAD_PRIORITY_NORMAL,        \
+                          DynamicProgressFeedback, ProgressFeedbackDisabled ); \
+    } catch ( invalid_scheduler_policy_key & ) {                               \
+        __TBB_ASSERT( false, "invalid scheduler policy key exception caught" );\
+    } catch ( invalid_scheduler_policy_value & ) {                        \
+        __TBB_ASSERT( false, "invalid scheduler policy value exception caught" );\
+    }
+
+static unsigned int core_count;
+static tbb::atomic<int> core_count_inited;
+
+
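+//! Lazily determine the number of processors exactly once.
+// Uses the usual 0/1/2 initialization flag: the first caller publishes the value while
+// any concurrent callers spin until core_count_inited becomes 2.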
+static unsigned int get_processor_count()
+{
+    if( core_count_inited!=2 ) {
+        if( core_count_inited.compare_and_swap( 1, 0 )==0 ) {
+            core_count = GetProcessorCount();
+            core_count_inited = 2;
+        } else {
+            tbb::internal::spin_wait_until_eq( core_count_inited, 2 );
+        }
+    }
+    return core_count;
+}
+
+template<typename Connection>
+scheduler<Connection>::scheduler( Connection& conn ) : uid(GetSchedulerId()), my_conn(conn) {}
+
+template<>
+scheduler<tbb_connection_v2>::scheduler( tbb_connection_v2& conn ) : uid(GetSchedulerId()), my_conn(conn)
+{
+    rml::client& cl = my_conn.client();
+    unsigned max_job_count = cl.max_job_count();
+    unsigned count = get_processor_count();
+    __TBB_ASSERT( max_job_count>0, "max job count must be positive" );
+    __TBB_ASSERT( count>1, "The processor count must be greater than 1" );
+    if( max_job_count>count-1) max_job_count = count-1;
+    CREATE_SCHEDULER_POLICY( my_policy, 0, max_job_count, cl.min_stack_size() );
+}
+
+#if __RML_REMOVE_VIRTUAL_PROCESSORS_DISABLED
+template<>
+void scheduler<tbb_connection_v2>::RemoveVirtualProcessors( IVirtualProcessorRoot**, unsigned int)
+{
+}
+#else
+template<>
+void scheduler<tbb_connection_v2>::RemoveVirtualProcessors( IVirtualProcessorRoot** vproots, unsigned int count )
+{
+    if( !my_conn.is_closing() )
+        my_conn.remove_virtual_processors( vproots, count );
+}
+#endif
+
+template<>
+void scheduler<tbb_connection_v2>::NotifyResourcesExternallyIdle( IVirtualProcessorRoot** /*vproots*/, unsigned int /*count*/)
+{
+    __TBB_ASSERT( false, "NotifyResourcesExternallyIdle() is not allowed for TBB" ); 
+}
+
+template<>
+void scheduler<tbb_connection_v2>::NotifyResourcesExternallyBusy( IVirtualProcessorRoot** /*vproots*/, unsigned int /*count*/ )
+{
+    __TBB_ASSERT( false, "NotifyResourcesExternallyBusy() is not allowed for TBB" ); 
+}
+
+template<>
+scheduler<omp_connection_v2>::scheduler( omp_connection_v2& conn ) : uid(GetSchedulerId()), my_conn(conn)
+{
+    unsigned count = get_processor_count();
+    rml::client& cl = my_conn.client();
+    __TBB_ASSERT( count>1, "The processor count must be greater than 1" );
+    CREATE_SCHEDULER_POLICY( my_policy, count-1, count-1, cl.min_stack_size() );
+}
+
+template<>
+void scheduler<omp_connection_v2>::RemoveVirtualProcessors( IVirtualProcessorRoot** /*vproots*/, unsigned int /*count*/ ) {
+    __TBB_ASSERT( false, "RemoveVirtualProcessors() is not allowed for OMP" ); 
+}
+
+template<>
+void scheduler<omp_connection_v2>::NotifyResourcesExternallyIdle( IVirtualProcessorRoot** vproots, unsigned int count ){
+    if( !my_conn.is_closing() )
+        my_conn.notify_resources_externally_idle( vproots, count );
+}
+
+template<>
+void scheduler<omp_connection_v2>::NotifyResourcesExternallyBusy( IVirtualProcessorRoot** vproots, unsigned int count ){
+    if( !my_conn.is_closing() )
+        my_conn.notify_resources_externally_busy( vproots, count );
+}
+
+/* ts_idle, ts_asleep, ts_busy */
+void tbb_server_thread::Dispatch( DispatchState* ) {
+    // Activate() will resume a thread right after Deactivate() as if it returns from the call
+    tbb_connection_v2* tbb_conn = static_cast<tbb_connection_v2*>(my_conn);
+    make_job( *tbb_conn, *this );
+
+    for( ;; ) {
+        // Try to wake some tbb threads if the balance is positive.
+        // When a thread is added by ConcRT and enters here for the first time,
+        // the thread may wake itself up (i.e., atomically change its state to ts_busy).
+        if( the_balance>0 )
+             wakeup_some_tbb_threads();
+        if( read_state()!=ts_busy )
+            if( sleep_perhaps() )
+                return;
+        if( terminate )
+            if( initiate_termination() )
+                return;
+        if( read_state()==ts_busy ) {
+            // this thread has a coin (i.e., state==ts_busy); it should trip to the scheduler at least once
+            if ( tbb_conn->has_slack() ) {
+                do {
+                    tbb_conn->try_process( *wait_for_job() );
+                } while( tbb_conn->has_slack() && the_balance>=0 && !is_removed() );
+            }
+            __TBB_ASSERT( read_state()==ts_busy, "thread is not in busy state after returning from process()" );
+            // see remove_virtual_processors()
+            if( my_state.compare_and_swap( ts_idle, ts_busy )==ts_busy ) {
+                int bal = ++the_balance;
+                if( tbb_conn->has_slack() ) {
+                    // slack is positive, volunteer to help
+                    bal = --the_balance;  // try to grab the coin back
+                    if( bal>=0 ) { // got the coin back
+                        if( my_state.compare_and_swap( ts_busy, ts_idle )!=ts_idle )
+                            ++the_balance; // someone else enlisted me.
+                        // else my_state is ts_busy, I will come back to tbb_conn->try_process().
+                    } else {
+                        // overdraft. return the coin
+                        ++the_balance;
+                    }
+                } // else the new request will see my changes to state & the_balance.
+            } else {
+                __TBB_ASSERT( false, "someone tampered with my state" );
+            } 
+        } // someone else might set the state to something other than ts_idle
+    }
+}
+
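+//! Main loop of an OMP worker under ConcRT: sleep while idle, run the produced job
+// when grabbed (ts_busy), return the coin to the balance, and become idle again.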
+void omp_server_thread::Dispatch( DispatchState* ) {
+    // Activate() will resume a thread right after Deactivate() as if it returns from the call
+    make_job( *static_cast<omp_connection_v2*>(my_conn), *this );
+
+    for( ;; ) {
+        if( read_state()!=ts_busy )
+            sleep_perhaps();
+        if( terminate ) {
+            if( initiate_termination() )
+                return;
+        }
+        if( read_state()==ts_busy ) {
+            omp_data.consume();
+            __TBB_ASSERT( read_state()==ts_busy, "thread is not in busy state after returning from process()" );
+            my_thread_map.adjust_balance( 1 );
+            set_state( ts_idle );
+        }
+        // someone else might set the state to something other than ts_idle
+    }
+}
+
+//! Attempt to change a thread's state to ts_busy, waking it up if necessary.
+thread_grab_t server_thread_rep::try_grab_for() {
+    thread_grab_t res = wk_failed;
+    thread_state_t s = read_state();
+    switch( s ) {
+    case ts_asleep: 
+        if( wakeup( ts_busy, ts_asleep ) )
+            res = wk_from_asleep;
+        __TBB_ASSERT( res==wk_failed||read_state()==ts_busy, NULL );
+        break;
+    case ts_idle:
+        if( my_state.compare_and_swap( ts_busy, ts_idle )==ts_idle )
+            res = wk_from_idle;
+        // At this point the thread is grabbed (i.e., its state has changed to ts_busy).
+        // It is possible that the thread then processes the job, returns from process(), and
+        // sets its state to ts_idle again.  In some cases, it even sets its state to ts_asleep.
+        break;
+    default:
+        break;
+    }
+    return res;
+}
+
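+//! Hand the virtual processor back to ConcRT after this thread has been marked removed.
+// Returns true only when the connection is terminating and the thread has completed its cleanup.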
+bool tbb_server_thread::switch_out() {
+    thread_state_t s = read_state();
+    __TBB_ASSERT( s==ts_asleep||s==ts_busy, NULL );
+    // Either this thread came back from the TBB scheduler and successfully changed its state to ts_asleep,
+    // or the master enlisted it and woke it up by Activate()'ing it, so it is now emerging from Deactivate()
+    // in state ts_busy.  ConcRT requested removal of the vp associated with the thread, and RML marked the
+    // thread removed; in either case the vp should now be given back.
+    IExecutionResource* old_vp = my_execution_resource;
+    if( s==ts_busy ) {
+        ++the_balance;
+        my_state = ts_asleep;
+    }
+    IThreadProxy* proxy = my_proxy;
+    __TBB_ASSERT( proxy, NULL );
+    my_execution_resource = (IExecutionResource*) c_remove_prepare;
+    old_vp->Remove( my_scheduler );
+    my_execution_resource = (IExecutionResource*) c_remove_returned;
+    int cnt = --activation_count;
+    __TBB_ASSERT_EX( cnt==0||cnt==1, "too many activations?" );
+    proxy->SwitchOut();
+    if( terminate ) {
+        bool activated = activation_count==1;
+#if TBB_USE_ASSERT
+        /* In a rare sequence of events, a thread comes out of SwitchOut with activation_count==1.
+         * 1) The thread is SwitchOut'ed.
+         * 2) AddVirtualProcessors() arrived and the thread is Activated.
+         * 3) The thread is coming out of SwitchOut().
+         * 4) request_close_connection arrives and inform the thread that it is time to terminate.
+         * 5) The thread hits the check and falls into the path with 'activated==true'.
+         * In that case, do the clean-up but do not switch to the thread scavenger; rather simply return to RM.
+         */
+        if( activated ) {
+            // thread is 'revived' in add_virtual_processors after being Activated().
+            // so, if the thread extra state is still marked 'removed', it will shortly change to 'none'
+            // i.e., !is_removed().  The thread state is changed to ts_idle before the extra state, so
+            // the thread's state should be either ts_idle or ts_done.
+            while( is_removed() )
+                __TBB_Yield();
+            thread_state_t s = read_state();
+            __TBB_ASSERT( s==ts_idle || s==ts_done, NULL );
+        }
+#endif
+        __TBB_ASSERT( my_state==ts_asleep||my_state==ts_idle, NULL );
+        // it is possible that in make_job() the thread may not have a chance to create a job.
+        // my_job may not be set if the thread did not get a chance to process client's job (i.e., call try_process())
+        rml::job* j;
+        if( my_job_automaton.try_plug(j) ) {
+            __TBB_ASSERT( j, NULL );
+            my_client.cleanup(*j);
+            my_conn->remove_client_ref();
+        }
+        // Must remove the client reference first, because execution of
+        // c.remove_ref() can cause *this to be destroyed.
+        if( !activated )
+            proxy->SwitchTo( my_thread_map.get_thread_scavenger(), Idle );
+        my_conn->remove_server_ref();
+        return true;
+    }
+    // We revive a thread in add_virtual_processors() after we Activate the thread on a new virtual processor.
+    // So briefly wait until the thread's my_execution_resource gets set.
+    while( get_virtual_processor()==c_remove_returned )
+        __TBB_Yield();
+    return false;
+}
+
+bool tbb_server_thread::sleep_perhaps () {
+    if( terminate ) return false;
+    thread_state_t s = read_state();
+    if( s==ts_idle ) {
+        if( my_state.compare_and_swap( ts_asleep, ts_idle )==ts_idle ) {
+            // If a thread is between read_state() and compare_and_swap(), and the master tries to terminate,
+            // the master's compare_and_swap() will fail because the thread's state is ts_idle.
+            // We need to check whether terminate is true before letting the thread go to sleep; otherwise
+            // we will miss the terminate signal.
+            if( !terminate ) {
+                if( !is_removed() ) {
+                    --activation_count;
+                    get_virtual_processor()->Deactivate( this );
+                }
+                if( is_removed() ) {
+                    if( switch_out() )
+                        return true;
+                    __TBB_ASSERT( my_execution_resource>c_remove_returned, NULL );
+                }
+                // in add_virtual_processors(), when we revive a thread, we change its state after Activating the thread;
+                // in that case the state may be ts_asleep for a short period
+                while( read_state()==ts_asleep )
+                    __TBB_Yield();
+            } else {
+                if( my_state.compare_and_swap( ts_done, ts_asleep )!=ts_asleep ) {
+                    --activation_count;
+                    // unbind() changed my state. It will call Activate(). So issue a matching Deactivate()
+                    get_virtual_processor()->Deactivate( this );
+                }
+            }
+        }
+    } else {
+        __TBB_ASSERT( s==ts_busy, NULL );
+    }
+    return false;
+}
+
+void omp_server_thread::sleep_perhaps () {
+    if( terminate ) return;
+    thread_state_t s = read_state();
+    if( s==ts_idle ) {
+        if( my_state.compare_and_swap( ts_asleep, ts_idle )==ts_idle ) {
+            // If a thread is between read_state() and compare_and_swap(), and the master tries to terminate,
+            // the master's compare_and_swap() will fail because the thread's state is ts_idle.
+            // We need to check whether terminate is true before letting the thread go to sleep; otherwise
+            // we will miss the terminate signal.
+            if( !terminate ) {
+                get_virtual_processor()->Deactivate( this );
+                __TBB_ASSERT( !is_removed(), "OMP threads should not be deprived of a virtual processor" );
+                __TBB_ASSERT( read_state()!=ts_asleep, NULL );
+            } else {
+                if( my_state.compare_and_swap( ts_done, ts_asleep )!=ts_asleep )
+                    // unbind() changed my state. It will call Activate(). So issue a matching Deactivate()
+                    get_virtual_processor()->Deactivate( this );
+            }
+        }
+    } else {
+        __TBB_ASSERT( s==ts_busy, NULL );
+    }
+}
+        
+bool tbb_server_thread::initiate_termination() {
+    if( read_state()==ts_busy ) {
+        int bal = ++the_balance; 
+        if( bal>0 ) wakeup_some_tbb_threads();
+    }
+    return destroy_job( (tbb_connection_v2*) my_conn ); 
+}
+
+template<typename Connection>
+bool server_thread_rep::destroy_job( Connection* c ) {
+    __TBB_ASSERT( my_state!=ts_asleep, NULL );
+    rml::job* j;
+    if( my_job_automaton.try_plug(j) ) {
+        __TBB_ASSERT( j, NULL );
+        my_client.cleanup(*j);
+        c->remove_client_ref();
+    }
+    // Must remove the client reference first, because execution of
+    // c.remove_ref() can cause *this to be destroyed.
+    c->remove_server_ref();
+    return true;
+}
+
+void thread_map::assist_cleanup( bool assist_null_only ) {
+    // To avoid deadlock, the current thread *must* help out with cleanups that have not started,
+    // because the thread that created the job may be busy for a long time.
+    for( iterator i = begin(); i!=end(); ++i ) {
+        rml::job* j=0;
+        server_thread* thr = (*i).second;
+        job_automaton& ja = thr->my_job_automaton;
+        if( assist_null_only ? ja.try_plug_null() : ja.try_plug(j) ) {
+            if( j ) {
+                my_client.cleanup(*j);
+            } else {
+                // server thread did not get a chance to create a job.
+            }
+            remove_client_ref();
+        } 
+    }
+}
+
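+// Called when ConcRT grants virtual processors to a TBB connection.  Map entries of
+// previously removed VPs are revived; for genuinely new VPs a tbb_server_thread is
+// allocated, inserted into the map, and Activate()'d on its virtual processor.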
+void thread_map::add_virtual_processors( IVirtualProcessorRoot** vproots, unsigned int count, tbb_connection_v2& conn, ::tbb::spin_mutex& mtx )
+{
+#if TBB_USE_ASSERT
+    int req_cnt = ++n_add_vp_requests;
+    __TBB_ASSERT( req_cnt==1, NULL );
+#endif
+    std::vector<thread_map::iterator> vec(count);
+    std::vector<tbb_server_thread*> tvec(count);
+    iterator end;
+
+    {
+        tbb::spin_mutex::scoped_lock lck( mtx );
+        __TBB_ASSERT( my_map.size()==0||count==1, NULL );
+        end = my_map.end(); //remember 'end' at the time of 'find'
+        // find entries in the map for those VPs that were previously added and then removed.
+        for( size_t i=0; i<count; ++i ) {
+            vec[i] = my_map.find( (key_type) vproots[i] );
+#if TBB_USE_DEBUG
+            if( vec[i]!=end ) {
+                tbb_server_thread* t = (tbb_server_thread*) (*vec[i]).second;
+                IVirtualProcessorRoot* v = t->get_virtual_processor();
+                __TBB_ASSERT( v==c_remove_prepare||v==c_remove_returned, NULL );
+            }
+#endif
+        }
+
+        iterator nxt = my_map.begin();
+        for( size_t i=0; i<count; ++i ) {
+            if( vec[i]!=end ) {
+#if TBB_USE_ASSERT
+                tbb_server_thread* t = (tbb_server_thread*) (*vec[i]).second;
+                __TBB_ASSERT( t->read_state()==ts_asleep, NULL );
+                IVirtualProcessorRoot* r = t->get_virtual_processor();
+                __TBB_ASSERT( r==c_remove_prepare||r==c_remove_returned, NULL );
+#endif
+                continue;   
+            }
+
+            if( my_unrealized_threads>0 ) {
+                --my_unrealized_threads;
+            } else {
+                __TBB_ASSERT( nxt!=end, "nxt should not be thread_map::iterator::end" );
+                // find a removed thread context for i
+                for( ; nxt!=end; ++nxt ) {
+                    tbb_server_thread* t = (tbb_server_thread*) (*nxt).second;
+                    if( t->is_removed() && t->read_state()==ts_asleep && t->get_virtual_processor()==c_remove_returned ) {
+                        vec[i] = nxt++;
+                        break;
+                    }
+                }
+                // break target
+                if( vec[i]==end ) // ignore the excess VP.
+                    vproots[i] = NULL;
+            }
+        }
+    }
+
+    for( size_t i=0; i<count; ++i ) {
+        __TBB_ASSERT( !tvec[i], NULL );
+        if( vec[i]==end ) {
+            if( vproots[i] ) {
+                tvec[i] = my_tbb_allocator.allocate(1);
+                new ( tvec[i] ) tbb_server_thread( false, my_scheduler, (IExecutionResource*)vproots[i], &conn, *this, my_client );
+            }
+#if TBB_USE_ASSERT
+        } else {
+            tbb_server_thread* t = (tbb_server_thread*) (*vec[i]).second;
+            __TBB_ASSERT( t->GetProxy(), "Proxy is cleared?" );
+#endif
+        }
+    }
+
+    {
+        tbb::spin_mutex::scoped_lock lck( mtx );
+
+        bool closing = is_closing();
+
+        for( size_t i=0; i<count; ++i ) {
+            if( vec[i]==end ) {
+                if( vproots[i] ) {
+                    thread_map::key_type key = (thread_map::key_type) vproots[i];
+                    vec[i] = insert( key, (server_thread*) tvec[i] );
+                    my_client_ref_count.add_ref();
+                    my_server_ref_count.add_ref();
+                }
+            } else if( !closing ) {
+                tbb_server_thread* t = (tbb_server_thread*) (*vec[i]).second;
+
+                if( (*vec[i]).first!=(thread_map::key_type)vproots[i] ) {
+                    my_map.erase( vec[i] );
+                    thread_map::key_type key = (thread_map::key_type) vproots[i];
+                    __TBB_ASSERT( key, NULL );
+                    vec[i] = insert( key, t );
+                }
+                __TBB_ASSERT( t->read_state()==ts_asleep, NULL );
+                // We did not decrement server/client ref count when a thread is removed.
+                // So, don't increment server/client ref count here.
+            }
+        }
+
+        // We could check is_closing() earlier.  That would require marking the newly allocated server_thread objects
+        // that are not inserted into the thread_map and deallocating them.  Doing so seems more cumbersome
+        // than simply adding them to the thread_map and letting thread_map's destructor take care of reclamation.
+        __TBB_ASSERT( closing==is_closing(), NULL );
+        if( closing ) return;
+    }
+
+    for( size_t i=0; i<count; ++i ) {
+        if( vproots[i] ) {
+            tbb_server_thread* t = (tbb_server_thread*) (*vec[i]).second;
+            __TBB_ASSERT( tvec[i]!=NULL||t->GetProxy(), "Proxy is cleared?" );
+            if( t->is_removed() )
+                __TBB_ASSERT( t->get_virtual_processor()==c_remove_returned, NULL );
+            int cnt = ++t->activation_count;
+            __TBB_ASSERT_EX( cnt==0||cnt==1, NULL );
+            vproots[i]->Activate( t );
+            if( t->is_removed() )
+                t->revive( my_scheduler, vproots[i], my_client );
+        }
+    }
+#if TBB_USE_ASSERT
+    req_cnt = --n_add_vp_requests;
+    __TBB_ASSERT( req_cnt==0, NULL );
+#endif
+}
+
+void thread_map::remove_virtual_processors( IVirtualProcessorRoot** vproots, unsigned count, ::tbb::spin_mutex& mtx ) {
+    if( my_map.size()==0 )
+        return;
+    tbb::spin_mutex::scoped_lock lck( mtx );
+
+    if( is_closing() ) return;
+
+    for( unsigned int c=0; c<count; ++c ) {
+        iterator i = my_map.find( (key_type) vproots[c] );
+        if( i==my_map.end() ) {
+            thread_scavenger_thread* tst = my_thread_scavenger_thread;
+            if( !tst ) {
+                // Remove unknown vp from my scheduler; 
+                vproots[c]->Remove( my_scheduler );
+            } else {
+                while( (tst=my_thread_scavenger_thread)==c_claimed )
+                    __TBB_Yield();
+                if( vproots[c]!=tst->get_virtual_processor() )
+                    vproots[c]->Remove( my_scheduler );
+            }
+            continue;
+        }
+        tbb_server_thread* thr = (tbb_server_thread*) (*i).second;
+        __TBB_ASSERT( thr->tbb_thread, "incorrect type of server_thread" );
+        thr->set_removed();
+        if( thr->read_state()==ts_asleep ) {
+            while( thr->activation_count>0 ) {
+                if( thr->get_virtual_processor()<=c_remove_returned )
+                    break;
+                __TBB_Yield();
+            }
+            if( thr->get_virtual_processor()>c_remove_returned ) {
+                // the thread is in Deactivated state
+                ++thr->activation_count;
+                // wake the thread up so that it Switches Out itself.
+                thr->get_virtual_processor()->Activate( thr );
+            } // else, it is Switched Out
+        } // else the thread will see that it is removed and proceed to switch itself out without Deactivation 
+    }
+}
+
+void thread_map::add_virtual_processors( IVirtualProcessorRoot** vproots, unsigned int count, omp_connection_v2& conn, ::tbb::spin_mutex& mtx )
+{
+    std::vector<thread_map::iterator> vec(count);
+    std::vector<server_thread*> tvec(count);
+    iterator end;
+
+    {
+        tbb::spin_mutex::scoped_lock lck( mtx );
+        // read the map
+        end = my_map.end(); //remember 'end' at the time of 'find'
+        for( size_t i=0; i<count; ++i )
+            vec[i] = my_map.find( (key_type) vproots[i] );
+    }
+
+    for( size_t i=0; i<count; ++i ) {
+        __TBB_ASSERT( !tvec[i], NULL );
+        if( vec[i]==end ) {
+            tvec[i] = my_omp_allocator.allocate(1);
+            new ( tvec[i] ) omp_server_thread( false, my_scheduler, (IExecutionResource*)vproots[i], &conn, *this, my_client );
+        } 
+    }
+
+    {
+        tbb::spin_mutex::scoped_lock lck( mtx );
+
+        for( size_t i=0; i<count; ++i ) {
+            if( vec[i]==my_map.end() ) {
+                thread_map::key_type key = (thread_map::key_type) vproots[i];
+                vec[i] = insert( key, tvec[i] );
+                my_client_ref_count.add_ref();
+                my_server_ref_count.add_ref();
+            }
+        }
+
+        // We could check is_closing() earlier.  That would require marking the newly allocated server_thread objects
+        // that are not inserted into the thread_map and deallocating them.  Doing so seems more cumbersome
+        // than simply adding them to the thread_map and letting thread_map's destructor take care of reclamation.
+        if( is_closing() ) return;
+    }
+
+    for( size_t i=0; i<count; ++i )
+        vproots[i]->Activate( (*vec[i]).second );
+
+    {
+        tbb::spin_mutex::scoped_lock lck( mtx );
+        for( size_t i=0; i<count; ++i )
+            original_exec_resources.push_back( vproots[i] );
+    }
+}
+
+void thread_map::mark_virtual_processors_as_lent( IVirtualProcessorRoot** vproots, unsigned count, ::tbb::spin_mutex& mtx ) {
+    tbb::spin_mutex::scoped_lock lck( mtx );
+
+    if( is_closing() ) return;
+
+    iterator end = my_map.end();
+    for( unsigned int c=0; c<count; ++c ) {
+        iterator i = my_map.find( (key_type) vproots[c] );
+        if( i==end ) {
+            // The vproc has not been added to the map in create_oversubscribers()
+            my_map.insert( hash_map_type::value_type( (key_type) vproots[c], (server_thread*)1 ) );
+        } else {
+            server_thread* thr = (*i).second;
+            if( ((uintptr_t)thr)&~(uintptr_t)1 ) {
+                __TBB_ASSERT( !thr->is_removed(), "incorrectly removed" ); 
+                ((omp_server_thread*)thr)->set_lent();
+            }
+        }
+    }
+}
+
+void thread_map::create_oversubscribers( unsigned n, std::vector<server_thread*>& thr_vec, omp_connection_v2& conn, ::tbb::spin_mutex& mtx ) {
+    std::vector<IExecutionResource*> curr_exec_rsc;
+    {
+        tbb::spin_mutex::scoped_lock lck( mtx );
+        curr_exec_rsc = original_exec_resources; // copy construct
+    }
+    typedef std::vector<IExecutionResource*>::iterator iterator_er;
+    typedef ::std::vector<std::pair<hash_map_type::key_type, hash_map_type::mapped_type> > hash_val_vector_t;
+    hash_val_vector_t v_vec(n);
+    iterator_er begin = curr_exec_rsc.begin(); 
+    iterator_er end   = curr_exec_rsc.end(); 
+    iterator_er i = begin;
+    for( unsigned c=0; c<n; ++c ) {
+        IVirtualProcessorRoot* vpr = my_scheduler_proxy->CreateOversubscriber( *i );
+        omp_server_thread* t = new ( my_omp_allocator.allocate(1) ) omp_server_thread( true, my_scheduler, (IExecutionResource*)vpr, &conn, *this, my_client );
+        thr_vec[c] = t;
+        v_vec[c] = hash_map_type::value_type( (key_type) vpr, t );
+        if( ++i==end ) i = begin;
+    }
+
+    {
+        tbb::spin_mutex::scoped_lock lck( mtx );
+
+        if( is_closing() ) return;
+
+        iterator end = my_map.end();
+        unsigned c = 0;
+        for( hash_val_vector_t::iterator vi=v_vec.begin(); vi!=v_vec.end(); ++vi, ++c ) {
+            iterator i = my_map.find( (key_type) (*vi).first );
+            if( i==end ) {
+                my_map.insert( *vi );
+            } else {
+                // the vproc was added to the map only as a lent/returned placeholder
+                // by mark_virtual_processors_as_lent()/_as_returned();
+                unsigned lent = (unsigned) (*i).second;
+                __TBB_ASSERT( lent<=1, "vproc map entry added incorrectly?");
+                (*i).second = thr_vec[c];
+                if( lent ) 
+                    ((omp_server_thread*)thr_vec[c])->set_lent();
+                else 
+                    ((omp_server_thread*)thr_vec[c])->set_returned();
+            }
+            my_client_ref_count.add_ref();
+            my_server_ref_count.add_ref();
+        }
+    }
+}
+
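+//! Wake up to c sleeping TBB workers, limited by the coins available in the_balance.
+// Threads taken out of ts_asleep are collected under the lock and Activate()'d only
+// after the map lock has been released.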
+void thread_map::wakeup_tbb_threads( int c, ::tbb::spin_mutex& mtx ) {
+    std::vector<tbb_server_thread*> vec(c);
+
+    size_t idx = 0;
+    {
+        tbb::spin_mutex::scoped_lock lck( mtx );
+
+        if( is_closing() ) return;
+        // only one RML thread is in here to wake worker threads up.
+
+        int bal = the_balance;
+        int cnt = c<bal ? c : bal;
+
+        if( cnt<=0 ) { return; }
+
+        for( iterator i=begin(); i!=end(); ++i ) {
+            tbb_server_thread* thr = (tbb_server_thread*) (*i).second;
+            // ConcRT RM should take threads away from TBB scheduler instead of lending them to another scheduler
+            if( thr->is_removed() ) 
+                continue;
+
+            if( --the_balance>=0 ) {
+                thread_grab_t res;
+                while( (res=thr->try_grab_for())!=wk_from_idle ) {
+                    if( res==wk_from_asleep ) {
+                        vec[idx++] = thr;
+                        break;
+                    } else {
+                        thread_state_t s = thr->read_state();
+                        if( s==ts_busy ) {// failed because already assigned. move on.
+                            ++the_balance;
+                            goto skip;
+                        }
+                    }
+                }
+                thread_state_t s = thr->read_state();
+                __TBB_ASSERT_EX( s==ts_busy, "should have set the state to ts_busy" );
+                if( --cnt==0 )
+                    break;
+            } else {
+                // overdraft
+                ++the_balance;
+                break;
+            }
+skip:
+            ;
+        }
+    }
+
+    for( size_t i=0; i<idx; ++i ) {
+        tbb_server_thread* thr = vec[i];
+        __TBB_ASSERT( thr, NULL );
+        thread_state_t s = thr->read_state();
+        __TBB_ASSERT_EX( s==ts_busy, "should have set the state to ts_busy" );
+        ++thr->activation_count;
+        thr->get_virtual_processor()->Activate( thr );
+    }
+
+}
+
+void thread_map::mark_virtual_processors_as_returned( IVirtualProcessorRoot** vprocs, unsigned int count, tbb::spin_mutex& mtx ) {
+    {
+        tbb::spin_mutex::scoped_lock lck( mtx );
+
+        if( is_closing() ) return;
+
+        iterator end = my_map.end();
+        for(unsigned c=0; c<count; ++c ) {
+            iterator i = my_map.find( (key_type) vprocs[c] );
+            if( i==end ) {
+                // the vproc has not been added to the map in create_oversubscribers()
+                my_map.insert( hash_map_type::value_type( (key_type) vprocs[c], static_cast<server_thread*>(0) ) );
+            } else {
+                omp_server_thread* thr = (omp_server_thread*) (*i).second;
+                if( ((uintptr_t)thr)&~(uintptr_t)1 ) {
+                    __TBB_ASSERT( !thr->is_removed(), "incorrectly removed" ); 
+                    // we should not make any assumptions about the initial state of an added vproc.
+                    thr->set_returned();
+                }
+            }
+        }
+    }
+}
+
+
+void thread_map::unbind( rml::server& /*server*/, tbb::spin_mutex& mtx ) {
+    {
+        tbb::spin_mutex::scoped_lock lck( mtx );
+        shutdown_in_progress = true;  // ignore any callbacks from ConcRT RM
+
+        // Ask each server_thread to cleanup its job for this server.
+        for( iterator i = begin(); i!=end(); ++i ) {
+            server_thread* t = (*i).second;
+            t->terminate = true;
+            if( t->is_removed() ) {
+                // This is for TBB only as ConcRT RM does not request OMP schedulers to remove virtual processors
+                if( t->read_state()==ts_asleep ) {
+                    __TBB_ASSERT( my_thread_scavenger_thread, "this is TBB connection; thread_scavenger_thread must be allocated" );
+                    // thread is on its way to switch_out; see remove_virtual_processors() where
+                    // the thread is Activated() to bring it back from 'Deactivated' in sleep_perhaps()
+                    // now assume that the thread will go to SwitchOut()
+#if TBB_USE_ASSERT
+                    while( t->get_virtual_processor()>c_remove_returned )
+                        __TBB_Yield();
+#endif
+                    // A removed thread is supposed to proceed to SwitchOut.
+                    // There, we remove client&server references.
+                }
+            } else {
+                if( t->wakeup( ts_done, ts_asleep ) ) {
+                    if( t->tbb_thread )
+                        ++((tbb_server_thread*)t)->activation_count;
+                    t->get_virtual_processor()->Activate( t );
+                    // We mark the thread_map so that, once the termination sequence has started, we ignore
+                    // all notifications from ConcRT RM.
+                }
+            }
+        }
+    }
+    // Remove extra ref to client.
+    remove_client_ref();
+
+    if( my_thread_scavenger_thread ) {
+        thread_scavenger_thread* tst;
+        while( (tst=my_thread_scavenger_thread)==c_claimed )
+            __TBB_Yield();
+#if TBB_USE_ASSERT
+        ++my_thread_scavenger_thread->activation_count;
+#endif
+        tst->get_virtual_processor()->Activate( tst );
+    }
+}
+
+#if !__RML_REMOVE_VIRTUAL_PROCESSORS_DISABLED
+void thread_map::allocate_thread_scavenger( IExecutionResource* v ) 
+{
+    if( my_thread_scavenger_thread>c_claimed ) return;
+    thread_scavenger_thread* c = my_thread_scavenger_thread.fetch_and_store((thread_scavenger_thread*)c_claimed);
+    if( c==NULL ) { // successfully claimed
+        add_server_ref();
+#if TBB_USE_ASSERT
+        ++n_thread_scavengers_created;
+#endif
+        __TBB_ASSERT( v, NULL );
+        IVirtualProcessorRoot* vpr = my_scheduler_proxy->CreateOversubscriber( v );
+        my_thread_scavenger_thread = c = new ( my_scavenger_allocator.allocate(1) ) thread_scavenger_thread( my_scheduler, vpr, *this );
+#if TBB_USE_ASSERT
+        ++c->activation_count;
+#endif
+        vpr->Activate( c );
+    } else if( c>c_claimed ) {
+        my_thread_scavenger_thread = c;
+    }
+}
+#endif
+
+void thread_scavenger_thread::Dispatch( DispatchState* )
+{
+    __TBB_ASSERT( my_proxy, NULL );
+#if TBB_USE_ASSERT
+    --activation_count;
+#endif
+    get_virtual_processor()->Deactivate( this );
+    for( thread_map::iterator i=my_thread_map.begin(); i!=my_thread_map.end(); ++i ) {
+        tbb_server_thread* t = (tbb_server_thread*) (*i).second;
+        if( t->read_state()==ts_asleep && t->is_removed() ) {
+            while( t->get_execution_resource()!=c_remove_returned )
+                __TBB_Yield();
+            my_proxy->SwitchTo( t, Blocking );
+        }
+    }
+    get_virtual_processor()->Remove( my_scheduler );
+    my_thread_map.remove_server_ref();
+    // signal to the connection scavenger that I am done with the map.
+    __TBB_ASSERT( activation_count==1, NULL );
+    set_state( ts_done );
+}
+
+//! Windows "DllMain" that handles startup and shutdown of dynamic library.
+extern "C" bool WINAPI DllMain( HINSTANCE /*hinstDLL*/, DWORD fwdReason, LPVOID lpvReserved ) {
+    void assist_cleanup_connections();
+    if( fwdReason==DLL_PROCESS_DETACH ) {
+        // dll is being unloaded
+        if( !lpvReserved ) // if FreeLibrary has been called
+            assist_cleanup_connections();
+    }
+    return true;
+}
+
+void free_all_connections( uintptr_t conn_ex ) {
+    while( conn_ex ) {
+        bool is_tbb = (conn_ex&2)>0;
+        //clear extra bits
+        uintptr_t curr_conn = conn_ex & ~(uintptr_t)3;
+        __TBB_ASSERT( curr_conn, NULL );
+
+        // Wait for worker threads to return
+        if( is_tbb ) {
+            tbb_connection_v2* tbb_conn = reinterpret_cast<tbb_connection_v2*>(curr_conn);
+            conn_ex = reinterpret_cast<uintptr_t>(tbb_conn->next_conn);
+            while( tbb_conn->my_thread_map.remove_server_ref()>0 ) 
+                __TBB_Yield();
+            delete tbb_conn;
+        } else {
+            omp_connection_v2* omp_conn = reinterpret_cast<omp_connection_v2*>(curr_conn);
+            conn_ex = reinterpret_cast<uintptr_t>(omp_conn->next_conn);
+            while( omp_conn->my_thread_map.remove_server_ref()>0 ) 
+                __TBB_Yield();
+            delete omp_conn;
+        }
+    }
+}
+
+void assist_cleanup_connections()
+{
+    //signal to connection_scavenger_thread to terminate 
+    uintptr_t tail = connections_to_reclaim.tail;
+    while( connections_to_reclaim.tail.compare_and_swap( garbage_connection_queue::plugged, tail )!=tail ) {
+        __TBB_Yield();
+        tail = connections_to_reclaim.tail;
+    }
+
+    __TBB_ASSERT( connection_scavenger.state==ts_busy || connection_scavenger.state==ts_asleep, NULL );
+    // Scavenger thread may be busy freeing connections
+    DWORD thr_exit_code = STILL_ACTIVE;
+    while( connection_scavenger.state==ts_busy ) {
+        if( GetExitCodeThread( connection_scavenger.thr_handle, &thr_exit_code )>0 )
+            if( thr_exit_code!=STILL_ACTIVE )
+                break;
+        __TBB_Yield();
+        thr_exit_code = STILL_ACTIVE;
+    }
+    if( connection_scavenger.state==ts_asleep && thr_exit_code==STILL_ACTIVE )
+        connection_scavenger.wakeup(); // wake the connection scavenger thread up
+
+    // it is possible that the connection scavenger thread already exited.  Take over its responsibility.
+    if( tail && connections_to_reclaim.tail!=garbage_connection_queue::plugged_acked ) {
+        // atomically claim the head of the list.
+        uintptr_t head = connections_to_reclaim.head.fetch_and_store( garbage_connection_queue::empty );
+        if( head==garbage_connection_queue::empty )
+            head = tail;
+        connection_scavenger.process_requests( head );
+    }
+    __TBB_ASSERT( connections_to_reclaim.tail==garbage_connection_queue::plugged||connections_to_reclaim.tail==garbage_connection_queue::plugged_acked, "someone else added a request after termination has initiated" );
+    __TBB_ASSERT( the_balance==connection_scavenger.default_concurrency, NULL );
+}
+
+void connection_scavenger_thread::sleep_perhaps() {
+    uintptr_t tail = connections_to_reclaim.tail;
+    // connections_to_reclaim.tail==garbage_connection_queue::plugged --> terminate, 
+    // connections_to_reclaim.tail>garbage_connection_queue::plugged : we got work to do
+    if( tail>=garbage_connection_queue::plugged ) return; 
+    __TBB_ASSERT( !tail, NULL );
+    thread_monitor::cookie c; 
+    monitor.prepare_wait(c);
+    if( state.compare_and_swap( ts_asleep, ts_busy )==ts_busy ) {
+        if( connections_to_reclaim.tail!=garbage_connection_queue::plugged ) {
+            monitor.commit_wait(c);
+            // Someone else woke me up.  The compare_and_swap further below deals with spurious wakeups.
+        } else {
+            monitor.cancel_wait();
+        }
+        thread_state_t s = state;
+        if( s==ts_asleep ) // if spurious wakeup.
+            state.compare_and_swap( ts_busy, ts_asleep );
+            // I woke myself up, either because I cancelled the wait or suffered a spurious wakeup.
+    } else {
+        __TBB_ASSERT( false, "someone else tampered with my state" );
+    }
+    __TBB_ASSERT( state==ts_busy, "a thread can only put itself to sleep" );
+}
+
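+//! Drain a list of closed connections: wait for each connection's workers to return,
+// then drop the extra server reference, which triggers Shutdown/Release of the ConcRT RM.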
+void connection_scavenger_thread::process_requests( uintptr_t conn_ex )
+{
+    __TBB_ASSERT( conn_ex>1, NULL );
+    __TBB_ASSERT( n_scavenger_threads==1||connections_to_reclaim.tail==garbage_connection_queue::plugged, "more than one connection_scavenger_thread being active?" );
+
+    bool done = false;
+    while( !done ) {
+        bool is_tbb = (conn_ex&2)>0;
+        //clear extra bits
+        uintptr_t curr_conn = conn_ex & ~(uintptr_t)3;
+
+        // no contention. there is only one connection_scavenger_thread!!
+        uintptr_t next_conn;
+        tbb_connection_v2* tbb_conn = NULL;
+        omp_connection_v2* omp_conn = NULL;
+        // Wait for worker threads to return
+        if( is_tbb ) {
+            tbb_conn = reinterpret_cast<tbb_connection_v2*>(curr_conn);
+            next_conn = reinterpret_cast<uintptr_t>(tbb_conn->next_conn);
+            while( tbb_conn->my_thread_map.get_server_ref_count()>1 ) 
+                __TBB_Yield();
+        } else {
+            omp_conn = reinterpret_cast<omp_connection_v2*>(curr_conn);
+            next_conn = reinterpret_cast<uintptr_t>(omp_conn->next_conn);
+            while( omp_conn->my_thread_map.get_server_ref_count()>1 ) 
+                __TBB_Yield();
+        }
+
+        // Someone else may try to write into this connection object,
+        // so access the next_conn field before removing the extra server ref count.
+
+        if( next_conn==0 ) {
+            uintptr_t tail = connections_to_reclaim.tail;
+            if( tail==garbage_connection_queue::plugged ) { 
+                tail = garbage_connection_queue::plugged_acked; // connection scavenger saw the flag, and it freed all connections.
+                done = true;
+            } else if( tail==conn_ex ) {
+                if( connections_to_reclaim.tail.compare_and_swap( garbage_connection_queue::empty, tail )==tail ) {
+                    __TBB_ASSERT( !connections_to_reclaim.head, NULL );
+                    done = true;
+                }
+            }
+            
+            if( !done ) {
+                // A new connection to close is added to connections_to_reclaim.tail; 
+                // Wait for curr_conn->next_conn to be set.
+                if( is_tbb ) {
+                    while( !tbb_conn->next_conn ) 
+                        __TBB_Yield();
+                    conn_ex = reinterpret_cast<uintptr_t>(tbb_conn->next_conn);
+                } else {
+                    while( !omp_conn->next_conn ) 
+                        __TBB_Yield();
+                    conn_ex = reinterpret_cast<uintptr_t>(omp_conn->next_conn);
+                }
+            }
+        } else {
+            conn_ex = next_conn;
+        }
+        __TBB_ASSERT( conn_ex, NULL );
+        if( is_tbb )
+            // remove extra server ref count; this will trigger Shutdown/Release of ConcRT RM
+            tbb_conn->remove_server_ref();
+        else
+            // remove extra server ref count; this will trigger Shutdown/Release of ConcRT RM
+            omp_conn->remove_server_ref();
+    }
+}
+
+__RML_DECL_THREAD_ROUTINE connection_scavenger_thread::thread_routine( void* arg ) {
+    connection_scavenger_thread* thr = (connection_scavenger_thread*) arg;
+    thr->state = ts_busy;
+    thr->thr_handle = GetCurrentThread();
+#if TBB_USE_ASSERT
+    ++thr->n_scavenger_threads;
+#endif 
+    for(;;) {
+        __TBB_Yield();
+        thr->sleep_perhaps();   
+        if( connections_to_reclaim.tail==garbage_connection_queue::plugged || connections_to_reclaim.tail==garbage_connection_queue::plugged_acked ) {
+            thr->state = ts_asleep;
+            return 0;
+        }
+
+        __TBB_ASSERT( connections_to_reclaim.tail!=garbage_connection_queue::plugged_acked, NULL );
+        __TBB_ASSERT( connections_to_reclaim.tail>garbage_connection_queue::plugged && (connections_to_reclaim.tail&garbage_connection_queue::plugged)==0 , NULL );
+        while( connections_to_reclaim.head==garbage_connection_queue::empty )
+            __TBB_Yield();
+        uintptr_t head = connections_to_reclaim.head.fetch_and_store( garbage_connection_queue::empty );
+        thr->process_requests( head );
+        wakeup_some_tbb_threads();
+    }
+}
+
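+//! Append a closing connection to the reclamation queue handled by the scavenger thread.
+// Bit 1 of the queued pointer records whether the connection is a TBB one; the scavenger
+// is woken up if it is currently asleep.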
+template<typename Server, typename Client>
+void connection_scavenger_thread::add_request( generic_connection<Server,Client>* conn_to_close )
+{
+    uintptr_t conn_ex = (uintptr_t)conn_to_close | (connection_traits<Server,Client>::is_tbb<<1);
+    __TBB_ASSERT( !conn_to_close->next_conn, NULL );
+    uintptr_t old_tail_ex = connections_to_reclaim.tail;
+    __TBB_ASSERT( old_tail_ex==0||old_tail_ex>garbage_connection_queue::plugged_acked, "Unloading DLL called while this connection is being closed?" );
+    tbb::internal::atomic_backoff backoff;
+    while( connections_to_reclaim.tail.compare_and_swap( conn_ex, old_tail_ex )!=old_tail_ex ) {
+        backoff.pause();
+        old_tail_ex = connections_to_reclaim.tail;
+    }
+
+    if( old_tail_ex==garbage_connection_queue::empty )
+        connections_to_reclaim.head = conn_ex;
+    else {
+        bool is_tbb = (old_tail_ex&2)>0;
+        uintptr_t old_tail = old_tail_ex & ~(uintptr_t)3;
+        if( is_tbb )
+            reinterpret_cast<tbb_connection_v2*>(old_tail)->next_conn = reinterpret_cast<tbb_connection_v2*>(conn_ex);
+        else
+            reinterpret_cast<omp_connection_v2*>(old_tail)->next_conn = reinterpret_cast<omp_connection_v2*>(conn_ex);
+    }
+
+    if( state==ts_asleep )
+        wakeup();
+}
+
+template<>
+uintptr_t connection_scavenger_thread::grab_and_prepend( generic_connection<tbb_server,tbb_client>* /*last_conn_to_close*/ ) { return 0;}
+
+template<>
+uintptr_t connection_scavenger_thread::grab_and_prepend( generic_connection<omp_server,omp_client>* last_conn_to_close )
+{
+    uintptr_t conn_ex = (uintptr_t)last_conn_to_close;
+    uintptr_t head = connections_to_reclaim.head.fetch_and_store( garbage_connection_queue::empty );
+    reinterpret_cast<omp_connection_v2*>(last_conn_to_close)->next_conn = reinterpret_cast<omp_connection_v2*>(head); 
+    return conn_ex;
+}
+
+extern "C" ULONGLONG NTAPI VerSetConditionMask( ULONGLONG, DWORD, BYTE);
+
+bool is_windows7_or_later ()
+{
+    try {
+        return GetOSVersion()>=IResourceManager::Win7OrLater;
+    } catch( ... ) {
+        return false;
+    }
+}
+
+#endif /* RML_USE_WCRM */
+
+template<typename Connection, typename Server, typename Client>
+static factory::status_type connect( factory& f, Server*& server, Client& client ) {
+    server = new Connection(*static_cast<wait_counter*>(f.scratch_ptr),client);
+    return factory::st_success; 
+}
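+// connect() is the shared body of __TBB_make_rml_server and __KMP_make_rml_server
+// below; f.scratch_ptr is expected to hold the wait_counter allocated in
+// __RML_open_factory, which tracks outstanding connections for __RML_close_factory.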
+
+extern "C" factory::status_type __RML_open_factory( factory& f, version_type& server_version, version_type client_version ) {
+    // Hack to keep this library from being unloaded: make the first client's dlopen have no corresponding dlclose.
+    // This code will be removed once we figure out how to shut down the RML cleanly.
+    static tbb::atomic<bool> one_time_flag;
+    if( one_time_flag.compare_and_swap(true,false)==false) {
+        __TBB_ASSERT( (size_t)f.library_handle!=factory::c_dont_unload, NULL );
+#if _WIN32||_WIN64
+        f.library_handle = reinterpret_cast<HMODULE>(factory::c_dont_unload);
+#else
+        f.library_handle = reinterpret_cast<void*>(factory::c_dont_unload);
+#endif
+    }
+    // End of hack
+
+    // Initialize the_balance only once
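+    // the_balance_inited acts as a tri-state flag: 0 = not initialized,
+    // 1 = initialization in progress, 2 = initialized.  Losers of the CAS below
+    // spin until the winner publishes 2.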
+    if( the_balance_inited!=2 ) {
+        if( the_balance_inited.compare_and_swap( 1, 0 )==0 ) {
+            the_balance = hardware_concurrency()-1;
+            the_balance_inited = 2;
+#if RML_USE_WCRM
+            connection_scavenger.launch( the_balance );
+#endif
+        } else {
+            tbb::internal::spin_wait_until_eq( the_balance_inited, 2 );
+        }
+    }
+
+    server_version = SERVER_VERSION;
+    f.scratch_ptr = 0;
+    if( client_version==0 ) {
+        return factory::st_incompatible;
+#if RML_USE_WCRM
+    } else if ( !is_windows7_or_later() ) {
+#if TBB_USE_DEBUG
+        fprintf(stderr, "This version of the RML library requires Windows 7 to run on.\nConnection request denied.\n");
+#endif
+        return factory::st_incompatible;
+#endif
+    } else {
+#if TBB_USE_DEBUG
+        if( client_version<EARLIEST_COMPATIBLE_CLIENT_VERSION )
+            fprintf(stderr, "This client library is too old for the current RML server.\nThe connection request is granted but oversubscription/undersubscription may occur.\n");
+#endif
+        f.scratch_ptr = new wait_counter;
+        return factory::st_success;
+    }
+}
+
+extern "C" void __RML_close_factory( factory& f ) {
+    if( wait_counter* fc = static_cast<wait_counter*>(f.scratch_ptr) ) {
+        f.scratch_ptr = 0;
+        fc->wait();
+        size_t bal = the_balance;
+        f.scratch_ptr = (void*)bal;
+        delete fc;
+    }
+}
+
+void call_with_build_date_str( ::rml::server_info_callback_t cb, void* arg );
+
+}} // rml::internal 
+
+namespace tbb {
+namespace internal {
+namespace rml {
+
+extern "C" tbb_factory::status_type __TBB_make_rml_server( tbb_factory& f, tbb_server*& server, tbb_client& client ) {
+    return ::rml::internal::connect< ::rml::internal::tbb_connection_v2>(f,server,client);
+}
+
+extern "C" void __TBB_call_with_my_server_info( ::rml::server_info_callback_t cb, void* arg ) {
+    return ::rml::internal::call_with_build_date_str( cb, arg );
+}
+
+}}}
+
+namespace __kmp {
+namespace rml {
+
+extern "C" omp_factory::status_type __KMP_make_rml_server( omp_factory& f, omp_server*& server, omp_client& client ) {
+    return ::rml::internal::connect< ::rml::internal::omp_connection_v2>(f,server,client);
+}
+
+extern "C" void __KMP_call_with_my_server_info( ::rml::server_info_callback_t cb, void* arg ) {
+    return ::rml::internal::call_with_build_date_str( cb, arg );
+}
+
+}}
+
+/*
+ * RML server info
+ */
+#include "version_string.tmp"
+
+#ifndef __TBB_VERSION_STRINGS
+#pragma message("Warning: version_string.tmp isn't generated properly by version_info.sh script!")
+#endif
+
+// We use the build time as the RML server info. TBB is required to build RML, so we make it the same as the TBB build time.
+#ifndef __TBB_DATETIME
+#define __TBB_DATETIME __DATE__ " " __TIME__
+#endif
+
+#if !RML_USE_WCRM
+#define RML_SERVER_BUILD_TIME "Intel(R) RML library built: " __TBB_DATETIME
+#define RML_SERVER_VERSION_ST "Intel(R) RML library version: v" TOSTRING(SERVER_VERSION)
+#else
+#define RML_SERVER_BUILD_TIME "Intel(R) RML library built: " __TBB_DATETIME
+#define RML_SERVER_VERSION_ST "Intel(R) RML library version: v" TOSTRING(SERVER_VERSION) " on ConcRT RM with " RML_THREAD_KIND_STRING
+#endif
+
+namespace rml {
+namespace internal {
+
+void call_with_build_date_str( ::rml::server_info_callback_t cb, void* arg )
+{
+    (*cb)( arg, RML_SERVER_BUILD_TIME );
+    (*cb)( arg, RML_SERVER_VERSION_ST );
+}
+}} // rml::internal 
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/server/thread_monitor.h b/deal.II/contrib/tbb/tbb30_104oss/src/rml/server/thread_monitor.h
new file mode 100644 (file)
index 0000000..59e1a1c
--- /dev/null
@@ -0,0 +1,256 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// All platform-specific threading support is encapsulated here.
+#ifndef __RML_thread_monitor_H
+#define __RML_thread_monitor_H
+
+#if USE_WINTHREAD
+#include <windows.h>
+#include <process.h>
+#include <malloc.h> //_alloca
+#elif USE_PTHREAD
+#include <pthread.h>
+#include <string.h>
+#include <stdlib.h>
+#else
+#error Unsupported platform
+#endif 
+#include <stdio.h>
+#include "tbb/itt_notify.h"
+
+// All platform-specific threading support is in this header.
+
+#if (_WIN32||_WIN64)&&!__TBB_ipf
+// Deal with 64K aliasing.  The formula for "offset" is a Fibonacci hash function,
+// which has the desirable feature of spreading out the offsets fairly evenly
+// without knowing the total number of offsets, and furthermore unlikely to
+// accidentally cancel out other 64K aliasing schemes that Microsoft might implement later.
+// See Knuth Vol 3. "Theorem S" for details on Fibonacci hashing.
+// The second statement really does need "volatile", otherwise the compiler might remove the _alloca.
+#define AVOID_64K_ALIASING(idx)                       \
+    size_t offset = (idx+1) * 40503U % (1U<<16);      \
+    void* volatile sink_for_alloca = _alloca(offset); \
+    __TBB_ASSERT_EX(sink_for_alloca, "_alloca failed");
+#else
+// Linux thread allocators avoid 64K aliasing.
+#define AVOID_64K_ALIASING(idx)
+#endif /* _WIN32||_WIN64 */
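+// For illustration: with the multiplier 40503 (roughly 2^16 divided by the golden
+// ratio), indices 0,1,2,3 map to offsets 40503, 15470, 55973 and 30940 bytes,
+// spreading thread stacks fairly evenly within each 64K window.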
+
+namespace rml {
+
+namespace internal {
+
+#if DO_ITT_NOTIFY
+static const ::tbb::tchar *SyncType_RML = _T("%Constant");
+static const ::tbb::tchar *SyncObj_ThreadMonitorLock = _T("RML Lock"),
+                          *SyncObj_ThreadMonitor = _T("RML Thr Monitor");
+#endif /* DO_ITT_NOTIFY */
+
+//! Monitor with limited two-phase commit form of wait.  
+/** At most one thread should wait on an instance at a time. */
+class thread_monitor {
+public:
+    class cookie {
+        friend class thread_monitor;
+        unsigned long long my_version;
+    };
+    thread_monitor();
+    ~thread_monitor();
+
+    //! If a thread is waiting or started a two-phase wait, notify it.
+    /** Can be called by any thread. */
+    void notify();
+
+    //! Begin two-phase wait.
+    /** Should only be called by thread that owns the monitor. 
+        The caller must either complete the wait or cancel it. */
+    void prepare_wait( cookie& c );
+
+    //! Complete a two-phase wait and wait until notification occurs after the earlier prepare_wait.
+    void commit_wait( cookie& c );
+
+    //! Cancel a two-phase wait.
+    void cancel_wait();
+
+#if USE_WINTHREAD
+#define __RML_DECL_THREAD_ROUTINE unsigned WINAPI
+    typedef unsigned (WINAPI *thread_routine_type)(void*);
+#endif /* USE_WINTHREAD */
+
+#if USE_PTHREAD
+#define __RML_DECL_THREAD_ROUTINE void*
+    typedef void*(*thread_routine_type)(void*);
+#endif /* USE_PTHREAD */
+
+    //! Launch a thread
+    static void launch( thread_routine_type thread_routine, void* arg, size_t stack_size );
+    static void yield();
+
+private:
+    cookie my_cookie;
+#if USE_WINTHREAD
+    CRITICAL_SECTION critical_section;
+    HANDLE event;
+#endif /* USE_WINTHREAD */
+#if USE_PTHREAD
+    pthread_mutex_t my_mutex;
+    pthread_cond_t my_cond;
+    static void check( int error_code, const char* routine );
+#endif /* USE_PTHREAD */
+};
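+// Hypothetical usage sketch (work_is_available() is a placeholder, not an RML call):
+//     thread_monitor m;
+//     thread_monitor::cookie c;
+//     m.prepare_wait( c );
+//     if( work_is_available() )   // state changed after the check that led us to wait
+//         m.cancel_wait();
+//     else
+//         m.commit_wait( c );     // blocks until another thread calls m.notify()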
+
+#if USE_WINTHREAD
+#ifndef STACK_SIZE_PARAM_IS_A_RESERVATION
+#define STACK_SIZE_PARAM_IS_A_RESERVATION 0x00010000
+#endif
+inline void thread_monitor::launch( thread_routine_type thread_routine, void* arg, size_t stack_size ) {
+    unsigned thread_id;
+    uintptr_t status = _beginthreadex( NULL, unsigned(stack_size), thread_routine, arg, STACK_SIZE_PARAM_IS_A_RESERVATION, &thread_id );
+    if( status==0 ) {
+        fprintf(stderr,"thread_monitor::launch: _beginthreadex failed\n");
+        exit(1); 
+    } else {
+        CloseHandle((HANDLE)status);
+    }
+}
+
+inline void thread_monitor::yield() {
+    SwitchToThread();
+}
+
+inline thread_monitor::thread_monitor() {
+    event = CreateEvent( NULL, /*manualReset=*/true, /*initialState=*/false, NULL );
+    InitializeCriticalSection( &critical_section );
+    ITT_SYNC_CREATE(&event, SyncType_RML, SyncObj_ThreadMonitor);
+    ITT_SYNC_CREATE(&critical_section, SyncType_RML, SyncObj_ThreadMonitorLock);
+    my_cookie.my_version = 0;
+}
+
+inline thread_monitor::~thread_monitor() {
+    // Fake prepare/acquired pair for Intel(R) Parallel Amplifier to correctly attribute the operations below
+    ITT_NOTIFY( sync_prepare, &event );
+    CloseHandle( event );
+    DeleteCriticalSection( &critical_section );
+    ITT_NOTIFY( sync_acquired, &event );
+}
+     
+inline void thread_monitor::notify() {
+    EnterCriticalSection( &critical_section );
+    ++my_cookie.my_version;
+    SetEvent( event );
+    LeaveCriticalSection( &critical_section );
+}
+
+inline void thread_monitor::prepare_wait( cookie& c ) {
+    EnterCriticalSection( &critical_section );
+    c = my_cookie;
+}
+
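+// Note: prepare_wait() enters the critical section and commit_wait() leaves it, so a
+// notify() cannot interleave between the two calls; once the section is released, a
+// concurrent notify() both increments my_cookie.my_version and signals the manual-reset
+// event, and the version check below filters out stale wakeups.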
+inline void thread_monitor::commit_wait( cookie& c ) {
+    ResetEvent( event );
+    LeaveCriticalSection( &critical_section );
+    while( my_cookie.my_version==c.my_version ) {
+        WaitForSingleObject( event, INFINITE );
+        ResetEvent( event );
+    }
+}
+
+inline void thread_monitor::cancel_wait() {
+    LeaveCriticalSection( &critical_section );
+}
+#endif /* USE_WINTHREAD */
+
+#if USE_PTHREAD
+inline void thread_monitor::check( int error_code, const char* routine ) {
+    if( error_code ) {
+        fprintf(stderr,"thread_monitor %s\n", strerror(error_code) );
+        exit(1);
+    }
+}
+
+inline void thread_monitor::launch( void* (*thread_routine)(void*), void* arg, size_t stack_size ) {
+    // FIXME - consider more graceful recovery than just exiting if a thread cannot be launched.
+    // Note that there are some tricky situations to deal with, such as when the thread
+    // has already been grabbed as part of an OpenMP team.
+    pthread_attr_t s;
+    check(pthread_attr_init( &s ), "pthread_attr_init");
+    if( stack_size>0 ) {
+        check(pthread_attr_setstacksize( &s, stack_size ),"pthread_attr_setstacksize");
+    }
+    pthread_t handle;
+    check( pthread_create( &handle, &s, thread_routine, arg ), "pthread_create" );
+    check( pthread_detach( handle ), "pthread_detach" );
+}
+
+inline void thread_monitor::yield() {
+    sched_yield();
+}
+
+inline thread_monitor::thread_monitor() {
+    check( pthread_cond_init(&my_cond,NULL), "pthread_cond_init" );
+    check( pthread_mutex_init(&my_mutex,NULL), "pthread_mutex_init" );
+    ITT_SYNC_CREATE(&my_cond, SyncType_RML, SyncObj_ThreadMonitor);
+    ITT_SYNC_CREATE(&my_mutex, SyncType_RML, SyncObj_ThreadMonitorLock);
+    my_cookie.my_version = 0;
+}
+
+inline thread_monitor::~thread_monitor() {
+    pthread_cond_destroy(&my_cond);
+    pthread_mutex_destroy(&my_mutex);
+}
+
+inline void thread_monitor::notify() {
+    check( pthread_mutex_lock( &my_mutex ), "pthread_mutex_lock" );
+    ++my_cookie.my_version;
+    check( pthread_mutex_unlock( &my_mutex ), "pthread_mutex_unlock" );
+    check( pthread_cond_signal(&my_cond), "pthread_cond_signal" );
+}
+
+inline void thread_monitor::prepare_wait( cookie& c ) {
+    check( pthread_mutex_lock( &my_mutex ), "pthread_mutex_lock" );
+    c = my_cookie;
+}
+
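+// Note: the mutex acquired in prepare_wait() is still held on entry; pthread_cond_wait()
+// releases and reacquires it atomically, and notify() bumps my_version under the same
+// mutex, so a notification issued after prepare_wait() cannot be missed.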
+inline void thread_monitor::commit_wait( cookie& c ) {
+    while( my_cookie.my_version==c.my_version ) {
+        pthread_cond_wait( &my_cond, &my_mutex );
+    }
+    check( pthread_mutex_unlock( &my_mutex ), "pthread_mutex_unlock" );
+}
+
+inline void thread_monitor::cancel_wait() {
+    check( pthread_mutex_unlock( &my_mutex ), "pthread_mutex_unlock" );
+}
+#endif /* USE_PTHREAD */
+
+} // namespace internal
+} // namespace rml
+
+#endif /* __RML_thread_monitor_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/server/wait_counter.h b/deal.II/contrib/tbb/tbb30_104oss/src/rml/server/wait_counter.h
new file mode 100644 (file)
index 0000000..4018d1b
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __RML_wait_counter_H
+#define __RML_wait_counter_H
+
+#include "thread_monitor.h"
+#include "tbb/atomic.h"
+
+namespace rml {
+namespace internal {
+
+class wait_counter {
+    thread_monitor my_monitor;
+    tbb::atomic<int> my_count;
+    tbb::atomic<int> n_transients;
+public:
+    wait_counter() { 
+        // The "1" here is subtracted by the call to "wait".
+        my_count=1;
+        n_transients=0;
+    }
+
+    //! Wait for number of operator-- invocations to match number of operator++ invocations.
+    /** Exactly one thread should call this method. */
+    void wait() {
+        int k = --my_count;
+        __TBB_ASSERT( k>=0, "counter underflow" );
+        if( k>0 ) {
+            thread_monitor::cookie c;
+            my_monitor.prepare_wait(c);
+            if( my_count )
+                my_monitor.commit_wait(c);
+            else 
+                my_monitor.cancel_wait();
+        }
+        while( n_transients>0 )
+            __TBB_Yield();
+    }
+    void operator++() {
+        ++my_count;
+    }
+    void operator--() {
+        ++n_transients;
+        int k = --my_count;
+        __TBB_ASSERT( k>=0, "counter underflow" );
+        if( k==0 ) 
+            my_monitor.notify();
+        --n_transients;
+    }
+};
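+// Usage sketch (inferred from how __RML_open_factory / __RML_close_factory use this
+// class): the factory keeps a wait_counter in scratch_ptr, each connection does
+// ++counter when it opens and --counter when it finishes closing, and the single
+// wait() call in __RML_close_factory blocks until increments and decrements balance.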
+
+} // namespace internal
+} // namespace rml
+
+#endif /* __RML_wait_counter_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/server/win32-rml-export.def b/deal.II/contrib/tbb/tbb30_104oss/src/rml/server/win32-rml-export.def
new file mode 100644 (file)
index 0000000..7902330
--- /dev/null
@@ -0,0 +1,35 @@
+; Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+;
+; This file is part of Threading Building Blocks.
+;
+; Threading Building Blocks is free software; you can redistribute it
+; and/or modify it under the terms of the GNU General Public License
+; version 2 as published by the Free Software Foundation.
+;
+; Threading Building Blocks is distributed in the hope that it will be
+; useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+; of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+; GNU General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with Threading Building Blocks; if not, write to the Free Software
+; Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+;
+; As a special exception, you may use this file as part of a free software
+; library without restriction.  Specifically, if other files instantiate
+; templates or use macros or inline functions from this file, or you compile
+; this file and link it with other files to produce an executable, this
+; file does not by itself cause the resulting executable to be covered by
+; the GNU General Public License.  This exception does not however
+; invalidate any other reasons why the executable file might be covered by
+; the GNU General Public License.
+
+EXPORTS
+
+__RML_open_factory
+__RML_close_factory
+__TBB_make_rml_server
+__KMP_make_rml_server
+__TBB_call_with_my_server_info
+__KMP_call_with_my_server_info
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/server/win64-rml-export.def b/deal.II/contrib/tbb/tbb30_104oss/src/rml/server/win64-rml-export.def
new file mode 100644 (file)
index 0000000..7902330
--- /dev/null
@@ -0,0 +1,35 @@
+; Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+;
+; This file is part of Threading Building Blocks.
+;
+; Threading Building Blocks is free software; you can redistribute it
+; and/or modify it under the terms of the GNU General Public License
+; version 2 as published by the Free Software Foundation.
+;
+; Threading Building Blocks is distributed in the hope that it will be
+; useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+; of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+; GNU General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with Threading Building Blocks; if not, write to the Free Software
+; Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+;
+; As a special exception, you may use this file as part of a free software
+; library without restriction.  Specifically, if other files instantiate
+; templates or use macros or inline functions from this file, or you compile
+; this file and link it with other files to produce an executable, this
+; file does not by itself cause the resulting executable to be covered by
+; the GNU General Public License.  This exception does not however
+; invalidate any other reasons why the executable file might be covered by
+; the GNU General Public License.
+
+EXPORTS
+
+__RML_open_factory
+__RML_close_factory
+__TBB_make_rml_server
+__KMP_make_rml_server
+__TBB_call_with_my_server_info
+__KMP_call_with_my_server_info
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/test/rml_omp_stub.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/rml/test/rml_omp_stub.cpp
new file mode 100644 (file)
index 0000000..f0a9587
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// This file is compiled with C++, but linked with a program written in C.
+// The intent is to find dependencies on the C++ run-time.
+
+#include <stdlib.h>
+#define RML_PURE_VIRTUAL_HANDLER abort
+
+#if _MSC_VER==1500 && !defined(__INTEL_COMPILER)
+// VS2008/VC9 seems to have an issue: it emits warning C4100 (unreferenced formal parameter) for this header.
+#pragma warning( push )
+#pragma warning( disable: 4100 ) 
+#endif          
+#include "rml_omp.h"
+#if _MSC_VER==1500 && !defined(__INTEL_COMPILER)
+#pragma warning( pop )
+#endif
+
+rml::versioned_object::version_type Version;
+
+class MyClient: public __kmp::rml::omp_client {
+public:
+    /*override*/rml::versioned_object::version_type version() const {return 0;}
+    /*override*/size_type max_job_count() const {return 1024;}
+    /*override*/size_t min_stack_size() const {return 1<<20;}
+    /*override*/rml::job* create_one_job() {return NULL;}
+    /*override*/void acknowledge_close_connection() {}
+    /*override*/void cleanup(job&) {}
+    /*override*/policy_type policy() const {return throughput;}
+    /*override*/void process( job&, void*, __kmp::rml::omp_client::size_type ) {}
+   
+};
+
+//! Never actually set, because point of test is to find linkage issues.
+__kmp::rml::omp_server* MyServerPtr;
+
+#define HARNESS_NO_PARSE_COMMAND_LINE 1
+#define HARNESS_CUSTOM_MAIN 1
+#include "harness.h"
+
+extern "C" void Cplusplus() {
+    MyClient client;
+    Version = client.version();
+    REPORT("done\n");
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/test/test_job_automaton.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/rml/test/test_job_automaton.cpp
new file mode 100644 (file)
index 0000000..0aae636
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "job_automaton.h"
+#define HARNESS_NO_PARSE_COMMAND_LINE 1
+#include "harness.h"
+#include "harness_barrier.h"
+
+class State {
+    Harness::SpinBarrier barrier;
+    rml::internal::job_automaton ja;
+    rml::job job;
+    tbb::atomic<int> job_created;
+    tbb::atomic<int> job_destroyed;
+    tbb::atomic<bool> job_received;
+public:
+    State() : barrier(2) {
+        job_created = 0;
+        job_destroyed = 0;
+        job_received = false;
+    }
+    void exercise( bool is_owner );
+    ~State() {
+        ASSERT( job_created==job_destroyed, "accounting error" );
+        ASSERT( job_destroyed<=1, "destroyed job twice" );
+    }
+};
+
+int DelayMask;
+const int N = 14; 
+tbb::atomic<int> Coverage[N];
+
+//! Mark kth interval as covered and insert delay if kth bit of DelayMask is set.
+/** An interval is the code between two operations on the job_automaton that we are testing. */
+void Cover( int k ) {
+    ASSERT( k<N, NULL );
+    ++Coverage[k];
+    if( DelayMask>>k&1 ) {
+        // Introduce delay (and possibly a thread context switch)
+        __TBB_Yield();
+    }
+}
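+// Each bit k of DelayMask optionally injects a yield at interval k; TestMain below
+// sweeps DelayMask until CheckCoverage() reports that every interval has been hit at
+// least min_coverage times.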
+
+void State::exercise( bool is_owner ) {
+    barrier.wait();
+    if( is_owner ) {
+        Cover(0);
+        if( ja.try_acquire() ) {
+            Cover(1);
+            ++job_created; 
+            ja.set_and_release(job);
+            Cover(2);
+            if( ja.try_acquire() ) {
+                Cover(3);
+                ja.release();
+                Cover(4);
+                if( ja.try_acquire() ) {
+                    Cover(5);
+                    ja.release();
+                }
+            }
+            Cover(6);
+        } else {
+            Cover(7);
+        }
+        if( DelayMask&1<<N ) {
+            while( !job_received ) 
+                __TBB_Yield();
+        }
+    } else {
+        // Using extra bit of DelayMask for choosing whether to run wait_for_job or not.
+        if( DelayMask&1<<N ) {
+            rml::job* j= &ja.wait_for_job(); 
+            if( j!=&job ) REPORT("%p\n",j);
+            ASSERT( j==&job, NULL );
+            job_received = true;
+        }
+        Cover(8);
+    }   
+    rml::job* j;
+    if( ja.try_plug(j) ) {
+        ASSERT( j==&job || !j, NULL );
+        if( j ) {
+            Cover(9+is_owner);
+            ++job_destroyed;
+        } else {
+            __TBB_ASSERT( !is_owner, "owner failed to create job but plugged self" );
+            Cover(11);
+        } 
+    } else {
+        Cover(12+is_owner);
+    }
+}
+
+class Loop: NoAssign {
+    State& s;
+public:
+    Loop(State& s_) : s(s_) {}
+    void operator()( int i ) const {s.exercise(i==0);}
+};
+
+/** Return true if coverage is acceptable.
+    If report==true, issue message if it is unacceptable. */
+bool CheckCoverage( bool report ) {
+    bool okay = true;
+    for( int i=0; i<N; ++i ) {
+        const int min_coverage = 4; 
+        if( Coverage[i]<min_coverage ) {
+            okay = false;
+            if( report )
+                REPORT("Warning: Coverage[%d]=%d is less than acceptable minimum of %d\n", i, int(Coverage[i]),min_coverage);
+        }
+    }
+    return okay;
+}
+
+int TestMain () {
+    for( DelayMask=0; DelayMask<8<<N; ++DelayMask ) {
+        State s;
+        NativeParallelFor( 2, Loop(s) );
+        if( CheckCoverage(false) ) { 
+            // Reached acceptable code coverage level
+            break;
+        }
+    }
+    CheckCoverage(true);
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/test/test_rml_mixed.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/rml/test/test_rml_mixed.cpp
new file mode 100644 (file)
index 0000000..32c2f25
--- /dev/null
@@ -0,0 +1,253 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "rml_tbb.h"
+#include "rml_omp.h"
+#include "tbb/atomic.h"
+#include "tbb/tick_count.h"
+
+#define HARNESS_DEFAULT_MIN_THREADS 4
+#include "harness.h"
+
+const int OMP_ParallelRegionSize = 16;
+int TBB_MaxThread = 4;           // Includes master 
+int OMP_MaxThread = int(~0u>>1); // Includes master
+
+template<typename Client>
+class ClientBase: public Client {
+protected:
+    typedef typename Client::version_type version_type;
+    typedef typename Client::job job;
+    typedef typename Client::policy_type policy_type;
+
+private:
+    /*override*/version_type version() const {
+        return 0;
+    }
+    /*override*/size_t min_stack_size() const {
+        return 1<<20;
+    }
+    /*override*/job* create_one_job() {
+        return new rml::job;
+    }
+    /*override*/policy_type policy() const {
+        return Client::turnaround;
+    }
+    /*override*/void acknowledge_close_connection() {
+        delete this;
+    }
+    /*override*/void cleanup( job& j ) {delete &j;}
+};
+
+//! Represents a TBB or OpenMP run-time that uses RML.
+template<typename Factory, typename Client>
+class RunTime {
+public:
+    //! Factory that run-time uses to make servers.
+    Factory factory;
+    Client* client;
+    typename Factory::server_type* server;
+#if _WIN32||_WIN64
+    ::rml::server::execution_resource_t me;
+#endif
+    RunTime() {
+        factory.open();
+    }
+    ~RunTime() {
+        factory.close();
+    }
+    //! Create server for this run-time
+    void create_connection();
+
+    //! Destroy server for this run-time
+    void destroy_connection();
+};
+
+class ThreadLevelRecorder {
+    tbb::atomic<int> level;
+    struct record {
+        tbb::tick_count time;
+        int nthread;
+    };
+    tbb::atomic<unsigned> next;
+    /** Must be power of two */
+    static const unsigned max_record_count = 1<<20;
+    record array[max_record_count];
+public:
+    void change_level( int delta );
+    void dump();
+};
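+// change_level() is lock-free: the atomic 'next' index reserves a slot and samples past
+// max_record_count are dropped; dump() writes (seconds since the first sample,
+// concurrency level) pairs to time.txt.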
+
+void ThreadLevelRecorder::change_level( int delta ) {
+    int x = level+=delta;
+    tbb::tick_count t = tbb::tick_count::now();
+    unsigned k = next++;
+    if( k<max_record_count ) {
+        record& r = array[k];
+        r.time = t;
+        r.nthread = x;
+    } 
+}
+
+void ThreadLevelRecorder::dump() {
+    FILE* f = fopen("time.txt","w");
+    if( !f ) {
+        perror("fopen(time.txt)\n");
+        exit(1);
+    }
+    unsigned limit = next;
+    if( limit>max_record_count ) {
+        // Clip to the number of records actually stored.
+        limit = max_record_count;
+    }
+    for( unsigned i=0; i<limit; ++i ) {
+        fprintf(f,"%f\t%d\n",(array[i].time-array[0].time).seconds(),array[i].nthread);
+    }
+    fclose(f);
+}
+
+ThreadLevelRecorder TotalThreadLevel;
+
+class TBB_Client: public ClientBase<tbb::internal::rml::tbb_client> {
+    /*override*/void process( job& j );
+    /*override*/size_type max_job_count() const {
+        return TBB_MaxThread-1;
+    }
+};
+
+class OMP_Client: public ClientBase<__kmp::rml::omp_client> {
+    /*override*/void process( job&, void* cookie, omp_client::size_type );
+    /*override*/size_type max_job_count() const {
+        return OMP_MaxThread-1;
+    }
+};
+
+RunTime<tbb::internal::rml::tbb_factory, TBB_Client> TBB_RunTime;
+RunTime<__kmp::rml::omp_factory, OMP_Client> OMP_RunTime;
+
+template<typename Factory, typename Client>
+void RunTime<Factory,Client>::create_connection() {
+    client = new Client;
+    typename Factory::status_type status = factory.make_server( server, *client );
+    ASSERT( status==Factory::st_success, NULL );
+#if _WIN32||_WIN64
+    server->register_master( me );
+#endif /* _WIN32||_WIN64 */
+}
+
+template<typename Factory, typename Client>
+void RunTime<Factory,Client>::destroy_connection() {
+#if _WIN32||_WIN64
+    server->unregister_master( me );
+#endif /* _WIN32||_WIN64 */
+    server->request_close_connection();
+    server = NULL;
+}
+
+class OMP_Team {
+public:
+    OMP_Team( __kmp::rml::omp_server& ) {}
+    tbb::atomic<unsigned> barrier;
+};
+
+tbb::atomic<int> AvailWork;
+tbb::atomic<int> CompletionCount;
+void OMPWork() {
+    tbb::atomic<int> x;
+    for( x=0; x<2000000; ++x ) {
+        continue;
+    }
+}
+
+void TBBWork() {
+    if( AvailWork>=0 ) {
+        int k = --AvailWork;
+        if( k==-1 ) {
+            TBB_RunTime.server->adjust_job_count_estimate(-(TBB_MaxThread-1));
+            ++CompletionCount;
+        } else if( k>=0 ) {
+            for( int k=0; k<4; ++k ) {
+                OMP_Team team( *OMP_RunTime.server );
+                int n = OMP_RunTime.server->try_increase_load( OMP_ParallelRegionSize-1, /*strict=*/false );
+                team.barrier = 0;
+                ::rml::job* array[OMP_ParallelRegionSize-1];
+                if( n>0)
+                    OMP_RunTime.server->get_threads( n, &team, array );
+                // Master does work inside parallel region too.
+                OMPWork();
+                // Master waits for workers to finish
+                if( n>0 )
+                    while( team.barrier!=unsigned(n) ) {
+                        __TBB_Yield();
+                    } 
+            }
+            ++CompletionCount;
+        }
+    }
+}
+
+/*override*/void TBB_Client::process( job& ) {
+    TotalThreadLevel.change_level(1);
+    TBBWork();
+    TotalThreadLevel.change_level(-1);
+}  
+
+/*override*/void OMP_Client::process( job& /* j */, void* cookie, omp_client::size_type ) {
+    TotalThreadLevel.change_level(1);
+    ASSERT( OMP_RunTime.server, NULL );
+    OMPWork();
+    ASSERT( OMP_RunTime.server, NULL );
+    static_cast<OMP_Team*>(cookie)->barrier+=1;
+    TotalThreadLevel.change_level(-1);
+}
+
+void TBBOutSideOpenMPInside() {
+    TotalThreadLevel.change_level(1);
+    CompletionCount = 0;
+    int tbbtasks = 32;
+    AvailWork = tbbtasks;
+    TBB_RunTime.server->adjust_job_count_estimate(TBB_MaxThread-1);
+    while( CompletionCount!=tbbtasks+1 ) {
+        TBBWork();
+    }
+    TotalThreadLevel.change_level(-1);
+}  
+
+int TestMain () {
+    for( TBB_MaxThread=MinThread; TBB_MaxThread<=MaxThread; ++TBB_MaxThread ) {
+        REMARK("Testing with TBB_MaxThread=%d\n", TBB_MaxThread);
+        TBB_RunTime.create_connection();
+        OMP_RunTime.create_connection();
+        TBBOutSideOpenMPInside();
+        OMP_RunTime.destroy_connection();
+        TBB_RunTime.destroy_connection();
+    }
+    TotalThreadLevel.dump();
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/test/test_rml_omp.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/rml/test/test_rml_omp.cpp
new file mode 100644 (file)
index 0000000..99000d1
--- /dev/null
@@ -0,0 +1,196 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "rml_omp.h"
+
+typedef __kmp::rml::omp_server MyServer;
+typedef __kmp::rml::omp_factory MyFactory;
+
+// Forward declaration for the function used in test_server.h
+void DoClientSpecificVerification( MyServer& , int );
+
+#define HARNESS_DEFAULT_MIN_THREADS 0
+#include "test_server.h"
+#include "tbb/tbb_misc.h"
+
+static bool StrictTeam;
+
+class MyTeam {
+    MyTeam& operator=( const MyTeam& ) ;
+public:
+    struct info_type {
+        rml::job* job;
+        bool ran;
+        info_type() : job(NULL), ran(false) {}
+    };
+    MyTeam( MyServer& /* server */, size_t max_thread_ ) :
+        max_thread(max_thread_)
+    {
+        self_ptr = this;
+        info = new info_type[max_thread];
+    }
+    ~MyTeam() {
+        delete[] info;
+    }
+    const size_t max_thread;
+    size_t n_thread;
+    tbb::atomic<int> barrier;
+    /** Indexed with 1-origin index */
+    info_type* info;
+    int iteration;
+    MyTeam* self_ptr;
+};
+
+class MyClient: public ClientBase<__kmp::rml::omp_client> {
+public:
+    MyServer* server;
+    /*override*/void process( job& j, void* cookie, size_type index ) {
+        MyTeam& t = *static_cast<MyTeam*>(cookie);
+        ASSERT( t.self_ptr==&t, "trashed cookie" ); 
+        ASSERT( index<t.max_thread, NULL ); 
+        ASSERT( !t.info[index].ran, "duplicate index?" ); 
+        t.info[index].job = &j;
+        t.info[index].ran = true;
+        do_process(j);
+        if( index==1 && nesting.level<nesting.limit ) {
+            DoOneConnection<MyFactory,MyClient> doc(MaxThread,Nesting(nesting.level+1,nesting.limit),0,false);
+            doc(0);
+        }
+#if _WIN32||_WIN64
+        // test activate/deactivate
+        if( t.n_thread>1 && t.n_thread%2==0 ) {
+            if( nesting.level==0 ) {
+                if( index&1 ) {
+                    size_type target = index-1;
+                    ASSERT(  target<t.max_thread, NULL ); 
+                    // wait until t.info[target].job is defined
+                    tbb::internal::spin_wait_until_eq( t.info[target].ran, true );
+                    server->try_increase_load( 1, true );
+                    server->reactivate( t.info[target].job );
+                } else {
+                    server->deactivate( &j );
+                }
+            }
+        }
+#endif /* _WIN32||_WIN64 */
+        ++t.barrier;
+    }
+    static const bool is_omp = true;
+    bool is_strict() const {return StrictTeam;}
+};
+
+void FireUpJobs( MyServer& server, MyClient& client, int max_thread, int n_extra, Checker* checker ) {
+    ASSERT( max_thread>=0, NULL );
+#if _WIN32||_WIN64
+    ::rml::server::execution_resource_t me;
+    server.register_master( me );
+#endif /* _WIN32||_WIN64 */
+    client.server = &server;
+    MyTeam team(server,size_t(max_thread));
+    MyServer::size_type n_thread = 0;
+    for( int iteration=0; iteration<4; ++iteration ) {
+        for( size_t i=0; i<team.max_thread; ++i )
+            team.info[i].ran = false;
+        switch( iteration ) {
+            default:
+                n_thread = int(max_thread);
+                break;
+            case 1:
+                // No change in number of threads
+                break;
+            case 2:
+                // Decrease number of threads.  
+                n_thread = int(max_thread)/2;
+                break;
+            // Case 3 uses the same code as the default case, but has the effect of increasing the number of threads.
+        }
+        team.barrier = 0;
+        REMARK("client %d: server.run with n_thread=%d\n", client.client_id(), int(n_thread) );
+        server.independent_thread_number_changed( n_extra );
+        if( checker ) {
+            // Give RML time to respond to change in number of threads.
+            MilliSleep(1);
+        }
+        int n_delivered = server.try_increase_load( n_thread, StrictTeam );
+        ASSERT( !StrictTeam || n_delivered==int(n_thread), "server failed to satisfy strict request" );
+        if( n_delivered<0 ) {
+            REMARK( "client %d: oversubscription occurred (by %d)\n", client.client_id(), -n_delivered );
+            server.independent_thread_number_changed( -n_extra );
+            n_delivered = 0;
+        } else {
+            team.n_thread = n_delivered;
+            ::rml::job* job_array[JobArraySize];
+            job_array[n_delivered] = (::rml::job*)intptr_t(-1);
+            server.get_threads( n_delivered, &team, job_array );
+            __TBB_ASSERT( job_array[n_delivered]== (::rml::job*)intptr_t(-1), NULL );
+            for( int i=0; i<n_delivered; ++i ) {
+                MyJob* j = static_cast<MyJob*>(job_array[i]);
+                int s = j->state;
+                ASSERT( s==MyJob::idle||s==MyJob::busy, NULL );
+            }
+            server.independent_thread_number_changed( -n_extra );
+            REMARK("client %d: team size is %d\n", client.client_id(), n_delivered);
+            if( checker ) {
+                checker->check_number_of_threads_delivered( n_delivered, n_thread, n_extra );
+            }      
+            // Protocol requires that master wait until workers have called "done_processing"
+            while( team.barrier!=n_delivered ) {
+                ASSERT( team.barrier>=0, NULL );
+                ASSERT( team.barrier<=n_delivered, NULL );
+                __TBB_Yield();
+            }
+            REMARK("client %d: team completed\n", client.client_id() );
+            for( int i=0; i<n_delivered; ++i ) {
+                ASSERT( team.info[i].ran, "thread on team allegedly delivered, but did not run?" );
+            }
+        }
+        for( MyServer::size_type i=n_delivered; i<MyServer::size_type(max_thread); ++i ) {
+            ASSERT( !team.info[i].ran, "thread on team ran with illegal index" );
+        }
+    }
+#if _WIN32||_WIN64
+    server.unregister_master( me );
+#endif
+}
+
+void DoClientSpecificVerification( MyServer& server, int /*n_thread*/ )
+{
+    ASSERT( server.current_balance()==int(tbb::internal::DetectNumberOfWorkers())-1, NULL );
+}
+
+int TestMain () {
+    StrictTeam = true;
+    VerifyInitialization<MyFactory,MyClient>( MaxThread );
+    SimpleTest<MyFactory,MyClient>();
+
+    StrictTeam = false;
+    VerifyInitialization<MyFactory,MyClient>( MaxThread );
+    SimpleTest<MyFactory,MyClient>();
+
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/test/test_rml_omp_c_linkage.c b/deal.II/contrib/tbb/tbb30_104oss/src/rml/test/test_rml_omp_c_linkage.c
new file mode 100644 (file)
index 0000000..cb984bd
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+void Cplusplus();
+
+int main() {
+    Cplusplus();      
+    return 0;
+} 
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/test/test_rml_tbb.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/rml/test/test_rml_tbb.cpp
new file mode 100644 (file)
index 0000000..0116fcb
--- /dev/null
@@ -0,0 +1,201 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "rml_tbb.h"
+
+typedef tbb::internal::rml::tbb_server MyServer;
+typedef tbb::internal::rml::tbb_factory MyFactory;
+
+// Forward declaration of the function used in test_server.h
+void DoClientSpecificVerification( MyServer&, int );
+
+#define HARNESS_DEFAULT_MIN_THREADS 0
+#include "test_server.h"
+
+tbb::atomic<int> n_available_hw_threads;
+
+class MyClient: public ClientBase<tbb::internal::rml::tbb_client> {
+    tbb::atomic<int> counter;
+    tbb::atomic<int> gate;
+    /*override*/void process( job& j ) {
+        do_process(j);
+        //wait until the gate is open.
+        while( gate==0 )
+            MilliSleep(1);
+
+        __TBB_ASSERT( nesting.limit<=2, NULL );
+        if( nesting.level>=nesting.limit )
+            return;
+
+        size_type max_outstanding_connections = max_job_count(); // if nesting.level==0
+        if( nesting.level==1 )
+            max_outstanding_connections *= (1+max_outstanding_connections);
+
+        if( default_concurrency()<=max_outstanding_connections+2 )
+            // i.e., if it is not guaranteed that at least two connections may be made without depleting the_balance
+            return;
+
+        // at this point, ( nesting.level<nesting.limit ) && ( my_server->default_concurrency()-max_outstanding_connections>2 ) 
+        for( ;; ) {
+            while( n_available_hw_threads<=1 )
+                MilliSleep(1);
+
+            int n = --n_available_hw_threads;
+            if( n>0 ) break;
+            // else I lost
+            ++n_available_hw_threads;
+        }
+        
+        DoOneConnection<MyFactory,MyClient> doc(max_job_count(),Nesting(nesting.level+1,nesting.limit),0,false);
+        doc(0);
+
+        ++n_available_hw_threads;
+    }
+public:
+    MyClient() {counter=1;}
+    static const bool is_omp = false;
+    bool is_strict() const {return false;}
+    void open_the_gate() { gate = 1; }
+    void close_the_gate() { gate = 0; }
+};
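+// The gate keeps worker threads parked inside process() so that FireUpJobs() below can
+// snapshot how many jobs actually received a thread before the workers return to RML.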
+
+void FireUpJobs( MyServer& server, MyClient& client, int n_thread, int n_extra, Checker* checker ) {
+    REMARK("client %d: calling adjust_job_count_estimate(%d)\n", client.client_id(),n_thread);
+    // Exercise independent_thread_number_changed, even for zero values.
+    server.independent_thread_number_changed( n_extra );
+#if _WIN32||_WIN64
+    ::rml::server::execution_resource_t me;
+    server.register_master( me );
+#endif /* _WIN32||_WIN64 */
+    // Experiments indicate that when oversubscribing, the main thread should wait a little
+    // while for the RML worker threads to do some work. 
+    if( checker ) {
+        // Give RML time to respond to change in number of threads.
+        MilliSleep(1);
+        for( int k=0; k<n_thread; ++k )
+            client.job_array[k].processing_count = 0;
+    }
+    //close the gate to keep worker threads from returning to RML until a snapshot is taken
+    client.close_the_gate();
+    server.adjust_job_count_estimate( n_thread );
+    int n_used = 0;
+    if( checker ) {
+        MilliSleep(100);
+        for( int k=0; k<n_thread; ++k )
+            if( client.job_array[k].processing_count )
+                ++n_used;
+    }
+    // open the gate
+    client.open_the_gate();
+    // Logic further below presumes that jobs never starve, so undo previous call
+    // to independent_thread_number_changed before waiting on those jobs.
+    server.independent_thread_number_changed( -n_extra );
+    REMARK("client %d: wait for each job to be processed at least once\n",client.client_id());
+    // Calculate the number of jobs that are expected to get threads.
+    int expected = n_thread;
+    // Wait for expected number of jobs to be processed.
+#if RML_USE_WCRM
+    int default_concurrency = server.default_concurrency();
+    if( N_TestConnections>0 ) {
+        if( default_concurrency+1>=8 && n_thread<=3 && N_TestConnections<=3 && (default_concurrency/int(N_TestConnections)-1)>=n_thread ) {
+#endif /* RML_USE_WCRM */
+            for(;;) {
+                int n = 0;
+                for( int k=0; k<n_thread; ++k ) 
+                    if( client.job_array[k].processing_count!=0 ) 
+                        ++n;
+                if( n>=expected ) break;
+                server.yield();
+            }
+#if RML_USE_WCRM
+        } else if( n_thread>0 ) {
+            for( int m=0; m<20; ++m ) {
+                int n = 0;
+                for( int k=0; k<n_thread; ++k ) 
+                    if( client.job_array[k].processing_count!=0 ) 
+                        ++n;
+                if( n>=expected ) break;
+                MilliSleep(1);
+            }
+        }
+    }
+#endif /* RML_USE_WCRM */
+    server.adjust_job_count_estimate(-n_thread);
+#if _WIN32||_WIN64
+    server.unregister_master( me );
+#endif
+    // Give RML some time to respond
+    if( checker ) {
+        MilliSleep(1);
+        checker->check_number_of_threads_delivered( n_used, n_thread, n_extra );
+    }
+}
+
+void DoClientSpecificVerification( MyServer&, int n_thread )
+{
+    MyClient* client = new MyClient;
+    client->initialize( n_thread, Nesting(), ClientStackSize[0] );
+    MyFactory factory;
+    memset( &factory, 0, sizeof(factory) );
+    MyFactory::status_type status = factory.open();
+    ASSERT( status!=MyFactory::st_not_found, "could not find RML library" );
+    ASSERT( status!=MyFactory::st_incompatible, NULL );
+    ASSERT( status==MyFactory::st_success, NULL );
+    MyFactory::server_type* server; 
+    status = factory.make_server( server, *client );
+    ASSERT( status==MyFactory::st_success, NULL );
+    client->set_server( server );
+    client->expect_close_connection = true;
+    server->request_close_connection();
+    // Client deletes itself when it sees call to acknowledge_close_connection from server.
+    factory.close();
+}
+
+void Initialize()
+{
+    MyClient* client = new MyClient;
+    client->initialize( 1, Nesting(), ClientStackSize[0] );
+    MyFactory factory;
+    memset( &factory, 0, sizeof(factory) );
+    factory.open();
+    MyFactory::server_type* server; 
+    factory.make_server( server, *client );
+    client->set_server( server );
+    n_available_hw_threads = server->default_concurrency();
+    client->expect_close_connection = true;
+    server->request_close_connection();
+    // Client deletes itself when it sees call to acknowledge_close_connection from server.
+    factory.close();
+}
+
+int TestMain () {
+    VerifyInitialization<MyFactory,MyClient>( MaxThread );
+    Initialize();
+    SimpleTest<MyFactory,MyClient>();
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/test/test_server.h b/deal.II/contrib/tbb/tbb30_104oss/src/rml/test/test_server.h
new file mode 100644 (file)
index 0000000..b23074d
--- /dev/null
@@ -0,0 +1,452 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+/* This header contains code shared by test_omp_server.cpp and test_tbb_server.cpp 
+   There is no ifndef guard - the test is supposed to include this file exactly once.
+   The test is also expected to have #include of rml_omp.h or rml_tbb.h before
+   including this header. 
+
+   This header should not use any parts of TBB that require linking in the TBB run-time. 
+   It uses a few instances of tbb::atomic<T>, all of which are completely inlined. */
+
+#include "tbb/atomic.h"
+#include "tbb/tbb_thread.h"
+#include "harness.h"
+#include "harness_memory.h"
+#include "harness_concurrency_tracker.h"
+
+//! Define TRIVIAL as 1 to test only a single client, no nesting, no extra threads.
+#define TRIVIAL 0
+
+//! Maximum number of clients 
+#if TRIVIAL 
+const size_t MaxClient = 1;
+#else
+const size_t MaxClient = 4;
+#endif
+
+const size_t ClientStackSize[MaxClient] = {
+    1000000
+#if !TRIVIAL
+   ,2000000
+   ,1000000
+   ,4000000
+#endif /* TRIVIAL */
+};
+
+const size_t OverheadStackSize = 500000;
+
+const size_t JobArraySize = 1000;
+
+static bool TestSingleConnection;
+
+static size_t N_TestConnections;
+
+#if _WIN32||_WIN64
+#include <Windows.h> /* Need Sleep */
+#else
+#include <unistd.h>  /* Need usleep */   
+#endif
+
+void MilliSleep( unsigned milliseconds ) {
+#if _WIN32||_WIN64
+    Sleep( milliseconds );
+#else
+    usleep( milliseconds*1000 );
+#endif /* _WIN32||_WIN64 */
+}
+
+class MyJob: public ::rml::job {
+public:
+    //! Enumeration for tracking states of a job.
+    enum state_t {
+        //! Job has not yet been allocated.
+        unallocated,
+        //! Is idle.
+        idle,
+        //! Has a thread working on it.
+        busy,
+        //! After call to client::cleanup 
+        clean
+    };
+    tbb::atomic<int> state;
+    tbb::atomic<int> processing_count;
+    void update( state_t new_state, state_t old_state ) {
+        int o = state.compare_and_swap(new_state,old_state);
+        ASSERT( o==old_state, "illegal transition" );
+    }
+    void update_from_either( state_t new_state, state_t old_state1, state_t old_state2 ) {
+        int snapshot;
+        do {
+            snapshot = state;
+            ASSERT( snapshot==old_state1||snapshot==old_state2, "illegal transition" );
+        } while( state.compare_and_swap(new_state,snapshot)!=snapshot );
+    }
+    MyJob() {
+        state=unallocated;
+        processing_count=0;
+    }
+    ~MyJob() {
+        // Overwrite so that accidental use after destruction can be detected.
+        memset(this,-1,sizeof(*this));
+    }
+};
+
+static tbb::atomic<int> ClientConstructions;
+static tbb::atomic<int> ClientDestructions;
+
+struct Nesting {
+    int level;
+    int limit;
+    Nesting() : level(0), limit(0) {}
+    Nesting( int level_, int limit_ ) : level(level_), limit(limit_) {}
+};
+
+template<typename Client>
+class ClientBase: public Client {
+protected:
+    typedef typename Client::size_type size_type;
+    typedef typename Client::version_type version_type;
+    typedef typename Client::policy_type policy_type;
+    typedef typename Client::job job;
+private:
+    size_type my_max_job_count;
+    size_t my_stack_size;
+    tbb::atomic<size_t> next_job_index;
+    int my_client_id;
+    rml::server* my_server;
+
+public:
+    enum state_t {
+        //! Treat *this as constructed.
+        live=0x1,
+        //! Treat *this as destroyed.
+        destroyed=0xDEAD
+    };
+
+    tbb::atomic<int> state;
+    void update( state_t new_state, state_t old_state ) {
+        int o = state.compare_and_swap(new_state,old_state);
+        ASSERT( o==old_state, NULL );
+    }
+
+    tbb::atomic<bool> expect_close_connection;
+
+    MyJob *job_array;
+    /*override*/version_type version() const {
+        ASSERT( state==live, NULL );
+        return 1;
+    }
+    /*override*/size_type max_job_count() const {
+        ASSERT( state==live, NULL );
+        return my_max_job_count;
+    }
+
+    /*override*/size_t min_stack_size() const {
+        ASSERT( state==live, NULL );
+        return my_stack_size;
+    }
+
+    /*override*/policy_type policy() const {return Client::throughput;} 
+
+    /*override*/void acknowledge_close_connection() {
+        ASSERT( expect_close_connection, NULL );
+        for( size_t k=next_job_index; k>0; ) {
+            --k;
+            ASSERT( job_array[k].state==MyJob::clean, NULL );
+        }
+        delete[] job_array;
+        job_array = NULL;
+        ASSERT( my_server, NULL );
+        update( destroyed, live );
+        delete this; 
+    }
+
+    /*override*/void cleanup( job& j_ ) {
+        REMARK("client %d: cleanup(%p) called\n",client_id(),&j_);
+        ASSERT( state==live, NULL );
+        MyJob& j = static_cast<MyJob&>(j_);
+        while( j.state==MyJob::busy )
+            my_server->yield();
+        j.update(MyJob::clean,MyJob::idle);
+        REMARK("client %d: cleanup(%p) returns\n",client_id(),&j_);
+    }
+   
+    job* create_one_job();
+
+protected:
+    void do_process( job& j_ ) {
+        ASSERT( state==live, NULL );
+        MyJob& j = static_cast<MyJob&>(j_);
+        ASSERT( &j, NULL );
+        j.update(MyJob::busy,MyJob::idle);
+        // use of the plain addition (not the atomic increment) is intentional
+        j.processing_count = j.processing_count + 1;
+        ASSERT( my_stack_size>OverheadStackSize, NULL ); 
+#ifdef __ia64__
+        // Half of the stack is reserved for RSE, so test only remaining half.
+        UseStackSpace( (my_stack_size-OverheadStackSize)/2 );
+#else
+        UseStackSpace( my_stack_size-OverheadStackSize );
+#endif
+        j.update(MyJob::idle,MyJob::busy);
+        my_server->yield();
+    } 
+public:
+    ClientBase() : my_server(NULL) {
+        my_client_id = ClientConstructions++;
+        next_job_index = 0; 
+    }
+    int client_id() const {return my_client_id;}
+
+    Nesting nesting;
+
+    void initialize( size_type max_job_count, Nesting nesting_, size_t stack_size ) {
+        ASSERT( stack_size>0, NULL );
+        my_max_job_count = max_job_count;
+        nesting = nesting_;
+        my_stack_size = stack_size;
+        job_array = new MyJob[JobArraySize];
+        expect_close_connection = false;
+        state = live;
+    }
+
+    void set_server( rml::server* s ) {my_server=s;}
+
+    unsigned default_concurrency() const { ASSERT( my_server, NULL); return my_server->default_concurrency(); }
+
+    virtual ~ClientBase() {
+        ASSERT( state==destroyed, NULL );
+        ++ClientDestructions;
+    }
+};
+
+template<typename Client>
+typename Client::job* ClientBase<Client>::create_one_job() {
+    REMARK("client %d: create_one_job() called\n",client_id());
+    size_t k = next_job_index++;
+    ASSERT( state==live, NULL );
+    // Following assertion depends on assumption that implementation does not destroy jobs until 
+    // the connection is closed.  If the implementation is changed to destroy jobs sooner, the 
+    // test logic in this header will have to be reworked.
+    ASSERT( k<my_max_job_count, "RML allocated more than max_job_count jobs simultaneously" );
+    ASSERT( k<JobArraySize, "JobArraySize not big enough (problem is in test, not RML)" );
+    MyJob& j = job_array[k];
+    j.update(MyJob::idle,MyJob::unallocated);
+    REMARK("client %d: create_one_job() for k=%d returns %p\n",client_id(),int(k),&j);
+    return &j;
+}
+
+struct warning_tracker {
+    tbb::atomic<int> n_more_than_available;
+    tbb::atomic<int> n_too_many_threads;
+    tbb::atomic<int> n_system_overload;
+    warning_tracker() {
+        n_more_than_available = 0;
+        n_too_many_threads = 0;
+        n_system_overload = 0;
+    }
+    bool all_set() { return n_more_than_available>0 && n_too_many_threads>0 && n_system_overload>0; }
+} tracker;
+
+class Checker {
+public:
+    int default_concurrency;
+    void check_number_of_threads_delivered( int n_delivered, int n_requested, int n_extra ) const;
+    Checker( rml::server& server ) : default_concurrency(int(server.default_concurrency())) {}
+};
+
+void Checker::check_number_of_threads_delivered( int n_delivered, int n_requested, int n_extra ) const {
+    ASSERT( default_concurrency>=0, NULL );
+    if( tracker.all_set() ) return;
+    // Check that number of threads delivered is reasonable.
+    int n_avail = default_concurrency;
+    if( n_extra>0 )
+        n_avail-=n_extra;
+    if( n_avail<0 ) 
+        n_avail=0;
+    if( n_requested>default_concurrency ) 
+        n_avail += n_requested-default_concurrency;
+    int n_expected = n_requested;
+    if( n_expected>n_avail )
+        n_expected=n_avail;
+    const char* msg = NULL;
+    if( n_delivered>n_avail ) {
+        if( ++tracker.n_more_than_available>1 )
+            return;
+        msg = "server delivered more threads than were theoretically available";
+    } else if( n_delivered>n_expected ) {
+        if( ++tracker.n_too_many_threads>1 )
+            return;
+        msg = "server delivered more threads than expected";
+    } else if( n_delivered<n_expected ) {
+        if( ++tracker.n_system_overload>1 )
+            return;
+        msg = "server delivered fewer threads than expected; the system may be overloaded";
+    }
+    if( msg ) {
+        REPORT("Warning: %s (n_delivered=%d n_avail=%d n_requested=%d n_extra=%d default_concurrency=%d)\n",
+               msg, n_delivered, n_avail, n_requested, n_extra, default_concurrency );
+    }
+}
+
+template<typename Factory,typename Client>
+class DoOneConnection: NoAssign {
+    //! Number of threads to request
+    const int n_thread;
+    //! Nesting 
+    const Nesting nesting;
+    //! Number of extra threads to pretend having outside the RML
+    const int n_extra;
+    //! If true, check number of threads actually delivered.
+    const bool check_delivered;
+public:
+    DoOneConnection( int n_thread_, Nesting nesting_, int n_extra_, bool check_delivered_ ) : 
+        n_thread(n_thread_), 
+        nesting(nesting_), 
+        n_extra(n_extra_), 
+        check_delivered(check_delivered_)
+    {
+    }
+   
+    //! Test ith connection 
+    void operator()( size_t i ) const;
+};
+
+template<typename Factory,typename Client>
+void DoOneConnection<Factory,Client>::operator()( size_t i ) const {
+    ASSERT( i<MaxClient, NULL );
+    Client* client = new Client;
+    client->initialize( Client::is_omp ? JobArraySize : n_thread, nesting, ClientStackSize[i] );
+    Factory factory;
+    memset( &factory, 0, sizeof(factory) );
+    typename Factory::status_type status = factory.open();
+    ASSERT( status==Factory::st_success, NULL );
+
+    typename Factory::server_type* server; 
+    status = factory.make_server( server, *client );
+    ASSERT( status==Factory::st_success, NULL );
+    Harness::ConcurrencyTracker ct;
+    REMARK("client %d: opened server n_thread=%d nesting=(%d,%d)\n",
+               client->client_id(), n_thread, nesting.level, nesting.limit);
+    client->set_server( server );
+    Checker checker( *server );
+    FireUpJobs( *server, *client, n_thread, n_extra, check_delivered && !client->is_strict() ? &checker : NULL );
+
+    // Close the connection
+    client->expect_close_connection = true;
+    REMARK("client %d: calling request_close_connection\n", client->client_id());
+#if !RML_USE_WCRM
+    int default_concurrency = server->default_concurrency();
+#endif
+    server->request_close_connection();
+    // Client deletes itself when it sees call to acknowledge_close_connection from server.
+    factory.close();
+#if !RML_USE_WCRM
+    if( TestSingleConnection )
+        __TBB_ASSERT_EX( uintptr_t(factory.scratch_ptr)==uintptr_t(default_concurrency), "under/over subscription?" );
+#endif
+}
+
+//! Test with n_threads threads and n_client clients.
+template<typename Factory, typename Client>
+void SimpleTest() {
+    Harness::ConcurrencyTracker::Reset();
+    TestSingleConnection = true;
+    N_TestConnections = 1;
+    for( int n_thread=MinThread; n_thread<=MaxThread; ++n_thread ) {
+        // Test a single connection, no nesting, no extra threads
+        DoOneConnection<Factory,Client> doc(n_thread,Nesting(0,0),0,false);
+        doc(0);
+    }
+#if !TRIVIAL
+    TestSingleConnection = false;
+    for( int n_thread=MinThread; n_thread<=MaxThread; ++n_thread ) {
+        // Test parallel connections
+        for( int n_client=1; n_client<=int(MaxClient); ++n_client ) {
+            N_TestConnections = n_client;
+            REMARK("SimpleTest: n_thread=%d n_client=%d\n",n_thread,n_client);
+            NativeParallelFor( n_client, DoOneConnection<Factory,Client>(n_thread,Nesting(0,0),0,false) );
+        }
+        // Test server::independent_thread_number_changed
+        N_TestConnections = 1;
+        for( int n_extra=-4; n_extra<=32; n_extra=n_extra+1+n_extra/5 ) {
+            DoOneConnection<Factory,Client> doc(n_thread,Nesting(0,0),n_extra,true);
+            doc(0);
+        }
+#if !RML_USE_WCRM
+        // Test nested connections
+        DoOneConnection<Factory,Client> doc(n_thread,Nesting(0,2),0,false);
+        doc(0);
+#endif
+    }
+    ASSERT( Harness::ConcurrencyTracker::PeakParallelism()>1, "No multiple connections exercised?" );
+#endif /* !TRIVIAL */
+    // Let RML catch up.
+    while( ClientConstructions!=ClientDestructions )
+        MilliSleep(1);
+}
+
+static void check_server_info( void* arg, const char* server_info )
+{
+    ASSERT( strstr(server_info, (char*)arg), NULL );
+}
+
+template<typename Factory, typename Client>
+void VerifyInitialization( int n_thread ) {
+    Client* client = new Client;
+    client->initialize( Client::is_omp ? JobArraySize : n_thread, Nesting(), ClientStackSize[0] );
+    Factory factory;
+    memset( &factory, 0, sizeof(factory) );
+    typename Factory::status_type status = factory.open();
+    ASSERT( status!=Factory::st_not_found, "could not find RML library" );
+    ASSERT( status!=Factory::st_incompatible, NULL );
+    ASSERT( status==Factory::st_success, NULL );
+    factory.call_with_server_info( check_server_info, (void*)"Intel(R) RML library" );
+    typename Factory::server_type* server; 
+    status = factory.make_server( server, *client );
+    ASSERT( status!=Factory::st_incompatible, NULL );
+    ASSERT( status!=Factory::st_not_found, NULL );
+    ASSERT( status==Factory::st_success, NULL );
+    REMARK("client %d: opened server n_thread=%d nesting=(%d,%d)\n",
+               client->client_id(), n_thread, 0, 0);
+    ASSERT( server, NULL );
+    client->set_server( server );
+
+    DoClientSpecificVerification( *server, n_thread );
+
+    // Close the connection
+    client->expect_close_connection = true;
+    REMARK("client %d: calling request_close_connection\n", client->client_id());
+    server->request_close_connection();
+    // Client deletes itself when it sees call to acknowledge_close_connection from server.
+    factory.close();
+}
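
Editor's note: the MyJob and ClientBase state machines above validate every transition with compare_and_swap so that an illegal transition trips an assertion instead of silently corrupting state. A minimal sketch of that checked-transition idea with std::atomic follows; the job_sketch type and its enum values are assumptions made for illustration and are not part of the TBB sources.

    #include <atomic>
    #include <cassert>

    enum job_state { unallocated, idle, busy, clean };

    struct job_sketch {
        std::atomic<int> state;
        job_sketch() : state(unallocated) {}

        // Move from old_state to new_state; trip an assertion if another
        // thread performed a conflicting transition first.
        void update( job_state new_state, job_state old_state ) {
            int expected = old_state;
            bool ok = state.compare_exchange_strong( expected, new_state );
            assert( ok && "illegal transition" );
            (void)ok;
        }
    };

    int main() {
        job_sketch j;
        j.update( idle, unallocated );
        j.update( busy, idle );
        j.update( idle, busy );
        j.update( clean, idle );
        return 0;
    }
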
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/rml/test/test_thread_monitor.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/rml/test/test_thread_monitor.cpp
new file mode 100644 (file)
index 0000000..9ba0345
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "thread_monitor.h"
+#include "harness.h"
+#include "harness_memory.h"
+
+class ThreadState {
+    void loop();
+public:
+    static __RML_DECL_THREAD_ROUTINE routine( void* arg ) {
+        static_cast<ThreadState*>(arg)->loop();
+        return 0;
+    }
+    typedef rml::internal::thread_monitor thread_monitor;
+    thread_monitor monitor;
+    volatile int request;
+    volatile int ack;
+    volatile unsigned clock;
+    volatile unsigned stamp;
+    ThreadState() : request(-1), ack(-1), clock(0) {}
+};
+
+void ThreadState::loop() {
+    for(;;) {
+        ++clock;
+        if( ack==request ) {
+            thread_monitor::cookie c;
+            monitor.prepare_wait(c);
+            if( ack==request ) {
+                REMARK("%p: request=%d ack=%d\n", this, request, ack );
+                monitor.commit_wait(c);
+            } else
+                monitor.cancel_wait();
+        } else {
+            // Throw in delay occasionally
+            switch( request%8 ) {
+                case 0: 
+                case 1:
+                case 5:
+                    rml::internal::thread_monitor::yield();
+            }
+            int r = request;
+            ack = request;
+            if( !r ) return;
+        }
+    }
+}
+
+// Linux on Itanium seems to require at least 1<<18 bytes per stack.
+const size_t MinStackSize = 1<<18;
+const size_t MaxStackSize = 1<<22;
+
+int TestMain () {
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        ThreadState* t = new ThreadState[p];
+        for( size_t stack_size = MinStackSize; stack_size<=MaxStackSize; stack_size*=2 ) {
+            REMARK("launching %d threads\n",p);
+            for( int i=0; i<p; ++i )
+                rml::internal::thread_monitor::launch( ThreadState::routine, t+i, stack_size ); 
+            for( int k=1000; k>=0; --k ) {
+                if( k%8==0 ) {
+                    // Wait for threads to wait.
+                    for( int i=0; i<p; ++i ) {
+                        unsigned count = 0;
+                        do {
+                            t[i].stamp = t[i].clock;
+                            rml::internal::thread_monitor::yield();
+                            if( ++count>=1000 ) {
+                                REPORT("Warning: thread %d not waiting\n",i);
+                                break;
+                            }
+                        } while( t[i].stamp!=t[i].clock );
+                    }
+                }
+                REMARK("notifying threads\n");
+                for( int i=0; i<p; ++i ) {
+                    // Change state visible to launched thread
+                    t[i].request = k;
+                    t[i].monitor.notify();
+                }
+                REMARK("waiting for threads to respond\n");
+                for( int i=0; i<p; ++i ) 
+                    // Wait for thread to respond 
+                    while( t[i].ack!=k ) 
+                        rml::internal::thread_monitor::yield();
+            }
+        }
+        delete[] t;
+    }
+
+    return Harness::Done;
+}
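
Editor's note: the test above drives thread_monitor's prepare_wait/commit_wait/cancel_wait protocol, which rechecks the request before committing to sleep so that a notify cannot be lost. One way to express the same prepare/recheck/commit idea with standard primitives is sketched below; the monitor_sketch type and its members are hypothetical and deliberately much simpler than rml::internal::thread_monitor.

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    struct monitor_sketch {
        std::mutex m;
        std::condition_variable cv;
        int request;
        int ack;
        monitor_sketch() : request(-1), ack(-1) {}

        // Sleep only while there is no unacknowledged request ("prepare", recheck, "commit").
        void wait_for_new_request() {
            std::unique_lock<std::mutex> lock(m);
            cv.wait( lock, [this]{ return ack != request; } );
        }
        void post_request( int r ) {              // plays the role of notify()
            { std::lock_guard<std::mutex> lock(m); request = r; }
            cv.notify_one();
        }
        void acknowledge() {
            std::lock_guard<std::mutex> lock(m);
            ack = request;
        }
    };

    int main() {
        monitor_sketch mon;
        std::thread worker( [&]{ mon.wait_for_new_request(); mon.acknowledge(); } );
        mon.post_request( 1 );
        worker.join();
        std::printf( "request acknowledged\n" );
        return 0;
    }
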
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/arena.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/arena.cpp
new file mode 100644 (file)
index 0000000..e645cbe
--- /dev/null
@@ -0,0 +1,442 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "arena.h"
+#include "governor.h"
+#include "scheduler.h"
+#include "itt_notify.h"
+
+#if __TBB_STATISTICS_STDOUT
+#include <cstdio>
+#endif
+
+namespace tbb {
+namespace internal {
+
+#if !__TBB_ARENA_PER_MASTER
+//------------------------------------------------------------------------
+// UnpaddedArenaPrefix
+//------------------------------------------------------------------------
+inline arena& UnpaddedArenaPrefix::Arena() {
+    return *static_cast<tbb::internal::arena*>(static_cast<void*>( static_cast<ArenaPrefix*>(this)+1 ));
+}
+
+void UnpaddedArenaPrefix::process( job& j ) {
+    generic_scheduler& s = static_cast<generic_scheduler&>(j);
+    __TBB_ASSERT( governor::is_set(&s), NULL );
+    __TBB_ASSERT( !s.innermost_running_task, NULL );
+    // Try to steal a task.
+    // Passing reference count is technically unnecessary in this context,
+    // but omitting it here would add checks inside the function.
+    task* t = s.receive_or_steal_task( s.dummy_task->prefix().ref_count, /*return_if_no_work=*/true );
+    if (t) {
+        // A side effect of receive_or_steal_task is that innermost_running_task can be set.
+        // But for the outermost dispatch loop of a worker it has to be NULL.
+        s.innermost_running_task = NULL;
+        s.local_wait_for_all(*s.dummy_task,t);
+    }
+    __TBB_ASSERT( s.inbox.is_idle_state(true), NULL );
+    __TBB_ASSERT( !s.innermost_running_task, NULL );
+}
+
+void UnpaddedArenaPrefix::cleanup( job& j ) {
+    generic_scheduler& s = static_cast<generic_scheduler&>(j);
+    if( !governor::is_set( &s ) ) {
+        bool is_master = governor::is_set( NULL );
+        governor::assume_scheduler( &s );
+        generic_scheduler::cleanup_worker( &s, !is_master );
+        governor::assume_scheduler( NULL );
+    } else {
+        generic_scheduler::cleanup_worker( &s, true );
+    }
+}
+
+void UnpaddedArenaPrefix::acknowledge_close_connection() {
+    Arena().free_arena();
+}
+
+::rml::job* UnpaddedArenaPrefix::create_one_job() {
+    generic_scheduler* s = generic_scheduler::create_worker( Arena(), next_job_index++ );
+    governor::sign_on(s);
+    return s;
+}
+#endif /* !__TBB_ARENA_PER_MASTER */
+
+//------------------------------------------------------------------------
+// arena
+//------------------------------------------------------------------------
+
+#if __TBB_ARENA_PER_MASTER
+
+void arena::process( generic_scheduler& s ) {
+    __TBB_ASSERT( is_alive(my_guard), NULL );
+    __TBB_ASSERT( governor::is_set(&s), NULL );
+    __TBB_ASSERT( !s.innermost_running_task, NULL );
+
+    __TBB_ASSERT( my_num_slots != 1, NULL );
+    // Start search for an empty slot from the one we occupied the last time
+    unsigned index = s.arena_index < my_num_slots ? s.arena_index : s.random.get() % (my_num_slots - 1) + 1,
+             end = index;
+    __TBB_ASSERT( index != 0, "A worker cannot occupy slot 0" );
+    __TBB_ASSERT( index < my_num_slots, NULL );
+
+    // Find a vacant slot
+    for ( ;; ) {
+        if ( !slot[index].my_scheduler && __TBB_CompareAndSwapW( &slot[index].my_scheduler, (intptr_t)&s, 0 ) == 0 )
+            break;
+        if ( ++index == my_num_slots )
+            index = 1;
+        if ( index == end ) {
+            // Likely this arena is already saturated
+            if ( --my_num_threads_active == 0 )
+                close_arena();
+            return;
+        }
+    }
+    ITT_NOTIFY(sync_acquired, &slot[index]);
+    s.my_arena = this;
+    s.arena_index = index;
+    s.attach_mailbox( affinity_id(index+1) );
+
+    slot[index].hint_for_push = index ^ unsigned(&s-(generic_scheduler*)NULL)>>16; // randomizer seed
+    slot[index].hint_for_pop  = index; // initial value for round-robin
+
+    unsigned new_limit = index + 1;
+    unsigned old_limit = my_limit;
+    while ( new_limit > old_limit ) {
+        if ( my_limit.compare_and_swap(new_limit, old_limit) == old_limit )
+            break;
+        old_limit = my_limit;
+    }
+
+    for ( ;; ) {
+        // Try to steal a task.
+        // Passing reference count is technically unnecessary in this context,
+        // but omitting it here would add checks inside the function.
+        __TBB_ASSERT( is_alive(my_guard), NULL );
+        task* t = s.receive_or_steal_task( s.dummy_task->prefix().ref_count, /*return_if_no_work=*/true );
+        if (t) {
+            // A side effect of receive_or_steal_task is that innermost_running_task can be set.
+            // But for the outermost dispatch loop of a worker it has to be NULL.
+            s.innermost_running_task = NULL;
+            s.local_wait_for_all(*s.dummy_task,t);
+        }
+        ++my_num_threads_leaving;
+        __TBB_ASSERT ( slot[index].head == slot[index].tail, "Worker cannot leave arena while its task pool is not empty" );
+        __TBB_ASSERT( slot[index].task_pool == EmptyTaskPool, "Empty task pool is not marked appropriately" );
+        // Revalidate quitting condition
+        // This check prevents relinquishing more workers than necessary because
+        // of the non-atomicity of the decision-making procedure
+        if ( num_workers_active() >= my_num_workers_allotted || !my_num_workers_requested )
+            break;
+        --my_num_threads_leaving;
+        __TBB_ASSERT( !slot[0].my_scheduler || my_num_threads_active > 0, "Who requested more workers after the last one left the dispatch loop and the master's gone?" );
+    }
+#if __TBB_STATISTICS
+    ++s.my_counters.arena_roundtrips;
+    *slot[index].my_counters += s.my_counters;
+    s.my_counters.reset();
+#endif /* __TBB_STATISTICS */
+    __TBB_store_with_release( slot[index].my_scheduler, (generic_scheduler*)NULL );
+    s.inbox.detach();
+    __TBB_ASSERT( s.inbox.is_idle_state(true), NULL );
+    __TBB_ASSERT( !s.innermost_running_task, NULL );
+    __TBB_ASSERT( is_alive(my_guard), NULL );
+    // Decrementing my_num_threads_active first prevents extra workers from leaving
+    // this arena prematurely, but can result in some workers returning back just
+    // to repeat the escape attempt. If instead my_num_threads_leaving is decremented
+    // first, the result is the opposite - premature leaving is allowed and gratuitous
+    // return is prevented. Since such a race has any likelihood only when multiple
+    // workers are in the stealing loop, and consequently there is a lack of parallel
+    // work in this arena, we'd rather let them go out and try to get employment in 
+    // other arenas (before returning into this one again).
+    --my_num_threads_leaving;
+    if ( !--my_num_threads_active )
+        close_arena();
+}
+
+arena::arena ( market& m, unsigned max_num_workers ) {
+    __TBB_ASSERT( !my_guard, "improperly allocated arena?" );
+    __TBB_ASSERT( sizeof(slot[0]) % NFS_GetLineSize()==0, "arena::slot size not multiple of cache line size" );
+    __TBB_ASSERT( (uintptr_t)this % NFS_GetLineSize()==0, "arena misaligned" );
+    my_market = &m;
+    my_limit = 1;
+    // Two slots are mandatory: for the master, and for 1 worker (required to support starvation resistant tasks).
+    my_num_slots = max(2u, max_num_workers + 1);
+    my_max_num_workers = max_num_workers;
+    my_num_threads_active = 1; // accounts for the master
+    __TBB_ASSERT ( my_max_num_workers < my_num_slots, NULL );
+    // Construct mailboxes. Mark internal synchronization elements for the tools.
+    for( unsigned i = 0; i < my_num_slots; ++i ) {
+        __TBB_ASSERT( !slot[i].my_scheduler && !slot[i].task_pool, NULL );
+        ITT_SYNC_CREATE(slot + i, SyncType_Scheduler, SyncObj_WorkerTaskPool);
+        mailbox(i+1).construct();
+        ITT_SYNC_CREATE(&mailbox(i+1), SyncType_Scheduler, SyncObj_Mailbox);
+#if __TBB_STATISTICS
+        slot[i].my_counters = new ( NFS_Allocate(sizeof(statistics_counters), 1, NULL) ) statistics_counters;
+#endif /* __TBB_STATISTICS */
+    }
+    my_task_stream.initialize(my_num_slots);
+    ITT_SYNC_CREATE(&my_task_stream, SyncType_Scheduler, SyncObj_TaskStream);
+    my_mandatory_concurrency = false;
+#if __TBB_TASK_GROUP_CONTEXT
+    my_master_default_ctx = NULL;
+#endif
+}
+
+arena& arena::allocate_arena( market& m, unsigned max_num_workers ) {
+    __TBB_ASSERT( sizeof(base_type) + sizeof(arena_slot) == sizeof(arena), "All arena data fields must go to arena_base" );
+    __TBB_ASSERT( sizeof(base_type) % NFS_GetLineSize() == 0, "arena slots area misaligned: wrong padding" );
+    __TBB_ASSERT( sizeof(mail_outbox) == NFS_MaxLineSize, "Mailbox padding is wrong" );
+
+    unsigned num_slots = max(2u, max_num_workers + 1);
+    size_t n = sizeof(base_type) + num_slots * (sizeof(mail_outbox) + sizeof(arena_slot));
+
+    unsigned char* storage = (unsigned char*)NFS_Allocate( n, 1, NULL );
+    // Zero all slots to indicate that they are empty
+    memset( storage, 0, n );
+    return *new( storage + num_slots * sizeof(mail_outbox) ) arena(m, max_num_workers);
+}
+
+void arena::free_arena () {
+    __TBB_ASSERT( !my_num_threads_active, "There are threads in the dying arena" );
+    poison_value( my_guard );
+    intptr_t drained = 0;
+    for ( unsigned i = 1; i <= my_num_slots; ++i )
+        drained += mailbox(i).drain();
+    __TBB_ASSERT(my_task_stream.empty() && my_task_stream.drain()==0, "Not all enqueued tasks were executed");
+#if __TBB_COUNT_TASK_NODES
+    my_market->update_task_node_count( -drained );
+#endif /* __TBB_COUNT_TASK_NODES */
+    my_market->release();
+#if __TBB_TASK_GROUP_CONTEXT
+    __TBB_ASSERT( my_master_default_ctx, "Master thread never entered the arena?" );
+    my_master_default_ctx->~task_group_context();
+    NFS_Free(my_master_default_ctx);
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+#if __TBB_STATISTICS
+    for( unsigned i = 0; i < my_num_slots; ++i )
+        NFS_Free( slot[i].my_counters );
+#endif /* __TBB_STATISTICS */
+    void* storage  = &mailbox(my_num_slots);
+    this->~arena();
+    NFS_Free( storage );
+}
+
+void arena::close_arena () {
+#if !__TBB_STATISTICS_EARLY_DUMP
+    GATHER_STATISTIC( dump_arena_statistics() );
+#endif
+    my_market->detach_arena( *this );
+    free_arena();
+}
+
+#if __TBB_STATISTICS
+void arena::dump_arena_statistics () {
+    statistics_counters total;
+    for( unsigned i = 0; i < my_num_slots; ++i ) {
+#if __TBB_STATISTICS_EARLY_DUMP
+        generic_scheduler* s = slot[i].my_scheduler;
+        if ( s )
+            *slot[i].my_counters += s->my_counters;
+#else
+        __TBB_ASSERT( !slot[i].my_scheduler, NULL );
+#endif
+        if ( i != 0 ) {
+            total += *slot[i].my_counters;
+            dump_statistics( *slot[i].my_counters, i );
+        }
+    }
+    dump_statistics( *slot[0].my_counters, 0 );
+#if __TBB_STATISTICS_STDOUT
+    printf( "----------------------------------------------\n" );
+    dump_statistics( total, workers_counters_total );
+    total += *slot[0].my_counters;
+    dump_statistics( total, arena_counters_total );
+    printf( "==============================================\n" );
+#endif /* __TBB_STATISTICS_STDOUT */
+}
+#endif /* __TBB_STATISTICS */
+
+#else /* !__TBB_ARENA_PER_MASTER */
+
+arena* arena::allocate_arena( unsigned number_of_slots, unsigned number_of_workers, stack_size_type stack_size ) {
+    __TBB_ASSERT( sizeof(ArenaPrefix) % NFS_GetLineSize()==0, "ArenaPrefix not multiple of cache line size" );
+    __TBB_ASSERT( sizeof(mail_outbox)==NFS_MaxLineSize, NULL );
+    __TBB_ASSERT( stack_size>0, NULL );
+
+    size_t n = sizeof(ArenaPrefix) + number_of_slots*(sizeof(mail_outbox)+sizeof(arena_slot));
+
+    unsigned char* storage = (unsigned char*)NFS_Allocate( n, 1, NULL );
+    // Zero all slots to indicate that they are empty
+    memset( storage, 0, n );
+    arena* a = (arena*)(storage + sizeof(ArenaPrefix)+ number_of_slots*(sizeof(mail_outbox)));
+    __TBB_ASSERT( sizeof(a->slot[0]) % NFS_GetLineSize()==0, "arena::slot size not multiple of cache line size" );
+    __TBB_ASSERT( (uintptr_t)a % NFS_GetLineSize()==0, NULL );
+    new( &a->prefix() ) ArenaPrefix( number_of_slots, number_of_workers );
+
+    // Allocate the worker_list
+    WorkerDescriptor * w = new WorkerDescriptor[number_of_workers];
+    memset( w, 0, sizeof(WorkerDescriptor)*(number_of_workers));
+    a->prefix().worker_list = w;
+
+    // Construct mailboxes.
+    for( unsigned j=1; j<=number_of_slots; ++j ) 
+        a->mailbox(j).construct();
+
+    a->prefix().stack_size = stack_size;
+    size_t k;
+    // Mark each internal sync element for the tools
+    for( k=0; k<number_of_workers; ++k ) {
+        ITT_SYNC_CREATE(a->slot + k, SyncType_Scheduler, SyncObj_WorkerTaskPool);
+        ITT_SYNC_CREATE(&w[k].scheduler, SyncType_Scheduler, SyncObj_WorkerLifeCycleMgmt);
+        ITT_SYNC_CREATE(&a->mailbox(k+1), SyncType_Scheduler, SyncObj_Mailbox);
+    }
+    for( ; k<number_of_slots; ++k ) {
+        ITT_SYNC_CREATE(a->slot + k, SyncType_Scheduler, SyncObj_MasterTaskPool);
+        ITT_SYNC_CREATE(&a->mailbox(k+1), SyncType_Scheduler, SyncObj_Mailbox);
+    }
+
+    return a;
+}
+
+void arena::free_arena () {
+    // Drain mailboxes
+    // TODO: each scheduler should plug-and-drain its own mailbox when it terminates.
+    intptr_t drain_count = 0;
+    for( unsigned i=1; i<=prefix().number_of_slots; ++i )
+        drain_count += mailbox(i).drain();
+#if __TBB_COUNT_TASK_NODES
+    prefix().task_node_count -= drain_count;
+    if( prefix().task_node_count ) {
+        runtime_warning( "Leaked %ld task objects\n", long(prefix().task_node_count) );
+    }
+#endif /* __TBB_COUNT_TASK_NODES */
+    void* storage  = &mailbox(prefix().number_of_slots);
+    delete[] prefix().worker_list;
+    prefix().~ArenaPrefix();
+    NFS_Free( storage );
+}
+
+void arena::close_arena () {
+    for(;;) {
+        pool_state_t snapshot = prefix().pool_state;
+        if( snapshot==SNAPSHOT_SERVER_GOING_AWAY ) 
+            break;
+        if( prefix().pool_state.compare_and_swap( SNAPSHOT_SERVER_GOING_AWAY, snapshot )==snapshot ) {
+            if( snapshot!=SNAPSHOT_EMPTY )
+                prefix().server->adjust_job_count_estimate( -int(prefix().number_of_workers) );
+            break;
+        }
+    }
+    prefix().server->request_close_connection();
+}
+
+#endif /* !__TBB_ARENA_PER_MASTER */
+
+bool arena::is_out_of_work() {
+    // TODO: rework it to return at least a hint about where a task was found; better if the task itself.
+    for(;;) {
+        pool_state_t snapshot = prefix().pool_state;
+        switch( snapshot ) {
+            case SNAPSHOT_EMPTY:
+#if !__TBB_ARENA_PER_MASTER
+            case SNAPSHOT_SERVER_GOING_AWAY:
+#endif /* !__TBB_ARENA_PER_MASTER */
+                return true;
+            case SNAPSHOT_FULL: {
+                // Use unique id for "busy" in order to avoid ABA problems.
+                const pool_state_t busy = pool_state_t(this);
+                // Request permission to take snapshot
+                if( prefix().pool_state.compare_and_swap( busy, SNAPSHOT_FULL )==SNAPSHOT_FULL ) {
+                    // Got permission.  Take the snapshot.
+#if __TBB_ARENA_PER_MASTER
+                    size_t n = my_limit;
+#else /* !__TBB_ARENA_PER_MASTER */
+                    size_t n = prefix().limit;
+#endif /* !__TBB_ARENA_PER_MASTER */
+                    size_t k; 
+                    for( k=0; k<n; ++k ) 
+                        if( slot[k].task_pool != EmptyTaskPool && slot[k].head < slot[k].tail )
+                            break;
+                    bool work_absent = k>=n;
+#if __TBB_ARENA_PER_MASTER
+                    work_absent = work_absent && my_task_stream.empty();
+#endif /* __TBB_ARENA_PER_MASTER */
+                    // Test and test-and-set.
+                    if( prefix().pool_state==busy ) {
+                        if( work_absent ) {
+#if __TBB_ARENA_PER_MASTER
+                            // save current demand value before setting SNAPSHOT_EMPTY,
+                            // to avoid race with advertise_new_work.
+                            int current_demand = (int)my_max_num_workers;
+#endif
+                            if( prefix().pool_state.compare_and_swap( SNAPSHOT_EMPTY, busy )==busy ) {
+                                // This thread transitioned pool to empty state, and thus is responsible for
+                                // telling RML that there is no other work to do.
+#if __TBB_ARENA_PER_MASTER
+                                my_market->adjust_demand( *this, -current_demand );
+#else /* !__TBB_ARENA_PER_MASTER */
+                                prefix().server->adjust_job_count_estimate( -int(prefix().number_of_workers) );
+#endif /* !__TBB_ARENA_PER_MASTER */
+                                return true;
+                            }
+                        } else {
+                            // Undo previous transition SNAPSHOT_FULL-->busy, unless another thread undid it.
+                            prefix().pool_state.compare_and_swap( SNAPSHOT_FULL, busy );
+                        }
+                    }
+                } 
+                return false;
+            }
+            default:
+                // Another thread is taking a snapshot.
+                return false;
+        }
+    }
+}
+
+#if __TBB_COUNT_TASK_NODES 
+intptr_t arena::workers_task_node_count() {
+    intptr_t result = 0;
+#if __TBB_ARENA_PER_MASTER
+    for( unsigned i = 1; i < my_num_slots; ++i ) {
+        generic_scheduler* s = slot[i].my_scheduler;
+#else /* !__TBB_ARENA_PER_MASTER */
+    for( unsigned i=0; i<prefix().number_of_workers; ++i ) {
+        generic_scheduler* s = prefix().worker_list[i].scheduler;
+#endif /* !__TBB_ARENA_PER_MASTER */
+        if( s )
+            result += s->task_node_count;
+    }
+    return result;
+}
+#endif /* __TBB_COUNT_TASK_NODES */
+
+} // namespace internal
+} // namespace tbb
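
Editor's note: in arena::process() above, a worker claims a slot by scanning from a hint and installing its scheduler pointer with a compare-and-swap, wrapping around and giving up only when every worker slot is occupied. A minimal sketch of that claim loop with std::atomic follows; the slot count, the scheduler_sketch type, and the claim_slot name are assumptions for illustration only.

    #include <atomic>
    #include <cstdio>

    struct scheduler_sketch {};

    const unsigned num_slots = 8;
    std::atomic<scheduler_sketch*> slots[num_slots];   // zero-initialized: all vacant

    // Claim the first free slot at or after `hint` (slot 0 is reserved for the master).
    // Returns the claimed index, or -1 if every worker slot is occupied.
    int claim_slot( scheduler_sketch* s, unsigned hint ) {
        unsigned index = ( hint >= 1 && hint < num_slots ) ? hint : 1;
        unsigned end = index;
        for( ;; ) {
            scheduler_sketch* expected = nullptr;
            if( slots[index].load() == nullptr &&
                slots[index].compare_exchange_strong( expected, s ) )
                return int(index);
            if( ++index == num_slots ) index = 1;
            if( index == end ) return -1;              // arena is saturated
        }
    }

    int main() {
        scheduler_sketch s1, s2;
        std::printf( "claimed slots %d and %d\n", claim_slot( &s1, 3 ), claim_slot( &s2, 3 ) );
        return 0;
    }
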
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/arena.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/arena.h
new file mode 100644 (file)
index 0000000..495ddc0
--- /dev/null
@@ -0,0 +1,504 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _TBB_arena_H
+#define _TBB_arena_H
+
+#include "tbb/tbb_stddef.h"
+#include "tbb/atomic.h"
+
+#if __TBB_ARENA_PER_MASTER
+#include "scheduler_common.h"
+#include "market.h"
+#include "intrusive_list.h"
+#include "task_stream.h"
+#else /* !__TBB_ARENA_PER_MASTER */
+#include "../rml/include/rml_tbb.h"
+#endif /* !__TBB_ARENA_PER_MASTER */
+
+#include "mailbox.h"
+
+namespace tbb {
+
+#if __TBB_ARENA_PER_MASTER
+class task_group_context;
+class allocate_root_with_context_proxy;
+#endif /* __TBB_ARENA_PER_MASTER */
+
+namespace internal {
+
+class governor;
+class arena;
+class generic_scheduler;
+template<typename SchedulerTraits> class custom_scheduler;
+
+#if !__TBB_ARENA_PER_MASTER
+//------------------------------------------------------------------------
+// UnpaddedArenaPrefix
+//------------------------------------------------------------------------
+
+struct WorkerDescriptor {
+    //! NULL until worker is published.  -1 if worker should not be published.
+    generic_scheduler* scheduler;
+};
+
+//! The useful contents of an ArenaPrefix
+class UnpaddedArenaPrefix: no_copy, rml::tbb_client {
+    friend class generic_scheduler;
+    template<typename SchedulerTraits> friend class custom_scheduler;
+    friend class arena;
+    friend class governor;
+    friend struct WorkerDescriptor;
+
+    //! Arena slot to try to acquire first for the next new master.
+    unsigned limit;
+
+    //! Number of masters that own this arena.
+    /** This may be smaller than the number of masters who have entered the arena. */
+    unsigned number_of_masters;
+
+    //! Total number of slots in the arena
+    const unsigned number_of_slots;
+
+    //! Number of workers that belong to this arena
+    const unsigned number_of_workers;
+
+    //! Pointer to the RML server object that services requests for this arena.
+    rml::tbb_server* server;
+
+    //! Counter used to allocate job indices
+    tbb::atomic<size_t> next_job_index;
+
+    //! Stack size of worker threads
+    size_t stack_size;
+
+    //! Array of workers.
+    WorkerDescriptor* worker_list;
+
+#if __TBB_COUNT_TASK_NODES
+    //! Net number of nodes that have been allocated from heap.
+    /** Updated each time a scheduler is destroyed. */
+    atomic<intptr_t> task_node_count;
+#endif /* __TBB_COUNT_TASK_NODES */
+
+    //! Estimate of number of available tasks.  
+    /** The estimate is either 0 (SNAPSHOT_EMPTY), infinity (SNAPSHOT_FULL), or a special value. 
+        The implementation of arena::is_busy_or_empty requires that pool_state_t be unsigned. */
+    typedef uintptr_t pool_state_t;
+
+    //! Current estimate of number of available tasks.  
+    tbb::atomic<pool_state_t> pool_state;
+protected:
+    UnpaddedArenaPrefix( unsigned number_of_slots_, unsigned number_of_workers_ ) :
+        number_of_masters(1),
+        number_of_slots(number_of_slots_),
+        number_of_workers(number_of_workers_)
+    {
+#if __TBB_COUNT_TASK_NODES
+        task_node_count = 0;
+#endif /* __TBB_COUNT_TASK_NODES */
+        limit = number_of_workers_;
+        server = NULL;
+        stack_size = 0;
+        next_job_index = 0;
+    }
+        
+private:
+    //! Return reference to corresponding arena.
+    arena& Arena();
+
+    /*override*/ version_type version() const {
+        return 0;
+    }
+
+    /*override*/ unsigned max_job_count() const {
+        return number_of_workers;
+    }
+
+    /*override*/ size_t min_stack_size() const {
+        return stack_size;
+    }
+
+    /*override*/ policy_type policy() const {
+        return throughput;
+    }
+
+    /*override*/ job* create_one_job();
+
+    /*override*/ void cleanup( job& j );
+
+    /*override*/ void acknowledge_close_connection();
+
+    /*override*/ void process( job& j );
+}; // class UnpaddedArenaPrefix
+
+//------------------------------------------------------------------------
+// ArenaPrefix
+//------------------------------------------------------------------------
+
+//! The prefix to arena with padding.
+class ArenaPrefix: public UnpaddedArenaPrefix {
+    //! Padding to fill out to multiple of cache line size.
+    char pad[(sizeof(UnpaddedArenaPrefix)/NFS_MaxLineSize+1)*NFS_MaxLineSize-sizeof(UnpaddedArenaPrefix)];
+
+public:
+    ArenaPrefix( unsigned number_of_slots_, unsigned number_of_workers_ ) :
+        UnpaddedArenaPrefix(number_of_slots_,number_of_workers_)
+    {
+    }
+}; // class ArenaPrefix
+
+#endif /* !__TBB_ARENA_PER_MASTER */
+
+//------------------------------------------------------------------------
+// arena_slot
+//------------------------------------------------------------------------
+
+struct arena_slot {
+#if __TBB_ARENA_PER_MASTER
+    //! Scheduler of the thread attached to the slot
+    /** Marks the slot as busy, and is used to iterate through the schedulers belonging to this arena **/
+    generic_scheduler* my_scheduler;
+#endif /* __TBB_ARENA_PER_MASTER */
+
+    // Task pool (the deque of task pointers) of the scheduler that owns this slot
+    /** Also is used to specify if the slot is empty or locked:
+         0 - empty
+        -1 - locked **/
+    task** task_pool;
+
+    //! Index of the first ready task in the deque.
+    /** Modified by thieves, and by the owner during compaction/reallocation **/
+    size_t head;
+
+    //! Padding to avoid false sharing caused by the thieves accessing this slot
+    char pad1[NFS_MaxLineSize - sizeof(size_t) - sizeof(task**)
+#if __TBB_ARENA_PER_MASTER
+              - sizeof(generic_scheduler*)
+#endif /* __TBB_ARENA_PER_MASTER */
+             ];
+
+    //! Index of the element following the last ready task in the deque.
+    /** Modified by the owner thread. **/
+    size_t tail;
+
+#if __TBB_ARENA_PER_MASTER
+    //! Hints provided for operations with the container of starvation-resistant tasks.
+    /** Modified by the owner thread (during these operations). **/
+    unsigned hint_for_push, hint_for_pop;
+
+#endif /* __TBB_ARENA_PER_MASTER */
+
+#if __TBB_STATISTICS
+    //! Set of counters to accumulate internal statistics related to this arena
+    statistics_counters *my_counters;
+#endif /* __TBB_STATISTICS */
+    //! Padding to avoid false sharing caused by the thieves accessing the next slot
+    char pad2[NFS_MaxLineSize - sizeof(size_t)
+#if __TBB_ARENA_PER_MASTER
+              - 2*sizeof(unsigned)
+#endif /* __TBB_ARENA_PER_MASTER */
+#if __TBB_STATISTICS
+              - sizeof(statistics_counters*)
+#endif /* __TBB_STATISTICS */
+             ];
+}; // class arena_slot
+
+//------------------------------------------------------------------------
+// arena
+//------------------------------------------------------------------------
+
+#if __TBB_ARENA_PER_MASTER
+
+//! arena data except the array of slots
+/** Separated in order to simplify padding. 
+    Intrusive list node base class is used by market to form a list of arenas. **/
+struct arena_base : intrusive_list_node {
+    //! Market owning this arena
+    market* my_market;
+
+    //! Maximal currently busy slot.
+    atomic<unsigned> my_limit;
+
+    //! Number of slots in the arena
+    unsigned my_num_slots;
+
+    //! Number of workers requested by the master thread owning the arena
+    unsigned my_max_num_workers;
+
+    //! Number of workers that are currently requested from the resource manager
+    atomic<int> my_num_workers_requested;
+
+    //! Number of workers that have been marked out by the resource manager to service the arena
+    unsigned my_num_workers_allotted;
+
+    //! Number of threads in the arena at the moment
+    /** Consists of the workers servicing the arena and one master until it starts 
+        arena shutdown and detaches from it. Plays the role of the arena's ref count. **/
+    atomic<unsigned> my_num_threads_active;
+
+    //! Number of threads that have exited the dispatch loop but have not left the arena yet
+    atomic<unsigned> my_num_threads_leaving;
+
+    //! Current task pool state and estimate of available tasks amount.
+    /** The estimate is either 0 (SNAPSHOT_EMPTY) or infinity (SNAPSHOT_FULL). 
+        Special state is "busy" (any other unsigned value). 
+        Note that the implementation of arena::is_busy_or_empty() requires 
+        pool_state to be unsigned. */
+    tbb::atomic<uintptr_t> pool_state;
+
+#if __TBB_TASK_GROUP_CONTEXT
+    //! Pointer to the "default" task_group_context allocated by the arena's master.
+    task_group_context* my_master_default_ctx;
+#endif
+
+    //! The task pool that guarantees eventual execution even if new tasks are constantly coming.
+    task_stream my_task_stream;
+
+    bool my_mandatory_concurrency;
+
+#if TBB_USE_ASSERT
+    uintptr_t my_guard;
+#endif /* TBB_USE_ASSERT */
+}; // struct arena_base
+
+#endif /* __TBB_ARENA_PER_MASTER */
+
+class arena
+#if __TBB_ARENA_PER_MASTER
+#if (__GNUC__<4 || __GNUC__==4 && __GNUC_MINOR__==0) && !__INTEL_COMPILER
+    : public padded<arena_base>
+#else
+    : padded<arena_base>
+#endif
+#endif /* __TBB_ARENA_PER_MASTER */
+{
+    friend class generic_scheduler;
+    template<typename SchedulerTraits> friend class custom_scheduler;
+    friend class governor;
+
+#if __TBB_ARENA_PER_MASTER
+    friend class market;
+    friend class tbb::task_group_context;
+    friend class allocate_root_with_context_proxy;
+    friend class intrusive_list<arena>;
+
+    typedef padded<arena_base> base_type;
+
+    //! Constructor
+    arena ( market&, unsigned max_num_workers );
+
+    arena& prefix() const { return const_cast<arena&>(*this); }
+
+    //! Allocate an instance of arena.
+    static arena& allocate_arena( market&, unsigned max_num_workers );
+
+#if __TBB_TASK_GROUP_CONTEXT
+    //! Propagates cancellation request to all descendants of the context.
+    /** The propagation is relayed to the market because tasks created by one 
+        master thread can be passed to and executed by other masters. This means 
+        that context trees can span several arenas at once and thus cancellation
+        propagation cannot be generally localized to one arena only. **/
+    void propagate_cancellation ( task_group_context& ctx ) {
+        my_market->propagate_cancellation( ctx );
+    }
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+#else /* !__TBB_ARENA_PER_MASTER */
+
+    friend class UnpaddedArenaPrefix;
+    friend struct WorkerDescriptor;
+
+    //! Get reference to prefix portion
+    ArenaPrefix& prefix() const {return ((ArenaPrefix*)(void*)this)[-1];}
+
+    //! Allocate an instance of arena, and prepare everything to start workers.
+    static arena* allocate_arena( unsigned num_slots, unsigned num_workers, size_t stack_size );
+#endif /* !__TBB_ARENA_PER_MASTER */
+
+    //! Get reference to mailbox corresponding to given affinity_id.
+    mail_outbox& mailbox( affinity_id id ) {
+        __TBB_ASSERT( 0<id, "affinity id must be positive integer" );
+#if __TBB_ARENA_PER_MASTER
+        __TBB_ASSERT( id <= my_num_slots, "affinity id out of bounds" );
+#else /* !__TBB_ARENA_PER_MASTER */
+        __TBB_ASSERT( id <= prefix().number_of_slots, "id out of bounds" );
+#endif /* !__TBB_ARENA_PER_MASTER */
+
+        return ((mail_outbox*)&prefix())[-(int)id];
+    }
+
+    //! Completes arena shutdown, destructs and deallocates it.
+    void free_arena ();
+
+    typedef uintptr_t pool_state_t;
+
+    //! No tasks to steal since last snapshot was taken
+    static const pool_state_t SNAPSHOT_EMPTY = 0;
+
+    //! At least one task has been offered for stealing since the last snapshot started
+    static const pool_state_t SNAPSHOT_FULL = pool_state_t(-1);
+
+#if __TBB_ARENA_PER_MASTER
+    //! No tasks to steal or snapshot is being taken.
+    static bool is_busy_or_empty( pool_state_t s ) { return s < SNAPSHOT_FULL; }
+
+    //! The number of workers active in the arena.
+    unsigned num_workers_active( ) {
+        return my_num_threads_active - my_num_threads_leaving - (slot[0].my_scheduler? 1: 0);
+    }
+
+    //! If necessary, raise a flag that there is new job in arena.
+    template<bool Spawned> void advertise_new_work();
+#else /*__TBB_ARENA_PER_MASTER*/
+    //! Server is going away and hence further calls to adjust_job_count_estimate are unsafe.
+    static const pool_state_t SNAPSHOT_SERVER_GOING_AWAY = pool_state_t(-2);
+
+    //! No tasks to steal or snapshot is being taken.
+    static bool is_busy_or_empty( pool_state_t s ) { return s < SNAPSHOT_SERVER_GOING_AWAY; }
+
+    //! If necessary, raise a flag that task was added to pool recently.
+    inline void mark_pool_full();
+#endif /* __TBB_ARENA_PER_MASTER */
+
+    //! Check if there is job anywhere in arena.
+    /** Return true if no job or if arena is being cleaned up. */
+    bool is_out_of_work();
+
+    //! Initiates arena shutdown.
+    void close_arena ();
+
+#if __TBB_ARENA_PER_MASTER
+    //! Registers the worker with the arena and enters TBB scheduler dispatch loop
+    void process( generic_scheduler& s );
+
+#if __TBB_STATISTICS
+    //! Outputs internal statistics accumulated by the arena
+    void dump_arena_statistics ();
+#endif /* __TBB_STATISTICS */
+#endif /* __TBB_ARENA_PER_MASTER */
+
+#if __TBB_COUNT_TASK_NODES
+    //! Returns the number of task objects "living" in worker threads
+    intptr_t workers_task_node_count();
+#endif
+
+    /** Must be the last data field */
+    arena_slot slot[1];
+}; // class arena
+
+
+#if __TBB_ARENA_PER_MASTER
+template<bool Spawned> void arena::advertise_new_work() {
+    if( !Spawned ) { // i.e. the work was enqueued
+        if( my_max_num_workers==0 ) {
+            my_max_num_workers = 1;
+            my_mandatory_concurrency = true;
+            prefix().pool_state = SNAPSHOT_FULL;
+            my_market->adjust_demand( *this, 1 );
+            return;
+        }
+        // Local memory fence is required to avoid missed wakeups; see the comment below.
+        // Starvation resistant tasks require mandatory concurrency, so missed wakeups are unacceptable.
+        __TBB_full_memory_fence(); 
+    }
+    // Double-check idiom that, in case of spawning, is deliberately sloppy about memory fences.
+    // Technically, to avoid missed wakeups, there should be a full memory fence between the point we 
+    // released the task pool (i.e. spawned task) and read the arena's state.  However, adding such a 
+    // fence might hurt overall performance more than it helps, because the fence would be executed 
+    // on every task pool release, even when stealing does not occur.  Since TBB allows parallelism, 
+    // but never promises parallelism, the missed wakeup is not a correctness problem.
+    pool_state_t snapshot = prefix().pool_state;
+    if( is_busy_or_empty(snapshot) ) {
+        // Attempt to mark as full.  The compare_and_swap below is a little unusual because the 
+        // result is compared to a value that can be different than the comparand argument.
+        if( prefix().pool_state.compare_and_swap( SNAPSHOT_FULL, snapshot )==SNAPSHOT_EMPTY ) {
+            if( snapshot!=SNAPSHOT_EMPTY ) {
+                // This thread read "busy" into snapshot, and then another thread transitioned 
+                // pool_state to "empty" in the meantime, which caused the compare_and_swap above 
+                // to fail.  Attempt to transition pool_state from "empty" to "full".
+                if( prefix().pool_state.compare_and_swap( SNAPSHOT_FULL, SNAPSHOT_EMPTY )!=SNAPSHOT_EMPTY ) {
+                    // Some other thread transitioned pool_state from "empty", and hence became
+                    // responsible for waking up workers.
+                    return;
+                }
+            }
+            // This thread transitioned pool from empty to full state, and thus is responsible for
+            // telling RML that there is work to do.
+            if( Spawned ) {
+                if( my_mandatory_concurrency ) {
+                    __TBB_ASSERT(my_max_num_workers==1, "");
+                    // There was deliberate oversubscription on 1 core for the sake of starvation-resistant tasks.
+                    // Now a single active thread (must be the master) supposedly starts a new parallel region
+                    // with relaxed sequential semantics, and oversubscription should be avoided.
+                    // Demand for workers has been decreased to 0 during SNAPSHOT_EMPTY, so just keep it.
+                    my_max_num_workers = 0;
+                    my_mandatory_concurrency = false;
+                    return;
+                }
+            }
+            my_market->adjust_demand( *this, my_max_num_workers );
+        }
+    }
+}
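+/* Illustrative note (not part of the original TBB source): stripped of the arena details,
+   the double-check idiom above amounts to the following, with a hypothetical atomic `state`:
+
+       state_t snapshot = state;                                  // cheap, possibly unfenced read
+       if( is_busy_or_empty(snapshot) )
+           if( state.compare_and_swap(SNAPSHOT_FULL, snapshot)==SNAPSHOT_EMPTY )
+               request_workers();                                 // only the winning thread pays for this
+
+   The nested compare_and_swap in the real code handles the window in which another thread
+   flips the state from "busy" to "empty" between the plain read and the first CAS.  A missed
+   wakeup is tolerated because TBB permits, but never promises, parallelism. */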
+#else /* !__TBB_ARENA_PER_MASTER */
+inline void arena::mark_pool_full()  {
+    // Double-check idiom that is deliberately sloppy about memory fences.
+    // Technically, to avoid missed wakeups, there should be a full memory fence between the point we 
+    // released the task pool (i.e. spawned task) and read the arena's state.  However, adding such a 
+    // fence might hurt overall performance more than it helps, because the fence would be executed 
+    // on every task pool release, even when stealing does not occur.  Since TBB allows parallelism, 
+    // but never promises parallelism, the missed wakeup is not a correctness problem.
+    pool_state_t snapshot = prefix().pool_state;
+    if( is_busy_or_empty(snapshot) ) {
+        // Attempt to mark as full.  The compare_and_swap below is a little unusual because the 
+        // result is compared to a value that can be different than the comparand argument.
+        if( prefix().pool_state.compare_and_swap( SNAPSHOT_FULL, snapshot )==SNAPSHOT_EMPTY ) {
+            if( snapshot!=SNAPSHOT_EMPTY ) {
+                // This thread read "busy" into snapshot, and then another thread transitioned 
+                // pool_state to "empty" in the meantime, which caused the compare_and_swap above 
+                // to fail.  Attempt to transition pool_state from "empty" to "full".
+                if( prefix().pool_state.compare_and_swap( SNAPSHOT_FULL, SNAPSHOT_EMPTY )!=SNAPSHOT_EMPTY ) {
+                    // Some other thread transitioned pool_state from "empty", and hence became
+                    // responsible for waking up workers.
+                    return;
+                }
+            }
+            // This thread transitioned pool from empty to full state, and thus is responsible for
+            // telling RML that there is work to do.
+            prefix().server->adjust_job_count_estimate( int(prefix().number_of_workers) );
+        }
+    }
+}
+#endif /* !__TBB_ARENA_PER_MASTER */
+
+} // namespace internal
+} // namespace tbb
+
+#endif /* _TBB_arena_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/cache_aligned_allocator.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/cache_aligned_allocator.cpp
new file mode 100644 (file)
index 0000000..e390cc1
--- /dev/null
@@ -0,0 +1,277 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/cache_aligned_allocator.h"
+#include "tbb/tbb_allocator.h"
+#include "tbb/tbb_exception.h"
+#include "tbb_misc.h"
+#include "dynamic_link.h"
+#include <cstdlib>
+
+#if _WIN32||_WIN64
+#include "tbb/machine/windows_api.h"
+#else
+#include <dlfcn.h>
+#endif /* _WIN32||_WIN64 */
+
+using namespace std;
+
+#if __TBB_WEAK_SYMBOLS
+
+#pragma weak scalable_malloc
+#pragma weak scalable_free
+#pragma weak scalable_aligned_malloc
+#pragma weak scalable_aligned_free
+
+extern "C" {
+    void* scalable_malloc( size_t );
+    void  scalable_free( void* );
+    void* scalable_aligned_malloc( size_t, size_t );
+    void  scalable_aligned_free( void* );
+}
+
+#endif /* __TBB_WEAK_SYMBOLS */
+
+namespace tbb {
+
+namespace internal {
+
+//! Dummy routine used for first indirect call via MallocHandler.
+static void* DummyMalloc( size_t size );
+
+//! Dummy routine used for first indirect call via FreeHandler.
+static void DummyFree( void * ptr );
+
+//! Handler for memory allocation
+static void* (*MallocHandler)( size_t size ) = &DummyMalloc;
+
+//! Handler for memory deallocation
+static void (*FreeHandler)( void* pointer ) = &DummyFree;
+
+//! Dummy routine used for first indirect call via padded_allocate_handler.
+static void* dummy_padded_allocate( size_t bytes, size_t alignment );
+
+//! Dummy routine used for first indirect call via padded_free_handler.
+static void dummy_padded_free( void * ptr );
+
+//! Allocates aligned memory using standard malloc. Used when the scalable allocator is not available.
+static void* padded_allocate( size_t bytes, size_t alignment );
+
+//! Frees memory obtained from padded_allocate using standard free. Used when the scalable allocator is not available.
+static void padded_free( void* p );
+
+//! Handler for padded memory allocation
+static void* (*padded_allocate_handler)( size_t bytes, size_t alignment ) = &dummy_padded_allocate;
+
+//! Handler for padded memory deallocation
+static void (*padded_free_handler)( void* p ) = &dummy_padded_free;
+
+//! Table describing how to link the handlers.
+static const dynamic_link_descriptor MallocLinkTable[] = {
+    DLD(scalable_malloc, MallocHandler),
+    DLD(scalable_free, FreeHandler),
+    DLD(scalable_aligned_malloc, padded_allocate_handler),
+    DLD(scalable_aligned_free, padded_free_handler),
+};
+
+
+#if TBB_USE_DEBUG
+#define DEBUG_SUFFIX "_debug"
+#else
+#define DEBUG_SUFFIX
+#endif /* TBB_USE_DEBUG */
+
+// MALLOCLIB_NAME is the name of the TBB memory allocator library.
+#if _WIN32||_WIN64
+#define MALLOCLIB_NAME "tbbmalloc" DEBUG_SUFFIX ".dll"
+#elif __APPLE__
+#define MALLOCLIB_NAME "libtbbmalloc" DEBUG_SUFFIX ".dylib"
+#elif __linux__
+#define MALLOCLIB_NAME "libtbbmalloc" DEBUG_SUFFIX  __TBB_STRING(.so.TBB_COMPATIBLE_INTERFACE_VERSION)
+#elif __FreeBSD__ || __sun || _AIX
+#define MALLOCLIB_NAME "libtbbmalloc" DEBUG_SUFFIX ".so"
+#else
+#error Unknown OS
+#endif
+
+//! Initialize the allocation/free handler pointers.
+/** Caller is responsible for ensuring this routine is called exactly once.
+    The routine attempts to dynamically link with the TBB memory allocator.
+    If that allocator is not found, it links to malloc and free. */
+void initialize_cache_aligned_allocator() {
+    __TBB_ASSERT( MallocHandler==&DummyMalloc, NULL );
+    bool success = dynamic_link( MALLOCLIB_NAME, MallocLinkTable, 4 );
+    if( !success ) {
+        // If unsuccessful, set the handlers to the default routines.
+        // This must be done now, and not before the dynamic_link attempt above, because if other
+        // threads call the handlers, we want them to go through the DoOneTimeInitializations logic,
+        // which forces them to wait.
+        FreeHandler = &free;
+        MallocHandler = &malloc;
+        padded_allocate_handler = &padded_allocate;
+        padded_free_handler = &padded_free;
+    }
+#if !__TBB_RML_STATIC
+    PrintExtraVersionInfo( "ALLOCATOR", success?"scalable_malloc":"malloc" );
+#endif
+}
+
+//! Defined in task.cpp
+extern void DoOneTimeInitializations();
+
+//! Executed on very first call through MallocHandler
+static void* DummyMalloc( size_t size ) {
+    DoOneTimeInitializations();
+    __TBB_ASSERT( MallocHandler!=&DummyMalloc, NULL );
+    return (*MallocHandler)( size );
+}
+
+//! Executed on very first call through FreeHandler
+static void DummyFree( void * ptr ) {
+    DoOneTimeInitializations();
+    __TBB_ASSERT( FreeHandler!=&DummyFree, NULL );
+    (*FreeHandler)( ptr );
+}
+
+//! Executed on very first call through padded_allocate_handler
+static void* dummy_padded_allocate( size_t bytes, size_t alignment ) {
+    DoOneTimeInitializations();
+    __TBB_ASSERT( padded_allocate_handler!=&dummy_padded_allocate, NULL );
+    return (*padded_allocate_handler)(bytes, alignment);
+}
+
+//! Executed on very first call through padded_free_handler
+static void dummy_padded_free( void * ptr ) {
+    DoOneTimeInitializations();
+    __TBB_ASSERT( padded_free_handler!=&dummy_padded_free, NULL );
+    (*padded_free_handler)( ptr );
+}    
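+/* Illustrative note (not part of the original TBB source): the handlers above implement a
+   lazy-binding trampoline.  Stripped to its essence the pattern looks like this, with
+   hypothetical names:
+
+       static void* first_call( size_t n );                  // forward declaration
+       static void* (*handler)( size_t ) = &first_call;      // starts at the trampoline
+       static void* first_call( size_t n ) {
+           bind_real_handler();                              // one-time, thread-safe setup
+           return (*handler)( n );                           // handler now points at the real routine
+       }
+
+   After the first call, every call goes straight through the function pointer. */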
+
+static size_t NFS_LineSize = 128;
+
+size_t NFS_GetLineSize() {
+    return NFS_LineSize;
+}
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // unary minus operator applied to unsigned type, result still unsigned
+    #pragma warning( disable: 4146 4706 )
+#endif
+
+void* NFS_Allocate( size_t n, size_t element_size, void* /*hint*/ ) {
+    size_t m = NFS_LineSize;
+    __TBB_ASSERT( m<=NFS_MaxLineSize, "illegal value for NFS_LineSize" );
+    __TBB_ASSERT( (m & (m-1))==0, "must be power of two" );
+    size_t bytes = n*element_size;
+
+    if (bytes<n || bytes+m<bytes) {
+        // Overflow
+        throw_exception(eid_bad_alloc);
+    }
+    
+    void* result = (*padded_allocate_handler)( bytes, m );
+    __TBB_ASSERT( ((size_t)result&(m-1)) == 0, "The address returned isn't aligned to cache line size" );
+    return result;
+}
+
+void NFS_Free( void* p ) {
+    (*padded_free_handler)( p );
+}
+
+static void* padded_allocate( size_t bytes, size_t alignment ) {    
+    unsigned char* base;
+    if( !(base=(unsigned char*)malloc(alignment+bytes)) ) {        
+        throw_exception(eid_bad_alloc);
+    }
+    // Round up to the next line
+    unsigned char* result = (unsigned char*)((uintptr_t)(base+alignment)&-alignment);
+    // Record where block actually starts.
+    ((uintptr_t*)result)[-1] = uintptr_t(base);
+    return result;    
+}
+
+static void padded_free( void* p ) {
+    if( p ) {
+        __TBB_ASSERT( (uintptr_t)p>=0x4096, "attempt to free block not obtained from cache_aligned_allocator" );
+        // Recover where block actually starts
+        unsigned char* base = ((unsigned char**)p)[-1];
+        __TBB_ASSERT( (void*)((uintptr_t)(base+NFS_LineSize)&-NFS_LineSize)==p, "not allocated by NFS_Allocate?" );
+        free(base);
+    }
+}
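+/* Illustrative note (not part of the original TBB source): padded_allocate over-allocates by
+   `alignment` bytes, rounds the returned pointer up past `base` to the next alignment
+   boundary, and stashes the address malloc returned in the word just below it so that
+   padded_free can recover it.  With alignment==128 and a hypothetical base address:
+
+       base   = malloc(bytes+128)           e.g. 0x1010
+       result = (base+128) & ~127                0x1080   (128-byte aligned; malloc's own
+                                                           alignment leaves room below it)
+       ((uintptr_t*)result)[-1] = base           0x1010   (read back by padded_free)
+*/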
+
+void* __TBB_EXPORTED_FUNC allocate_via_handler_v3( size_t n ) {    
+    void* result;
+    result = (*MallocHandler) (n);
+    if (!result) {
+        // Overflow
+        throw_exception(eid_bad_alloc);
+    }
+    return result;
+}
+
+void __TBB_EXPORTED_FUNC deallocate_via_handler_v3( void *p ) {
+    if( p ) {        
+        (*FreeHandler)( p );
+    }
+}
+
+bool __TBB_EXPORTED_FUNC is_malloc_used_v3() {
+    if (MallocHandler == &DummyMalloc) {
+        void* void_ptr = (*MallocHandler)(1);
+        (*FreeHandler)(void_ptr);
+    }
+    __TBB_ASSERT( MallocHandler!=&DummyMalloc && FreeHandler!=&DummyFree, NULL );
+    __TBB_ASSERT((MallocHandler==&malloc && FreeHandler==&free) ||
+                 (MallocHandler!=&malloc && FreeHandler!=&free), NULL );
+    return MallocHandler == &malloc;
+}
+
+} // namespace internal
+
+} // namespace tbb
+
+#if __TBB_RML_STATIC
+#include "tbb/atomic.h"
+static tbb::atomic<int> module_inited;
+namespace tbb {
+namespace internal {
+void DoOneTimeInitializations() {
+    if( module_inited!=2 ) {
+        if( module_inited.compare_and_swap(1, 0)==0 ) {
+            initialize_cache_aligned_allocator();
+            module_inited = 2;
+        } else {
+            do {
+                __TBB_Yield();
+            } while( module_inited!=2 );
+        }
+    }
+}
+}} //namespace tbb::internal
+#endif
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/cilk-tbb-interop.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/cilk-tbb-interop.h
new file mode 100644 (file)
index 0000000..87555b8
--- /dev/null
@@ -0,0 +1,120 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef CILK_TBB_INTEROP_H
+#define CILK_TBB_INTEROP_H
+
+#ifndef _WIN32
+#ifdef IN_CILK_RUNTIME
+#define CILK_EXPORT __attribute__((visibility("protected")))
+#else
+#define CILK_EXPORT /* nothing */
+#endif
+#else
+#ifdef IN_CILK_RUNTIME
+#define CILK_EXPORT __declspec(dllexport)
+#else
+#define CILK_EXPORT __declspec(dllimport)
+#endif  // IN_CILK_RUNTIME
+#endif // _WIN32
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* A return code.  0 indicates success */
+typedef int __cilk_tbb_retcode;
+
+enum __cilk_tbb_stack_op {
+    CILK_TBB_STACK_ORPHAN, // disconnecting stack from a thread
+    CILK_TBB_STACK_ADOPT,  // reconnecting orphaned stack to a thread
+    CILK_TBB_STACK_RELEASE // releasing stack
+};
+
+typedef __cilk_tbb_retcode (*__cilk_tbb_pfn_stack_op)(enum __cilk_tbb_stack_op, void* data);
+
+typedef __cilk_tbb_retcode (*__cilk_tbb_pfn_unwatch_stacks)(void *data);
+
+/* Each thunk structure has two pointers: "routine" and "data".
+   The caller of the thunk invokes *routine, passing "data" as the void* parameter. */
+
+/* Thunk invoked by Cilk when it changes the relationship between a stack and a thread.
+   It does not matter what stack the thunk runs on.
+   The thread (not fiber) on which the thunk runs is important.
+
+   CILK_TBB_STACK_ORPHAN
+      The thunk must be invoked on the thread disconnecting itself from the stack.
+      Must "happen before" the stack is adopted elsewhere.
+   CILK_TBB_STACK_ADOPT
+      The thunk must be invoked on the thread adopting the stack.
+   CILK_TBB_STACK_RELEASE
+      The thunk must be invoked on the thread doing the releasing,
+      Must "happen before" the stack is used elsewhere.
+
+   When a non-empty stack is transferred between threads, the first thread must orphan it
+   and the second thread must adopt it.
+
+   An empty stack can be transferred similarly, or simply released by the first thread.
+
+   Here is a summary of the actions as transitions on a state machine.
+
+                       watch                                    ORPHAN
+                       -->-->                                   -->--
+                      /      \                                 /     \
+   (freed empty stack)       (TBB sees stack running on thread)      (stack in limbo)
+                |     \     /                                  \     /     |
+                |      --<--                                    --<--      |
+                ^      RELEASE or                              ADOPT       V
+                 \     unwatch                                            / 
+                  \                                                      /
+                   --------------------------<---------------------------
+                                          RELEASE
+*/
+struct __cilk_tbb_stack_op_thunk {
+    __cilk_tbb_pfn_stack_op routine;
+    void* data;                 /* Set by TBB */
+};
+
+/* Thunk invoked by TBB when it is no longer interested in watching the stack bound to the current thread. */
+struct __cilk_tbb_unwatch_thunk {
+    __cilk_tbb_pfn_unwatch_stacks routine;
+    void* data;      
+};
+
+/* Called by TBB, defined by Cilk. 
+   Requests that callee invoke __cilk_tbb_stack_op_thunk when it orphans a stack. 
+   Callee sets *u to a thunk that TBB should call when it is no longer interested in watching the stack. */
+CILK_EXPORT
+__cilk_tbb_retcode __cilkrts_watch_stack(struct __cilk_tbb_unwatch_thunk* u,
+                                         struct __cilk_tbb_stack_op_thunk o);
+
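+/* Illustrative sketch (not part of the original header): a watcher such as TBB would fill
+   in the stack-op thunk and hand it to the Cilk runtime roughly as follows; the callback
+   name and the scheduler pointer passed as "data" are hypothetical.
+
+       static __cilk_tbb_retcode on_stack_op( enum __cilk_tbb_stack_op op, void* data ) {
+           // react to CILK_TBB_STACK_ORPHAN / CILK_TBB_STACK_ADOPT / CILK_TBB_STACK_RELEASE
+           return 0;
+       }
+
+       struct __cilk_tbb_stack_op_thunk o = { on_stack_op, my_scheduler };
+       struct __cilk_tbb_unwatch_thunk u;
+       if( __cilkrts_watch_stack( &u, o )==0 ) {
+           // ... later, when no longer interested in this stack:
+           (*u.routine)( u.data );
+       }
+*/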
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif  // CILK_TBB_INTEROP_H
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/concurrent_hash_map.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/concurrent_hash_map.cpp
new file mode 100644 (file)
index 0000000..b37387d
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/concurrent_hash_map.h"
+
+namespace tbb {
+
+namespace internal {
+#if !TBB_NO_LEGACY
+struct hash_map_segment_base {
+    typedef spin_rw_mutex segment_mutex_t;
+    //! Type of a hash code.
+    typedef size_t hashcode_t;
+    //! Log2 of n_segment
+    static const size_t n_segment_bits = 6;
+    //! Maximum size of array of chains
+    static const size_t max_physical_size = size_t(1)<<(8*sizeof(hashcode_t)-n_segment_bits);
+    //! Mutex that protects this segment
+    segment_mutex_t my_mutex;
+    // Number of nodes
+    atomic<size_t> my_logical_size;
+    // Size of chains
+    /** Always zero or a power of two */
+    size_t my_physical_size;
+    //! True if my_logical_size>=my_physical_size.
+    /** Used to support Intel(R) Thread Checker. */
+    bool __TBB_EXPORTED_METHOD internal_grow_predicate() const;
+};
+
+bool hash_map_segment_base::internal_grow_predicate() const {
+    // Intel(R) Thread Checker considers the following reads to be races, so we hide them in the 
+    // library so that Intel(R) Thread Checker will ignore them.  The reads are used in a double-check
+    // context, so the program is nonetheless correct despite the race.
+    return my_logical_size >= my_physical_size && my_physical_size < max_physical_size;
+}
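+/* Illustrative note (not part of the original TBB source): internal_grow_predicate() is the
+   cheap, racy half of a double-check.  A caller would re-test the condition under the
+   segment mutex before actually growing; grow_chain_array below is a hypothetical helper.
+
+       if( seg.internal_grow_predicate() ) {
+           hash_map_segment_base::segment_mutex_t::scoped_lock lock( seg.my_mutex, true );
+           if( seg.my_logical_size >= seg.my_physical_size )   // re-check under the write lock
+               grow_chain_array( seg );
+       }
+*/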
+#endif//!TBB_NO_LEGACY
+
+} // namespace internal
+
+} // namespace tbb
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/concurrent_monitor.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/concurrent_monitor.cpp
new file mode 100644 (file)
index 0000000..3cd5054
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "concurrent_monitor.h"
+
+namespace tbb {
+namespace internal {
+
+void concurrent_monitor::prepare_wait( thread_context& thr, void* ctx ) {
+    // this is good place to pump previous spurious wakeup
+    if( thr.spurious ) {
+        thr.spurious = false;
+        thr.sema.P();
+    }
+    thr.context = ctx;
+    thr.in_waitset = true;
+    {
+        tbb::spin_mutex::scoped_lock l( mutex_ec );
+        thr.epoch = epoch;
+        waitset_ec.add( (waitset_t::node_t*)&thr );
+    }
+    __TBB_full_memory_fence();
+}
+
+void concurrent_monitor::cancel_wait( thread_context& thr ) {
+    // spurious wakeup will be pumped in the following prepare_wait()
+    thr.spurious = true;
+    // try to remove node from waitset
+    bool th_in_waitset = thr.in_waitset;
+    if( th_in_waitset ) {
+        tbb::spin_mutex::scoped_lock l( mutex_ec );
+        if (thr.in_waitset) {
+            // successfully removed from waitset,
+            // so there will be no spurious wakeup
+            thr.in_waitset = false;
+            thr.spurious = false;
+            waitset_ec.remove( (waitset_t::node_t&)thr );
+        }
+    }
+}
+
+void concurrent_monitor::notify_one_relaxed() {
+    if( waitset_ec.size()==0 )
+        return;
+    waitset_node_t* n;
+    const waitset_node_t* end = waitset_ec.end();
+    {
+        tbb::spin_mutex::scoped_lock l( mutex_ec );
+        epoch = epoch + 1;
+        n = waitset_ec.front();
+        if( n!=end ) {
+            waitset_ec.remove( *n );
+            to_thread_context(n)->in_waitset = false;
+        }
+    }
+    if( n!=end ) 
+        to_thread_context(n)->sema.V();
+}
+
+void concurrent_monitor::notify_all_relaxed() {
+    if( waitset_ec.size()==0 )
+        return;
+    dllist_t temp;
+    const waitset_node_t* end;
+    {
+        tbb::spin_mutex::scoped_lock l( mutex_ec );
+        epoch = epoch + 1;
+        waitset_ec.flush_to( temp );
+        end = temp.end();
+        for( waitset_node_t* n=temp.front(); n!=end; n=n->next )
+            to_thread_context(n)->in_waitset = false;
+    }
+    waitset_node_t* nxt;
+    for( waitset_node_t* n=temp.front(); n!=end; n=nxt ) {
+        nxt = n->next;
+        to_thread_context(n)->sema.V();
+    }
+#if TBB_USE_DEBUG
+    temp.clear();
+#endif
+}
+
+} // namespace internal
+} // namespace tbb
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/concurrent_monitor.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/concurrent_monitor.h
new file mode 100644 (file)
index 0000000..77ae9a3
--- /dev/null
@@ -0,0 +1,203 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_concurrent_monitor_H
+#define __TBB_concurrent_monitor_H
+
+#include "tbb/tbb_stddef.h"
+#include "tbb/atomic.h"
+#include "tbb/spin_mutex.h"
+#include "semaphore.h"
+
+namespace tbb {
+namespace internal {
+
+//! Circular doubly-linked list with sentinel
+/** head.next points to the front and head.prev points to the back */
+class circular_doubly_linked_list_with_sentinel : no_copy {
+public:  
+    struct node_t {
+        node_t* next;  
+        node_t* prev;  
+        node_t() : next(NULL), prev(NULL) {}
+    };
+
+    // ctor
+    circular_doubly_linked_list_with_sentinel() {clear();}
+    // dtor
+    ~circular_doubly_linked_list_with_sentinel() {__TBB_ASSERT( head.next==&head && head.prev==&head, "the list is not empty" );}
+    
+    inline size_t  size() const {return count;}
+    inline bool    empty()  const {return size()==0;}
+    inline node_t* front()  const {return head.next;}
+    inline node_t* last()   const {return head.prev;}
+    inline node_t* begin()  const {return front();}
+    inline const node_t* end() const {return &head;}
+
+    //! add to the back of the list
+    inline void add( node_t* n ) {
+        count = count + 1;
+        n->prev = head.prev;  
+        n->next = &head;  
+        head.prev->next = n;  
+        head.prev = n;
+    }
+  
+    //! remove node 'n' from this list
+    inline void remove( node_t& n ) {
+        count = count - 1;
+        n.prev->next = n.next;
+        n.next->prev = n.prev;
+    }  
+
+    //! move all elements to 'lst' and initialize this list
+    inline void flush_to( circular_doubly_linked_list_with_sentinel& lst ) {
+        if( count>0 ) {  
+            lst.count = count;
+            lst.head.next = head.next;  
+            lst.head.prev = head.prev;
+            head.next->prev = &lst.head;
+            head.prev->next = &lst.head;
+            clear();
+        }
+    }
+  
+#if !TBB_USE_DEBUG
+private:  
+#endif
+    atomic<size_t> count;
+    node_t head;
+    void clear() {count = 0; head.next = &head; head.prev = &head;}
+};
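+// Illustrative note (not part of the original TBB source): `head` is a sentinel node, so an
+// empty list has head.next==head.prev==&head and front()==end(); traversals therefore compare
+// against end() rather than against NULL (see notify_relaxed() below).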
+
+typedef circular_doubly_linked_list_with_sentinel waitset_t;
+typedef circular_doubly_linked_list_with_sentinel dllist_t;
+typedef circular_doubly_linked_list_with_sentinel::node_t waitset_node_t;
+
+class concurrent_monitor;
+
+//! concurrent_monitor
+/** fine-grained concurrent_monitor implementation */
+class concurrent_monitor : no_copy {
+public:
+    /** per-thread descriptor for concurrent_monitor */
+    class thread_context : waitset_node_t, no_copy {
+        friend class concurrent_monitor;
+    public:
+        thread_context() : spurious(false), context(NULL) {epoch = 0; in_waitset = false;}
+        ~thread_context() { if( spurious ) sema.P(); }
+    private:
+        semaphore   sema;
+        tbb::atomic<unsigned> epoch;
+        tbb::atomic<bool>     in_waitset;
+        bool         spurious;
+        void*        context;
+    };
+
+    //! ctor
+    concurrent_monitor() {epoch = 0;}
+
+    //! prepare to wait by inserting 'thr' into the wait queue
+    void prepare_wait( thread_context& thr, void* ctx = 0 );
+
+    //! Commit the wait if the event count (epoch) has not changed; otherwise, cancel the wait.
+    /** Returns true if committed; false if canceled. */
+    inline bool commit_wait( thread_context& thr ) {
+        bool do_it = thr.epoch==epoch;
+        // this check is just an optimization
+        if( do_it ) {
+            thr.sema.P();
+            __TBB_ASSERT( !thr.in_waitset, "still in the queue?" );
+        } else {
+            cancel_wait( thr );
+        }
+        return do_it;
+    }
+    //! Cancel the wait. Removes the thread from the wait queue if not removed yet.
+    void cancel_wait( thread_context& thr );
+
+    //! Notify one thread about the event
+    void notify_one() {__TBB_full_memory_fence(); notify_one_relaxed();}
+    //! Notify one thread about the event. Relaxed version.
+    void notify_one_relaxed();
+
+    //! Notify all waiting threads of the event
+    void notify_all() {__TBB_full_memory_fence(); notify_all_relaxed();}
+    //! Notify all waiting threads of the event; Relaxed version
+    void notify_all_relaxed();
+
+    //! Notify waiting threads of the event that satisfies the given predicate
+    template<typename P> void notify( const P& predicate ) {__TBB_full_memory_fence(); notify_relaxed( predicate );}
+    //! Notify waiting threads of the event that satisfies the given predicate; Relaxed version
+    template<typename P> void notify_relaxed( const P& predicate );
+
+private:
+    tbb::spin_mutex mutex_ec;
+    waitset_t       waitset_ec;
+    tbb::atomic<unsigned> epoch;
+    thread_context* to_thread_context( waitset_node_t* n ) { return static_cast<thread_context*>(n); }
+};
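+/* Illustrative sketch (not part of the original TBB source): a waiter uses the monitor in the
+   usual prepare / re-check / commit-or-cancel sequence, e.g. with a hypothetical
+   work_available() predicate:
+
+       concurrent_monitor::thread_context ctx;
+       for(;;) {
+           if( work_available() ) break;        // fast path, no blocking
+           monitor.prepare_wait( ctx );
+           if( work_available() ) {             // re-check after registering in the waitset
+               monitor.cancel_wait( ctx );
+               break;
+           }
+           monitor.commit_wait( ctx );          // blocks unless a notification already arrived
+       }
+
+   Producers publish their work and then call notify_one()/notify_all(), or the *_relaxed
+   forms if they have already issued a full memory fence themselves. */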
+
+template<typename P> 
+void concurrent_monitor::notify_relaxed( const P& predicate ) {
+        if( waitset_ec.size()==0 )
+            return;
+        dllist_t temp;
+        waitset_node_t* nxt;
+        const waitset_node_t* end = waitset_ec.end();
+        {
+            tbb::spin_mutex::scoped_lock l( mutex_ec );
+            epoch = epoch + 1;
+            for( waitset_node_t* n=waitset_ec.last(); n!=end; n=nxt ) {
+                nxt = n->prev;
+                thread_context* thr = to_thread_context( n );
+                if( predicate( thr->context ) ) {
+                    waitset_ec.remove( *n );
+                    thr->in_waitset = false;
+                    temp.add( n );
+                }
+            }
+        }
+    
+        end = temp.end();
+        for( waitset_node_t* n=temp.front(); n!=end; n=nxt ) {
+            nxt = n->next;
+            to_thread_context(n)->sema.V();
+        }
+#if TBB_USE_DEBUG
+        temp.clear();
+#endif
+}
+
+} // namespace internal
+} // namespace tbb
+
+#endif /* __TBB_concurrent_monitor_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/concurrent_queue.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/concurrent_queue.cpp
new file mode 100644 (file)
index 0000000..6089660
--- /dev/null
@@ -0,0 +1,613 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/tbb_stddef.h"
+#include "tbb/tbb_machine.h"
+#include "tbb/tbb_exception.h"
+#include "tbb/_concurrent_queue_internal.h"
+#include "concurrent_monitor.h"
+#include "itt_notify.h"
+#include <new>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <cstring>   // for memset()
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+using namespace std;
+
+#if defined(_MSC_VER) && defined(_Wp64)
+    // Workaround for overzealous compiler warnings in /Wp64 mode
+    #pragma warning (disable: 4267)
+#endif
+
+#define RECORD_EVENTS 0
+
+
+namespace tbb {
+
+namespace internal {
+
+typedef concurrent_queue_base_v3 concurrent_queue_base;
+
+typedef size_t ticket;
+
+//! A queue using simple locking.
+/** For efficiency, this class has no constructor.
+    The caller is expected to zero-initialize it. */
+struct micro_queue {
+    typedef concurrent_queue_base::page page;
+
+    friend class micro_queue_pop_finalizer;
+
+    atomic<page*> head_page;
+    atomic<ticket> head_counter;
+
+    atomic<page*> tail_page;
+    atomic<ticket> tail_counter;
+
+    spin_mutex page_mutex;
+    
+    void push( const void* item, ticket k, concurrent_queue_base& base );
+
+    bool pop( void* dst, ticket k, concurrent_queue_base& base );
+
+    micro_queue& assign( const micro_queue& src, concurrent_queue_base& base );
+
+    page* make_copy ( concurrent_queue_base& base, const page* src_page, size_t begin_in_page, size_t end_in_page, ticket& g_index ) ;
+
+    void make_invalid( ticket k );
+};
+
+// This class is kept outside micro_queue because concurrent_queue_base::deallocate_page is virtual.
+class micro_queue_pop_finalizer: no_copy {
+    typedef concurrent_queue_base::page page;
+    ticket my_ticket;
+    micro_queue& my_queue;
+    page* my_page; 
+    concurrent_queue_base &base;
+public:
+    micro_queue_pop_finalizer( micro_queue& queue, concurrent_queue_base& b, ticket k, page* p ) :
+        my_ticket(k), my_queue(queue), my_page(p), base(b)
+    {}
+    ~micro_queue_pop_finalizer() {
+        page* p = my_page;
+        if( p ) {
+            spin_mutex::scoped_lock lock( my_queue.page_mutex );
+            page* q = p->next;
+            my_queue.head_page = q;
+            if( !q ) {
+                my_queue.tail_page = NULL;
+            }
+        }
+        my_queue.head_counter = my_ticket;
+        if( p )
+           base.deallocate_page( p );
+    }
+};
+
+struct predicate_leq {
+    ticket t;
+    predicate_leq( ticket t_ ) : t(t_) {}
+    bool operator() ( void* p ) const {return (ticket)p<=t;}
+};
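+// Illustrative note (not part of the original TBB source): waiters register the ticket they
+// are waiting for (cast to void*) as their monitor context, so a call such as
+// items_avail.notify( predicate_leq(k) ) wakes exactly those waiters whose registered ticket
+// is <= k.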
+
+//! Internal representation of a ConcurrentQueue.
+/** For efficiency, this class has no constructor.
+    The caller is expected to zero-initialize it. */
+class concurrent_queue_rep {
+public:
+private:
+    friend struct micro_queue;
+
+    //! Approximately n_queue/golden ratio
+    static const size_t phi = 3;
+
+public:
+    //! Must be power of 2
+    static const size_t n_queue = 8; 
+
+    //! Map ticket to an array index
+    static size_t index( ticket k ) {
+        return k*phi%n_queue;
+    }
+
+    atomic<ticket> head_counter;
+    concurrent_monitor items_avail;
+    atomic<size_t> n_invalid_entries;
+    char pad1[NFS_MaxLineSize-((sizeof(atomic<ticket>)+sizeof(concurrent_monitor)+sizeof(atomic<size_t>))&(NFS_MaxLineSize-1))];
+
+    atomic<ticket> tail_counter;
+    concurrent_monitor slots_avail;
+    char pad2[NFS_MaxLineSize-((sizeof(atomic<ticket>)+sizeof(concurrent_monitor))&(NFS_MaxLineSize-1))];
+    micro_queue array[n_queue];    
+
+    micro_queue& choose( ticket k ) {
+        // The formula here approximates LRU in a cache-oblivious way.
+        return array[index(k)];
+    }
+
+    //! Value for effective_capacity that denotes unbounded queue.
+    static const ptrdiff_t infinite_capacity = ptrdiff_t(~size_t(0)/2);
+};
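+// Illustrative note (not part of the original TBB source): with phi==3 and n_queue==8,
+// index() maps consecutive tickets 0,1,2,3,... to micro_queues 0,3,6,1,4,7,2,5,..., so
+// back-to-back pushes land in different micro_queues and contend on different locks.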
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // unary minus operator applied to unsigned type, result still unsigned
+    #pragma warning( push )
+    #pragma warning( disable: 4146 )
+#endif
+
+static void* invalid_page;
+
+//------------------------------------------------------------------------
+// micro_queue
+//------------------------------------------------------------------------
+void micro_queue::push( const void* item, ticket k, concurrent_queue_base& base ) {
+    k &= -concurrent_queue_rep::n_queue;
+    page* p = NULL;
+    size_t index = k/concurrent_queue_rep::n_queue & (base.items_per_page-1);
+    if( !index ) {
+        __TBB_TRY {
+            p = base.allocate_page();
+        } __TBB_CATCH(...) {
+            ++base.my_rep->n_invalid_entries;
+            make_invalid( k );
+        }
+        p->mask = 0;
+        p->next = NULL;
+    }
+
+    if( tail_counter!=k ) {
+        atomic_backoff backoff;
+        do {
+            backoff.pause();
+            // An odd tail_counter means a previous push failed to allocate a page (see make_invalid);
+            // throw an exception. Assumes concurrent_queue_rep::n_queue>1, so tail_counter normally stays a multiple of n_queue.
+            if( tail_counter&0x1 ) {
+                ++base.my_rep->n_invalid_entries;
+                throw_exception( eid_bad_last_alloc );
+            }
+        } while( tail_counter!=k ) ;
+    }
+        
+    if( p ) {
+        spin_mutex::scoped_lock lock( page_mutex );
+        if( page* q = tail_page )
+            q->next = p;
+        else
+            head_page = p; 
+        tail_page = p;
+    } else {
+        p = tail_page;
+    }
+    ITT_NOTIFY( sync_acquired, p );
+
+    __TBB_TRY {
+        base.copy_item( *p, index, item );
+        ITT_NOTIFY( sync_releasing, p );
+        // If no exception was thrown, mark item as present.
+        p->mask |= uintptr_t(1)<<index;
+        tail_counter += concurrent_queue_rep::n_queue; 
+    } __TBB_CATCH(...) {
+        ++base.my_rep->n_invalid_entries;
+        tail_counter += concurrent_queue_rep::n_queue; 
+        __TBB_RETHROW();
+    }
+}
+
+bool micro_queue::pop( void* dst, ticket k, concurrent_queue_base& base ) {
+    k &= -concurrent_queue_rep::n_queue;
+    spin_wait_until_eq( head_counter, k );
+    spin_wait_while_eq( tail_counter, k );
+    page& p = *head_page;
+    __TBB_ASSERT( &p, NULL );
+    size_t index = k/concurrent_queue_rep::n_queue & (base.items_per_page-1);
+    bool success = false; 
+    {
+        micro_queue_pop_finalizer finalizer( *this, base, k+concurrent_queue_rep::n_queue, index==base.items_per_page-1 ? &p : NULL ); 
+        if( p.mask & uintptr_t(1)<<index ) {
+            success = true;
+            ITT_NOTIFY( sync_acquired, dst );
+            ITT_NOTIFY( sync_acquired, head_page );
+            base.assign_and_destroy_item( dst, p, index );
+            ITT_NOTIFY( sync_releasing, head_page );
+        } else {
+            --base.my_rep->n_invalid_entries;
+        }
+    }
+    return success;
+}
+
+micro_queue& micro_queue::assign( const micro_queue& src, concurrent_queue_base& base )
+{
+    head_counter = src.head_counter;
+    tail_counter = src.tail_counter;
+    page_mutex   = src.page_mutex;
+
+    const page* srcp = src.head_page;
+    if( srcp ) {
+        ticket g_index = head_counter;
+        __TBB_TRY {
+            size_t n_items  = (tail_counter-head_counter)/concurrent_queue_rep::n_queue;
+            size_t index = head_counter/concurrent_queue_rep::n_queue & (base.items_per_page-1);
+            size_t end_in_first_page = (index+n_items<base.items_per_page)?(index+n_items):base.items_per_page;
+
+            head_page = make_copy( base, srcp, index, end_in_first_page, g_index );
+            page* cur_page = head_page;
+
+            if( srcp != src.tail_page ) {
+                for( srcp = srcp->next; srcp!=src.tail_page; srcp=srcp->next ) {
+                    cur_page->next = make_copy( base, srcp, 0, base.items_per_page, g_index );
+                    cur_page = cur_page->next;
+                }
+
+                __TBB_ASSERT( srcp==src.tail_page, NULL );
+
+                size_t last_index = tail_counter/concurrent_queue_rep::n_queue & (base.items_per_page-1);
+                if( last_index==0 ) last_index = base.items_per_page;
+
+                cur_page->next = make_copy( base, srcp, 0, last_index, g_index );
+                cur_page = cur_page->next;
+            }
+            tail_page = cur_page;
+        } __TBB_CATCH(...) {
+            make_invalid( g_index );
+        }
+    } else {
+        head_page = tail_page = NULL;
+    }
+    return *this;
+}
+
+concurrent_queue_base::page* micro_queue::make_copy( concurrent_queue_base& base, const concurrent_queue_base::page* src_page, size_t begin_in_page, size_t end_in_page, ticket& g_index )
+{
+    page* new_page = base.allocate_page();
+    new_page->next = NULL;
+    new_page->mask = src_page->mask;
+    for( ; begin_in_page!=end_in_page; ++begin_in_page, ++g_index )
+        if( new_page->mask & uintptr_t(1)<<begin_in_page )
+            base.copy_page_item( *new_page, begin_in_page, *src_page, begin_in_page );
+    return new_page;
+}
+
+void micro_queue::make_invalid( ticket k )
+{
+    static concurrent_queue_base::page dummy = {static_cast<page*>((void*)1), 0};
+    // mark it so that no more pushes are allowed.
+    invalid_page = &dummy;
+    {
+        spin_mutex::scoped_lock lock( page_mutex );
+        tail_counter = k+concurrent_queue_rep::n_queue+1;
+        if( page* q = tail_page )
+            q->next = static_cast<page*>(invalid_page);
+        else
+            head_page = static_cast<page*>(invalid_page); 
+        tail_page = static_cast<page*>(invalid_page);
+    }
+    __TBB_RETHROW();
+}
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    #pragma warning( pop )
+#endif // warning 4146 is back
+
+//------------------------------------------------------------------------
+// concurrent_queue_base
+//------------------------------------------------------------------------
+concurrent_queue_base_v3::concurrent_queue_base_v3( size_t item_size ) {
+    items_per_page = item_size<=8 ? 32 :
+                     item_size<=16 ? 16 : 
+                     item_size<=32 ? 8 :
+                     item_size<=64 ? 4 :
+                     item_size<=128 ? 2 :
+                     1;
+    my_capacity = size_t(-1)/(item_size>1 ? item_size : 2); 
+    my_rep = cache_aligned_allocator<concurrent_queue_rep>().allocate(1);
+    __TBB_ASSERT( (size_t)my_rep % NFS_GetLineSize()==0, "alignment error" );
+    __TBB_ASSERT( (size_t)&my_rep->head_counter % NFS_GetLineSize()==0, "alignment error" );
+    __TBB_ASSERT( (size_t)&my_rep->tail_counter % NFS_GetLineSize()==0, "alignment error" );
+    __TBB_ASSERT( (size_t)&my_rep->array % NFS_GetLineSize()==0, "alignment error" );
+    memset(my_rep,0,sizeof(concurrent_queue_rep));
+    new ( &my_rep->items_avail ) concurrent_monitor();
+    new ( &my_rep->slots_avail ) concurrent_monitor();
+    this->item_size = item_size;
+}
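+/* Illustrative note (not part of the original TBB source): the table above keeps a page's
+   item payload at or below 256 bytes for item sizes up to 128, e.g. item_size==4 -> 32
+   items per page, item_size==24 -> 8, item_size==100 -> 2; larger items get a page of
+   their own (items_per_page==1). */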
+
+concurrent_queue_base_v3::~concurrent_queue_base_v3() {
+    size_t nq = my_rep->n_queue;
+    for( size_t i=0; i<nq; i++ )
+        __TBB_ASSERT( my_rep->array[i].tail_page==NULL, "pages were not freed properly" );
+    cache_aligned_allocator<concurrent_queue_rep>().deallocate(my_rep,1);
+}
+
+void concurrent_queue_base_v3::internal_push( const void* src ) {
+    concurrent_queue_rep& r = *my_rep;
+    ticket k = r.tail_counter++;
+    ptrdiff_t e = my_capacity;
+    atomic_backoff backoff;
+    concurrent_monitor::thread_context thr_ctx;
+#if DO_ITT_NOTIFY
+    bool sync_prepare_done = false;
+#endif
+    while( (ptrdiff_t)(k-r.head_counter)>=e ) {
+#if DO_ITT_NOTIFY
+        if( !sync_prepare_done ) {
+            ITT_NOTIFY( sync_prepare, &sync_prepare_done );
+            sync_prepare_done = true;
+        }
+#endif
+        if( !backoff.bounded_pause() ) {
+            bool slept = false;
+            r.slots_avail.prepare_wait( thr_ctx, (void*) ((ptrdiff_t)(k-e)) );
+            while( (ptrdiff_t)(k-r.head_counter)>=const_cast<volatile ptrdiff_t&>(e = my_capacity) ) {
+                if( (slept = r.slots_avail.commit_wait( thr_ctx ) )==true )
+                    break;
+                r.slots_avail.prepare_wait( thr_ctx, (void*) ((ptrdiff_t)(k-e)) );
+            }
+            if( !slept )
+                r.slots_avail.cancel_wait( thr_ctx );
+            break;
+        }
+        e = const_cast<volatile ptrdiff_t&>(my_capacity);
+    }
+    ITT_NOTIFY( sync_acquired, &sync_prepare_done );
+    r.choose( k ).push( src, k, *this );
+    r.items_avail.notify( predicate_leq(k) );
+}
+
+void concurrent_queue_base_v3::internal_pop( void* dst ) {
+    concurrent_queue_rep& r = *my_rep;
+    ticket k;
+    atomic_backoff backoff;
+    concurrent_monitor::thread_context thr_ctx;
+#if DO_ITT_NOTIFY
+    bool sync_prepare_done = false;
+#endif
+    do {
+        k=r.head_counter++;
+        while( r.tail_counter<=k ) {
+#if DO_ITT_NOTIFY
+            if( !sync_prepare_done ) {
+                ITT_NOTIFY( sync_prepare, dst );
+                sync_prepare_done = true;
+            }
+#endif
+            // Queue is empty; pause and re-try a few times
+            if( !backoff.bounded_pause() ) {
+                bool slept = false;
+                r.items_avail.prepare_wait( thr_ctx, (void*)k );
+                while( r.tail_counter<=k ) {
+                    if( (slept = r.items_avail.commit_wait( thr_ctx ) )==true )
+                        break;
+                    r.items_avail.prepare_wait( thr_ctx, (void*)k );
+                }
+                if( !slept )
+                    r.items_avail.cancel_wait( thr_ctx );
+                break; // break from inner while
+            }
+        } // break to here
+    } while( !r.choose(k).pop(dst,k,*this) );
+
+    // wake up a producer..
+    r.slots_avail.notify( predicate_leq(k) );
+}
+
+bool concurrent_queue_base_v3::internal_pop_if_present( void* dst ) {
+    concurrent_queue_rep& r = *my_rep;
+    ticket k;
+    do {
+        k = r.head_counter;
+        for(;;) {
+            if( r.tail_counter<=k ) {
+                // Queue is empty 
+                return false;
+            }
+            // Queue had item with ticket k when we looked.  Attempt to get that item.
+            ticket tk=k;
+            k = r.head_counter.compare_and_swap( tk+1, tk );
+            if( k==tk )
+                break;
+            // Another thread snatched the item, retry.
+        }
+    } while( !r.choose( k ).pop( dst, k, *this ) );
+
+    r.slots_avail.notify( predicate_leq(k) );
+
+    return true;
+}
+
+bool concurrent_queue_base_v3::internal_push_if_not_full( const void* src ) {
+    concurrent_queue_rep& r = *my_rep;
+    ticket k = r.tail_counter;
+    for(;;) {
+        if( (ptrdiff_t)(k-r.head_counter)>=my_capacity ) {
+            // Queue is full
+            return false;
+        }
+        // Queue had empty slot with ticket k when we looked.  Attempt to claim that slot.
+        ticket tk=k;
+        k = r.tail_counter.compare_and_swap( tk+1, tk );
+        if( k==tk ) 
+            break;
+        // Another thread claimed the slot, so retry. 
+    }
+    r.choose(k).push(src,k,*this);
+
+    r.items_avail.notify( predicate_leq(k) );
+    return true;
+}
+
+ptrdiff_t concurrent_queue_base_v3::internal_size() const {
+    __TBB_ASSERT( sizeof(ptrdiff_t)<=sizeof(size_t), NULL );
+    return ptrdiff_t(my_rep->tail_counter-my_rep->head_counter-my_rep->n_invalid_entries);
+}
+
+bool concurrent_queue_base_v3::internal_empty() const {
+    ticket tc = my_rep->tail_counter;
+    ticket hc = my_rep->head_counter;
+    // If tc!=my_rep->tail_counter here, the queue was not empty at some point between the two reads.
+    return ( tc==my_rep->tail_counter && ptrdiff_t(tc-hc-my_rep->n_invalid_entries)<=0 );
+}
+
+void concurrent_queue_base_v3::internal_set_capacity( ptrdiff_t capacity, size_t /*item_size*/ ) {
+    my_capacity = capacity<0 ? concurrent_queue_rep::infinite_capacity : capacity;
+}
+
+void concurrent_queue_base_v3::internal_finish_clear() {
+    size_t nq = my_rep->n_queue;
+    for( size_t i=0; i<nq; ++i ) {
+        page* tp = my_rep->array[i].tail_page;
+        __TBB_ASSERT( my_rep->array[i].head_page==tp, "at most one page should remain" );
+        if( tp!=NULL) {
+            if( tp!=invalid_page ) deallocate_page( tp );
+            my_rep->array[i].tail_page = NULL;
+        }
+    }
+}
+
+void concurrent_queue_base_v3::internal_throw_exception() const {
+    throw_exception( eid_bad_alloc );
+}
+
+void concurrent_queue_base_v3::assign( const concurrent_queue_base& src ) {
+    items_per_page = src.items_per_page;
+    my_capacity = src.my_capacity;
+
+    // copy concurrent_queue_rep.
+    my_rep->head_counter = src.my_rep->head_counter;
+    my_rep->tail_counter = src.my_rep->tail_counter;
+    my_rep->n_invalid_entries = src.my_rep->n_invalid_entries;
+
+    // copy micro_queues
+    for( size_t i = 0; i<my_rep->n_queue; ++i )
+        my_rep->array[i].assign( src.my_rep->array[i], *this);
+
+    __TBB_ASSERT( my_rep->head_counter==src.my_rep->head_counter && my_rep->tail_counter==src.my_rep->tail_counter, 
+            "the source concurrent queue should not be concurrently modified." );
+}
+
+//------------------------------------------------------------------------
+// concurrent_queue_iterator_rep
+//------------------------------------------------------------------------
+class concurrent_queue_iterator_rep: no_assign {
+public:
+    ticket head_counter;   
+    const concurrent_queue_base& my_queue;
+    const size_t offset_of_last;
+    concurrent_queue_base::page* array[concurrent_queue_rep::n_queue];
+    concurrent_queue_iterator_rep( const concurrent_queue_base& queue, size_t offset_of_last_ ) : 
+        head_counter(queue.my_rep->head_counter),
+        my_queue(queue),
+        offset_of_last(offset_of_last_)
+    {
+        const concurrent_queue_rep& rep = *queue.my_rep;
+        for( size_t k=0; k<concurrent_queue_rep::n_queue; ++k )
+            array[k] = rep.array[k].head_page;
+    }
+    //! Set item to point to kth element.  Return true if at end of queue or item is marked valid; false otherwise.
+    bool get_item( void*& item, size_t k ) {
+        if( k==my_queue.my_rep->tail_counter ) {
+            item = NULL;
+            return true;
+        } else {
+            concurrent_queue_base::page* p = array[concurrent_queue_rep::index(k)];
+            __TBB_ASSERT(p,NULL);
+            size_t i = k/concurrent_queue_rep::n_queue & (my_queue.items_per_page-1);
+            item = static_cast<unsigned char*>(static_cast<void*>(p)) + offset_of_last + my_queue.item_size*i;
+            return (p->mask & uintptr_t(1)<<i)!=0;
+        }
+    }
+};
+
+//------------------------------------------------------------------------
+// concurrent_queue_iterator_base
+//------------------------------------------------------------------------
+
+void concurrent_queue_iterator_base_v3::initialize( const concurrent_queue_base& queue, size_t offset_of_last ) {
+    my_rep = cache_aligned_allocator<concurrent_queue_iterator_rep>().allocate(1);
+    new( my_rep ) concurrent_queue_iterator_rep(queue,offset_of_last);
+    size_t k = my_rep->head_counter;
+    if( !my_rep->get_item(my_item, k) ) advance();
+}
+
+concurrent_queue_iterator_base_v3::concurrent_queue_iterator_base_v3( const concurrent_queue_base& queue ) {
+    initialize(queue,0);
+}
+
+concurrent_queue_iterator_base_v3::concurrent_queue_iterator_base_v3( const concurrent_queue_base& queue, size_t offset_of_last ) {
+    initialize(queue,offset_of_last);
+}
+
+void concurrent_queue_iterator_base_v3::assign( const concurrent_queue_iterator_base& other ) {
+    if( my_rep!=other.my_rep ) {
+        if( my_rep ) {
+            cache_aligned_allocator<concurrent_queue_iterator_rep>().deallocate(my_rep, 1);
+            my_rep = NULL;
+        }
+        if( other.my_rep ) {
+            my_rep = cache_aligned_allocator<concurrent_queue_iterator_rep>().allocate(1);
+            new( my_rep ) concurrent_queue_iterator_rep( *other.my_rep );
+        }
+    }
+    my_item = other.my_item;
+}
+
+void concurrent_queue_iterator_base_v3::advance() {
+    __TBB_ASSERT( my_item, "attempt to increment iterator past end of queue" );  
+    size_t k = my_rep->head_counter;
+    const concurrent_queue_base& queue = my_rep->my_queue;
+#if TBB_USE_ASSERT
+    void* tmp;
+    my_rep->get_item(tmp,k);
+    __TBB_ASSERT( my_item==tmp, NULL );
+#endif /* TBB_USE_ASSERT */
+    size_t i = k/concurrent_queue_rep::n_queue & (queue.items_per_page-1);
+    if( i==queue.items_per_page-1 ) {
+        concurrent_queue_base::page*& root = my_rep->array[concurrent_queue_rep::index(k)];
+        root = root->next;
+    }
+    // advance k
+    my_rep->head_counter = ++k;
+    if( !my_rep->get_item(my_item, k) ) advance();
+}
+
+concurrent_queue_iterator_base_v3::~concurrent_queue_iterator_base_v3() {
+    //delete my_rep;
+    cache_aligned_allocator<concurrent_queue_iterator_rep>().deallocate(my_rep, 1);
+    my_rep = NULL;
+}
+
+} // namespace internal
+
+} // namespace tbb
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/concurrent_vector.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/concurrent_vector.cpp
new file mode 100644 (file)
index 0000000..aa90c3f
--- /dev/null
@@ -0,0 +1,603 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/concurrent_vector.h"
+#include "tbb/cache_aligned_allocator.h"
+#include "tbb/tbb_exception.h"
+#include "tbb_misc.h"
+#include "itt_notify.h"
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <cstring>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+#if defined(_MSC_VER) && defined(_Wp64)
+    // Workaround for overzealous compiler warnings in /Wp64 mode
+    #pragma warning (disable: 4267)
+#endif
+
+using namespace std;
+
+namespace tbb {
+
+namespace internal {
+    class concurrent_vector_base_v3::helper :no_assign {
+public:
+    //! memory page size
+    static const size_type page_size = 4096;
+
+    inline static bool incompact_predicate(size_type size) { // assert size != 0, see source/test/test_vector_layout.cpp
+        return size < page_size || ((size-1)%page_size < page_size/2 && size < page_size * 128); // for more details
+    }
+
+    inline static size_type find_segment_end(const concurrent_vector_base_v3 &v) {
+        segment_t *s = v.my_segment;
+        segment_index_t u = s==v.my_storage? pointers_per_short_table : pointers_per_long_table;
+        segment_index_t k = 0;
+        while( k < u && s[k].array > internal::vector_allocation_error_flag )
+            ++k;
+        return k;
+    }
+
+    // TODO: optimize accesses to my_first_block
+    //! Assign first segment size. k is the index of the last segment to be allocated, not a count of segments.
+    inline static void assign_first_segment_if_neccessary(concurrent_vector_base_v3 &v, segment_index_t k) {
+        if( !v.my_first_block ) {
+            /* There was a suggestion to set first segment according to incompact_predicate:
+            while( k && !helper::incompact_predicate(segment_size( k ) * element_size) )
+                --k; // while previous vector size is compact, decrement
+            // reasons to not do it:
+            // * constructor(n) is not ready to accept fragmented segments
+            // * backward compatibility due to that constructor
+            // * current version gives additional guarantee and faster init.
+            // * two calls to reserve() will give the same effect.
+            */
+            v.my_first_block.compare_and_swap(k+1, 0); // store number of segments
+        }
+    }
+
+    inline static void *allocate_segment(concurrent_vector_base_v3 &v, size_type n) {
+        void *ptr = v.vector_allocator_ptr(v, n);
+        if(!ptr) throw_exception(eid_bad_alloc); // check for bad allocation, throw exception
+        return ptr;
+    }
+
+    //! Publish segment so other threads can see it.
+    inline static void publish_segment( segment_t& s, void* rhs ) {
+    // see also itt_store_pointer_with_release_v3()
+        ITT_NOTIFY( sync_releasing, &s.array );
+        __TBB_store_with_release( s.array, rhs );
+    }
+
+    static size_type enable_segment(concurrent_vector_base_v3 &v, size_type k, size_type element_size);
+
+    // TODO: rename as get_segments_table() and return segment pointer
+    inline static void extend_table_if_necessary(concurrent_vector_base_v3 &v, size_type k, size_type start ) {
+        if(k >= pointers_per_short_table && v.my_segment == v.my_storage)
+            extend_segment_table(v, start );
+    }
+
+    static void extend_segment_table(concurrent_vector_base_v3 &v, size_type start);
+
+    inline static segment_t &acquire_segment(concurrent_vector_base_v3 &v, size_type index, size_type element_size, bool owner) {
+        segment_t &s = v.my_segment[index]; // TODO: pass v.my_segment as argument
+        if( !__TBB_load_with_acquire(s.array) ) { // do not check for internal::vector_allocation_error_flag 
+            if( owner ) {
+                enable_segment( v, index, element_size );
+            } else {
+                ITT_NOTIFY(sync_prepare, &s.array);
+                spin_wait_while_eq( s.array, (void*)0 );
+                ITT_NOTIFY(sync_acquired, &s.array);
+            }
+        } else {
+            ITT_NOTIFY(sync_acquired, &s.array);
+        }
+        if( s.array <= internal::vector_allocation_error_flag ) // check for internal::vector_allocation_error_flag
+            throw_exception(eid_bad_last_alloc); // throw custom exception, because it's hard to recover after internal::vector_allocation_error_flag correctly
+        return s;
+    }
+
+    ///// non-static fields of helper for exception-safe iteration across segments
+    segment_t *table;// TODO: review all segment_index_t as just short type
+    size_type first_block, k, sz, start, finish, element_size;
+    helper(segment_t *segments, size_type fb, size_type esize, size_type index, size_type s, size_type f) throw()
+        : table(segments), first_block(fb), k(index), sz(0), start(s), finish(f), element_size(esize) {}
+    inline void first_segment() throw() {
+        __TBB_ASSERT( start <= finish, NULL );
+        __TBB_ASSERT( first_block || !finish, NULL );
+        if( k < first_block ) k = 0; // process solid segment at a time
+        size_type base = segment_base( k );
+        __TBB_ASSERT( base <= start, NULL );
+        finish -= base; start -= base; // rebase as offsets from segment k
+        sz = k ? base : segment_size( first_block ); // sz==base for k>0
+    }
+    inline void next_segment() throw() {
+        finish -= sz; start = 0; // offsets from next segment
+        if( !k ) k = first_block;
+        else { ++k; sz <<= 1; }
+    }
+    template<typename F>
+    inline size_type apply(const F &func) {
+        first_segment();
+        while( sz < finish ) { // work for more than one segment
+            func( table[k], static_cast<char*>(table[k].array)+element_size*start, sz-start );
+            next_segment();
+        }
+        func( table[k], static_cast<char*>(table[k].array)+element_size*start, finish-start );
+        return k;
+    }
+    inline void *get_segment_ptr(size_type index, bool wait) {
+        segment_t &s = table[index];
+        if( !__TBB_load_with_acquire(s.array) && wait ) {
+            ITT_NOTIFY(sync_prepare, &s.array);
+            spin_wait_while_eq( s.array, (void*)0 );
+            ITT_NOTIFY(sync_acquired, &s.array);
+        }
+        return s.array;
+    }
+    ~helper() {
+        if( sz >= finish ) return; // the work is done correctly
+        cleanup();
+    }
+
+    //! Out-of-line code to assist the destructor in infrequent cases.
+    void cleanup();
+
+    /// TODO: turn into lambda functions when available
+    struct init_body {
+        internal_array_op2 func;
+        const void *arg;
+        init_body(internal_array_op2 init, const void *src) : func(init), arg(src) {}
+        void operator()(segment_t &, void *begin, size_type n) const {
+            func( begin, arg, n );
+        }
+    };
+    struct safe_init_body {
+        internal_array_op2 func;
+        const void *arg;
+        safe_init_body(internal_array_op2 init, const void *src) : func(init), arg(src) {}
+        void operator()(segment_t &s, void *begin, size_type n) const {
+            if( s.array <= internal::vector_allocation_error_flag )
+                throw_exception(eid_bad_last_alloc); // throw custom exception
+            func( begin, arg, n );
+        }
+    };
+    struct destroy_body {
+        internal_array_op1 func;
+        destroy_body(internal_array_op1 destroy) : func(destroy) {}
+        void operator()(segment_t &s, void *begin, size_type n) const {
+            if( s.array > internal::vector_allocation_error_flag )
+                func( begin, n );
+        }
+    };
+};
+
+void concurrent_vector_base_v3::helper::extend_segment_table(concurrent_vector_base_v3 &v, concurrent_vector_base_v3::size_type start) {
+    if( start > segment_size(pointers_per_short_table) ) start = segment_size(pointers_per_short_table);
+    // If other threads are trying to set pointers in the short segment, wait for them to finish their
+    // assignments before we copy the short segment to the long segment. Note: grow_to_at_least depends on this.
+    for( segment_index_t i = 0; segment_base(i) < start && v.my_segment == v.my_storage; i++ )
+        if(!v.my_storage[i].array) {
+            ITT_NOTIFY(sync_prepare, &v.my_storage[i].array);
+            atomic_backoff backoff;
+            do backoff.pause(); while( v.my_segment == v.my_storage && !v.my_storage[i].array );
+            ITT_NOTIFY(sync_acquired, &v.my_storage[i].array);
+        }
+    if( v.my_segment != v.my_storage ) return;
+
+    segment_t* s = (segment_t*)NFS_Allocate( pointers_per_long_table, sizeof(segment_t), NULL );
+    // No need to check !s here, because NFS_Allocate throws exception if it cannot allocate the requested storage.
+    memset( s, 0, pointers_per_long_table*sizeof(segment_t) );
+    for( segment_index_t i = 0; i < pointers_per_short_table; i++)
+        s[i] = v.my_storage[i];
+    if( v.my_segment.compare_and_swap( s, v.my_storage ) != v.my_storage )
+        NFS_Free( s );
+    // else TODO: add ITT_NOTIFY signals for v.my_segment?
+}
+
+concurrent_vector_base_v3::size_type concurrent_vector_base_v3::helper::enable_segment(concurrent_vector_base_v3 &v, concurrent_vector_base_v3::size_type k, concurrent_vector_base_v3::size_type element_size) {
+    segment_t* s = v.my_segment; // TODO: optimize out as argument? Optimize accesses to my_first_block
+    __TBB_ASSERT( s[k].array <= internal::vector_allocation_error_flag, "concurrent operation during growth?" );
+    if( !k ) {
+        assign_first_segment_if_neccessary(v, default_initial_segments-1);
+        __TBB_TRY {
+            publish_segment(s[0], allocate_segment(v, segment_size(v.my_first_block) ) );
+        } __TBB_CATCH(...) { // intercept exception here, assign internal::vector_allocation_error_flag value, re-throw exception
+            publish_segment(s[0], internal::vector_allocation_error_flag);
+            __TBB_RETHROW();
+        }
+        return 2;
+    }
+    size_type m = segment_size(k);
+    if( !v.my_first_block ) // push_back only
+        spin_wait_while_eq( v.my_first_block, segment_index_t(0) );
+    if( k < v.my_first_block ) {
+        // s[0].array is changed only once ( 0 -> !0 ) and points to uninitialized memory
+        void *array0 = __TBB_load_with_acquire(s[0].array);
+        if( !array0 ) {
+            // sync_prepare called only if there is a wait
+            ITT_NOTIFY(sync_prepare, &s[0].array );
+            spin_wait_while_eq( s[0].array, (void*)0 );
+            array0 = __TBB_load_with_acquire(s[0].array);
+        }
+        ITT_NOTIFY(sync_acquired, &s[0].array);
+        if( array0 <= internal::vector_allocation_error_flag ) { // check for internal::vector_allocation_error_flag of initial segment
+            publish_segment(s[k], internal::vector_allocation_error_flag); // and assign internal::vector_allocation_error_flag here
+            throw_exception(eid_bad_last_alloc); // throw custom exception
+        }
+        publish_segment( s[k],
+                static_cast<void*>( static_cast<char*>(array0) + segment_base(k)*element_size )
+        );
+    } else {
+        __TBB_TRY {
+            publish_segment(s[k], allocate_segment(v, m));
+        } __TBB_CATCH(...) { // intercept exception here, assign internal::vector_allocation_error_flag value, re-throw exception
+            publish_segment(s[k], internal::vector_allocation_error_flag);
+            __TBB_RETHROW();
+        }
+    }
+    return m;
+}
+
+void concurrent_vector_base_v3::helper::cleanup() {
+    if( !sz ) { // allocation failed, restore the table
+        segment_index_t k_start = k, k_end = segment_index_of(finish-1);
+        if( segment_base( k_start ) < start )
+            get_segment_ptr(k_start++, true); // wait
+        if( k_start < first_block ) {
+            void *array0 = get_segment_ptr(0, start>0); // wait if necessary
+            if( array0 && !k_start ) ++k_start;
+            if( array0 <= internal::vector_allocation_error_flag )
+                for(; k_start < first_block && k_start <= k_end; ++k_start )
+                    publish_segment(table[k_start], internal::vector_allocation_error_flag);
+            else for(; k_start < first_block && k_start <= k_end; ++k_start )
+                    publish_segment(table[k_start], static_cast<void*>(
+                        static_cast<char*>(array0) + segment_base(k_start)*element_size) );
+        }
+        for(; k_start <= k_end; ++k_start ) // not in first block
+            if( !__TBB_load_with_acquire(table[k_start].array) )
+                publish_segment(table[k_start], internal::vector_allocation_error_flag);
+        // fill allocated items
+        first_segment();
+        goto recover;
+    }
+    while( sz <= finish ) { // there is still work for at least one segment
+        next_segment();
+recover:
+        void *array = table[k].array;
+        if( array > internal::vector_allocation_error_flag )
+            std::memset( static_cast<char*>(array)+element_size*start, 0, ((sz<finish?sz:finish) - start)*element_size );
+        else __TBB_ASSERT( array == internal::vector_allocation_error_flag, NULL );
+    }
+}
+
+concurrent_vector_base_v3::~concurrent_vector_base_v3() {
+    segment_t* s = my_segment;
+    if( s != my_storage ) {
+        // Clear short segment.
+        for( segment_index_t i = 0; i < pointers_per_short_table; i++)
+            my_storage[i].array = NULL;
+#if TBB_USE_DEBUG
+        for( segment_index_t i = 0; i < pointers_per_long_table; i++)
+            __TBB_ASSERT( my_segment[i].array <= internal::vector_allocation_error_flag, "Segment should have been freed. Please recompile with new TBB before using exceptions.");
+#endif
+        my_segment = my_storage;
+        NFS_Free( s );
+    }
+}
+
+concurrent_vector_base_v3::size_type concurrent_vector_base_v3::internal_capacity() const {
+    return segment_base( helper::find_segment_end(*this) );
+}
+
+void concurrent_vector_base_v3::internal_throw_exception(size_type t) const {
+    switch(t) {
+        case 0: throw_exception(eid_out_of_range);
+        case 1: throw_exception(eid_segment_range_error);
+        case 2: throw_exception(eid_index_range_error);
+    }
+}
+
+void concurrent_vector_base_v3::internal_reserve( size_type n, size_type element_size, size_type max_size ) {
+    if( n>max_size )
+        throw_exception(eid_reservation_length_error);
+    __TBB_ASSERT( n, NULL );
+    helper::assign_first_segment_if_neccessary(*this, segment_index_of(n-1));
+    segment_index_t k = helper::find_segment_end(*this);
+    __TBB_TRY {
+        for( ; segment_base(k)<n; ++k ) {
+            helper::extend_table_if_necessary(*this, k, 0);
+            if(my_segment[k].array <= internal::vector_allocation_error_flag)
+                helper::enable_segment(*this, k, element_size);
+        }
+    } __TBB_CATCH(...) {
+        my_segment[k].array = NULL;
+        __TBB_RETHROW(); // repair and rethrow
+    }
+}
+
+void concurrent_vector_base_v3::internal_copy( const concurrent_vector_base_v3& src, size_type element_size, internal_array_op2 copy ) {
+    size_type n = src.my_early_size;
+    __TBB_ASSERT( my_segment == my_storage, NULL);
+    if( n ) {
+        helper::assign_first_segment_if_neccessary(*this, segment_index_of(n-1));
+        size_type b;
+        for( segment_index_t k=0; (b=segment_base(k))<n; ++k ) {
+            if( (src.my_segment == (segment_t*)src.my_storage && k >= pointers_per_short_table)
+                || src.my_segment[k].array <= internal::vector_allocation_error_flag ) {
+                my_early_size = b; break;
+            }
+            helper::extend_table_if_necessary(*this, k, 0);
+            size_type m = helper::enable_segment(*this, k, element_size);
+            if( m > n-b ) m = n-b;
+            my_early_size = b+m;
+            copy( my_segment[k].array, src.my_segment[k].array, m );
+        }
+    }
+}
+
+void concurrent_vector_base_v3::internal_assign( const concurrent_vector_base_v3& src, size_type element_size, internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy ) {
+    size_type n = src.my_early_size;
+    while( my_early_size>n ) { // TODO: improve
+        segment_index_t k = segment_index_of( my_early_size-1 );
+        size_type b=segment_base(k);
+        size_type new_end = b>=n ? b : n;
+        __TBB_ASSERT( my_early_size>new_end, NULL );
+        if( my_segment[k].array <= internal::vector_allocation_error_flag) // check vector was broken before
+            throw_exception(eid_bad_last_alloc); // throw custom exception
+        // destructors are supposed to not throw any exceptions
+        destroy( (char*)my_segment[k].array+element_size*(new_end-b), my_early_size-new_end );
+        my_early_size = new_end;
+    }
+    size_type dst_initialized_size = my_early_size;
+    my_early_size = n;
+    helper::assign_first_segment_if_neccessary(*this, segment_index_of(n));
+    size_type b;
+    for( segment_index_t k=0; (b=segment_base(k))<n; ++k ) {
+        if( (src.my_segment == (segment_t*)src.my_storage && k >= pointers_per_short_table)
+            || src.my_segment[k].array <= internal::vector_allocation_error_flag ) { // if source is damaged
+                my_early_size = b; break; // TODO: this may leave items undestroyed
+        }
+        helper::extend_table_if_necessary(*this, k, 0);
+        if( !my_segment[k].array )
+            helper::enable_segment(*this, k, element_size);
+        else if( my_segment[k].array <= internal::vector_allocation_error_flag )
+            throw_exception(eid_bad_last_alloc); // throw custom exception
+        size_type m = k? segment_size(k) : 2;
+        if( m > n-b ) m = n-b;
+        size_type a = 0;
+        if( dst_initialized_size>b ) {
+            a = dst_initialized_size-b;
+            if( a>m ) a = m;
+            assign( my_segment[k].array, src.my_segment[k].array, a );
+            m -= a;
+            a *= element_size;
+        }
+        if( m>0 )
+            copy( (char*)my_segment[k].array+a, (char*)src.my_segment[k].array+a, m );
+    }
+    __TBB_ASSERT( src.my_early_size==n, "detected use of concurrent_vector::operator= with right side that was concurrently modified" );
+}
+
+void* concurrent_vector_base_v3::internal_push_back( size_type element_size, size_type& index ) {
+    __TBB_ASSERT( sizeof(my_early_size)==sizeof(uintptr_t), NULL );
+    size_type tmp = __TBB_FetchAndIncrementWacquire(&my_early_size);
+    index = tmp;
+    segment_index_t k_old = segment_index_of( tmp );
+    size_type base = segment_base(k_old);
+    helper::extend_table_if_necessary(*this, k_old, tmp);
+    segment_t& s = helper::acquire_segment(*this, k_old, element_size, base==tmp);
+    size_type j_begin = tmp-base;
+    return (void*)((char*)s.array+element_size*j_begin);
+}
+
+void concurrent_vector_base_v3::internal_grow_to_at_least( size_type new_size, size_type element_size, internal_array_op2 init, const void *src ) {
+    internal_grow_to_at_least_with_result( new_size, element_size, init, src );
+}
+
+concurrent_vector_base_v3::size_type concurrent_vector_base_v3::internal_grow_to_at_least_with_result( size_type new_size, size_type element_size, internal_array_op2 init, const void *src ) {
+    size_type e = my_early_size;
+    while( e<new_size ) {
+        size_type f = my_early_size.compare_and_swap(new_size,e);
+        if( f==e ) {
+            internal_grow( e, new_size, element_size, init, src );
+            break;
+        }
+        e = f;
+    }
+    // Check that segment allocation has completed, waiting if necessary
+    segment_index_t i, k_old = segment_index_of( new_size-1 );
+    if( k_old >= pointers_per_short_table && my_segment == my_storage ) {
+        spin_wait_while_eq( my_segment, my_storage );
+    }
+    for( i = 0; i <= k_old; ++i ) {
+        segment_t &s = my_segment[i];
+        if(!s.array) {
+            ITT_NOTIFY(sync_prepare, &s.array);
+            atomic_backoff backoff;
+            do backoff.pause();
+            while( !__TBB_load_with_acquire(my_segment[i].array) ); // my_segment may change concurrently
+            ITT_NOTIFY(sync_acquired, &s.array);
+        }
+        if( my_segment[i].array <= internal::vector_allocation_error_flag )
+            throw_exception(eid_bad_last_alloc);
+    }
+#if TBB_USE_DEBUG
+    size_type capacity = internal_capacity();
+    __TBB_ASSERT( capacity >= new_size, NULL);
+#endif
+    return e;
+}
+
+concurrent_vector_base_v3::size_type concurrent_vector_base_v3::internal_grow_by( size_type delta, size_type element_size, internal_array_op2 init, const void *src ) {
+    size_type result = my_early_size.fetch_and_add(delta);
+    internal_grow( result, result+delta, element_size, init, src );
+    return result;
+}
+
+void concurrent_vector_base_v3::internal_grow( const size_type start, size_type finish, size_type element_size, internal_array_op2 init, const void *src ) {
+    __TBB_ASSERT( start<finish, "start must be less than finish" );
+    segment_index_t k_start = segment_index_of(start), k_end = segment_index_of(finish-1);
+    helper::assign_first_segment_if_neccessary(*this, k_end);
+    helper::extend_table_if_necessary(*this, k_end, start);
+    helper range(my_segment, my_first_block, element_size, k_start, start, finish);
+    for(; k_end > k_start && k_end >= range.first_block; --k_end ) // allocate segments in reverse order
+        helper::acquire_segment(*this, k_end, element_size, true/*for k_end>k_start*/);
+    for(; k_start <= k_end; ++k_start ) // but allocate first block in straight order
+        helper::acquire_segment(*this, k_start, element_size, segment_base( k_start ) >= start );
+    range.apply( helper::init_body(init, src) );
+}
+
+void concurrent_vector_base_v3::internal_resize( size_type n, size_type element_size, size_type max_size, const void *src,
+                                                internal_array_op1 destroy, internal_array_op2 init ) {
+    size_type j = my_early_size;
+    if( n > j ) { // construct items
+        internal_reserve(n, element_size, max_size);
+        my_early_size = n;
+        helper for_each(my_segment, my_first_block, element_size, segment_index_of(j), j, n);
+        for_each.apply( helper::safe_init_body(init, src) );
+    } else {
+        my_early_size = n;
+        helper for_each(my_segment, my_first_block, element_size, segment_index_of(n), n, j);
+        for_each.apply( helper::destroy_body(destroy) );
+    }
+}
+
+concurrent_vector_base_v3::segment_index_t concurrent_vector_base_v3::internal_clear( internal_array_op1 destroy ) {
+    __TBB_ASSERT( my_segment, NULL );
+    size_type j = my_early_size;
+    my_early_size = 0;
+    helper for_each(my_segment, my_first_block, 0, 0, 0, j); // element_size is safe to be zero if 'start' is zero
+    j = for_each.apply( helper::destroy_body(destroy) );
+    size_type i = helper::find_segment_end(*this);
+    return j < i? i : j+1;
+}
+
+void *concurrent_vector_base_v3::internal_compact( size_type element_size, void *table, internal_array_op1 destroy, internal_array_op2 copy )
+{
+    const size_type my_size = my_early_size;
+    const segment_index_t k_end = helper::find_segment_end(*this); // allocated segments
+    const segment_index_t k_stop = my_size? segment_index_of(my_size-1) + 1 : 0; // number of segments to store existing items: 0=>0; 1,2=>1; 3,4=>2; [5-8]=>3;..
+    const segment_index_t first_block = my_first_block; // number of merged segments, getting values from atomics
+
+    segment_index_t k = first_block;
+    if(k_stop < first_block)
+        k = k_stop;
+    else
+        while (k < k_stop && helper::incompact_predicate(segment_size( k ) * element_size) ) k++;
+    if(k_stop == k_end && k == first_block)
+        return NULL;
+
+    segment_t *const segment_table = my_segment;
+    internal_segments_table &old = *static_cast<internal_segments_table*>( table );
+    memset(&old, 0, sizeof(old));
+
+    if ( k != first_block && k ) // first segment optimization
+    {
+        // exception can occur here
+        void *seg = old.table[0] = helper::allocate_segment( *this, segment_size(k) );
+        old.first_block = k; // fill info for freeing new segment if exception occurs
+        // copy items to the new segment
+        size_type my_segment_size = segment_size( first_block );
+        for (segment_index_t i = 0, j = 0; i < k && j < my_size; j = my_segment_size) {
+            __TBB_ASSERT( segment_table[i].array > internal::vector_allocation_error_flag, NULL);
+            void *s = static_cast<void*>(
+                static_cast<char*>(seg) + segment_base(i)*element_size );
+            if(j + my_segment_size >= my_size) my_segment_size = my_size - j;
+            __TBB_TRY { // exception can occur here
+                copy( s, segment_table[i].array, my_segment_size );
+            } __TBB_CATCH(...) { // destroy all the already copied items
+                helper for_each(reinterpret_cast<segment_t*>(&old.table[0]), old.first_block, element_size,
+                    0, 0, segment_base(i)+my_segment_size);
+                for_each.apply( helper::destroy_body(destroy) );
+                __TBB_RETHROW();
+            }
+            my_segment_size = i? segment_size( ++i ) : segment_size( i = first_block );
+        }
+        // commit the changes
+        memcpy(old.table, segment_table, k * sizeof(segment_t));
+        for (segment_index_t i = 0; i < k; i++) {
+            segment_table[i].array = static_cast<void*>(
+                static_cast<char*>(seg) + segment_base(i)*element_size );
+        }
+        old.first_block = first_block; my_first_block = k; // now, first_block != my_first_block
+        // destroy original copies
+        my_segment_size = segment_size( first_block ); // old.first_block actually
+        for (segment_index_t i = 0, j = 0; i < k && j < my_size; j = my_segment_size) {
+            if(j + my_segment_size >= my_size) my_segment_size = my_size - j;
+            // destructors are supposed to not throw any exceptions
+            destroy( old.table[i], my_segment_size );
+            my_segment_size = i? segment_size( ++i ) : segment_size( i = first_block );
+        }
+    }
+    // free unnecessary segments allocated by reserve() call
+    if ( k_stop < k_end ) {
+        old.first_block = first_block;
+        memcpy(old.table+k_stop, segment_table+k_stop, (k_end-k_stop) * sizeof(segment_t));
+        memset(segment_table+k_stop, 0, (k_end-k_stop) * sizeof(segment_t));
+        if( !k ) my_first_block = 0;
+    }
+    return table;
+}
+
+void concurrent_vector_base_v3::internal_swap(concurrent_vector_base_v3& v)
+{
+    size_type my_sz = my_early_size, v_sz = v.my_early_size;
+    if(!my_sz && !v_sz) return;
+    size_type tmp = my_first_block; my_first_block = v.my_first_block; v.my_first_block = tmp;
+    bool my_short = (my_segment == my_storage), v_short  = (v.my_segment == v.my_storage);
+    if ( my_short && v_short ) { // swap both tables
+        char tbl[pointers_per_short_table * sizeof(segment_t)];
+        memcpy(tbl, my_storage, pointers_per_short_table * sizeof(segment_t));
+        memcpy(my_storage, v.my_storage, pointers_per_short_table * sizeof(segment_t));
+        memcpy(v.my_storage, tbl, pointers_per_short_table * sizeof(segment_t));
+    }
+    else if ( my_short ) { // my -> v
+        memcpy(v.my_storage, my_storage, pointers_per_short_table * sizeof(segment_t));
+        my_segment = v.my_segment; v.my_segment = v.my_storage;
+    }
+    else if ( v_short ) { // v -> my
+        memcpy(my_storage, v.my_storage, pointers_per_short_table * sizeof(segment_t));
+        v.my_segment = my_segment; my_segment = my_storage;
+    } else {
+        segment_t *ptr = my_segment; my_segment = v.my_segment; v.my_segment = ptr;
+    }
+    my_early_size = v_sz; v.my_early_size = my_sz;
+}
+
+} // namespace internal
+
+} // tbb
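concurrent_vector_base_v3 is the type-erased core of tbb::concurrent_vector; the class template supplies element_size plus init/copy/destroy callbacks and routes growth through internal_push_back(), internal_grow_by() and internal_reserve() above. A minimal usage sketch, not part of the committed sources, assuming only the bundled TBB 3.0 headers:

    #include "tbb/concurrent_vector.h"
    #include <cstdio>

    int main() {
        tbb::concurrent_vector<int> v;
        v.reserve(64);                        // forwarded to internal_reserve()

        // push_back() maps to internal_push_back(); any number of threads
        // may call it concurrently without invalidating existing elements.
        for( int i=0; i<10; ++i )
            v.push_back(i);

        // grow_to_at_least() maps to internal_grow_to_at_least_with_result();
        // the new elements are default-constructed.
        v.grow_to_at_least(32);

        std::printf( "size=%u capacity=%u\n", unsigned(v.size()), unsigned(v.capacity()) );
        return 0;
    }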
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/condition_variable.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/condition_variable.cpp
new file mode 100644 (file)
index 0000000..d4b63ec
--- /dev/null
@@ -0,0 +1,213 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/compat/condition_variable"
+#include "tbb/atomic.h"
+#include "dynamic_link.h"
+#include "itt_notify.h"
+
+namespace tbb {
+
+namespace internal {
+
+//condition_variable
+#if _WIN32||_WIN64
+using tbb::interface5::internal::condition_variable_using_event;
+
+static atomic<int> condvar_module_inited;
+
+void WINAPI init_condvar_using_event( condition_variable_using_event* cv_event )
+{
+    cv_event->event = CreateEvent( NULL, TRUE/*manual reset*/, FALSE/*not signalled initially*/, NULL);
+    InitializeCriticalSection( &cv_event->mutex );
+    cv_event->n_waiters = 0;
+    cv_event->release_count = 0;
+    cv_event->epoch = 0;
+}
+
+BOOL WINAPI sleep_condition_variable_cs_using_event( condition_variable_using_event* cv_event, LPCRITICAL_SECTION cs, DWORD dwMilliseconds )
+{
+    EnterCriticalSection( &cv_event->mutex );
+    ++cv_event->n_waiters;
+    unsigned my_generation = cv_event->epoch;
+    LeaveCriticalSection( &cv_event->mutex );
+    LeaveCriticalSection( cs );
+    for (;;) {
+        // should come here at least once
+        DWORD rc = WaitForSingleObject( cv_event->event, dwMilliseconds );
+        EnterCriticalSection( &cv_event->mutex );
+        if( rc!=WAIT_OBJECT_0 ) {
+            --cv_event->n_waiters;
+            LeaveCriticalSection( &cv_event->mutex );
+            if( rc==WAIT_TIMEOUT ) {
+                SetLastError( WAIT_TIMEOUT );
+                EnterCriticalSection( cs );
+            }
+            return false;
+        }
+        __TBB_ASSERT( rc==WAIT_OBJECT_0, NULL );
+        if( cv_event->release_count>0 && cv_event->epoch!=my_generation )
+            break;
+        LeaveCriticalSection( &cv_event->mutex );
+    }
+
+    // still in the critical section
+    --cv_event->n_waiters;
+    int count = --cv_event->release_count;
+    LeaveCriticalSection( &cv_event->mutex );
+
+    if( count==0 ) {
+        __TBB_ASSERT( cv_event->event, "Premature destruction of condition variable?" );
+        ResetEvent( cv_event->event );
+    }
+    EnterCriticalSection( cs );
+    return true;
+}
+
+void WINAPI wake_condition_variable_using_event( condition_variable_using_event* cv_event )
+{
+    EnterCriticalSection( &cv_event->mutex );
+    if( cv_event->n_waiters>cv_event->release_count ) {
+        SetEvent( cv_event->event ); // Signal the manual-reset event.
+        ++cv_event->release_count;
+        ++cv_event->epoch;
+    }
+    LeaveCriticalSection( &cv_event->mutex );
+}
+
+void WINAPI wake_all_condition_variable_using_event( condition_variable_using_event* cv_event )
+{
+    EnterCriticalSection( &cv_event->mutex );
+    if( cv_event->n_waiters>0 ) {
+        SetEvent( cv_event->event );
+        cv_event->release_count = cv_event->n_waiters;
+        ++cv_event->epoch;
+    }
+    LeaveCriticalSection( &cv_event->mutex );
+}
+
+void WINAPI destroy_condvar_using_event( condition_variable_using_event* cv_event )
+{
+    HANDLE my_event = cv_event->event;
+    EnterCriticalSection( &cv_event->mutex );
+    // NULL is an invalid HANDLE value
+    cv_event->event = NULL;
+    if( cv_event->n_waiters>0 ) {
+        LeaveCriticalSection( &cv_event->mutex );
+        spin_wait_until_eq( cv_event->n_waiters, 0 );
+        // make sure the last thread completes its access to cv
+        EnterCriticalSection( &cv_event->mutex );
+    }
+    LeaveCriticalSection( &cv_event->mutex );
+    CloseHandle( my_event );
+}
+
+void WINAPI destroy_condvar_noop( CONDITION_VARIABLE* /*cv*/ ) { /*no op*/ }
+
+static void (WINAPI *__TBB_init_condvar)( PCONDITION_VARIABLE ) = (void (WINAPI *)(PCONDITION_VARIABLE))&init_condvar_using_event;
+static BOOL (WINAPI *__TBB_condvar_wait)( PCONDITION_VARIABLE, LPCRITICAL_SECTION, DWORD ) = (BOOL (WINAPI *)(PCONDITION_VARIABLE,LPCRITICAL_SECTION, DWORD))&sleep_condition_variable_cs_using_event;
+static void (WINAPI *__TBB_condvar_notify_one)( PCONDITION_VARIABLE ) = (void (WINAPI *)(PCONDITION_VARIABLE))&wake_condition_variable_using_event;
+static void (WINAPI *__TBB_condvar_notify_all)( PCONDITION_VARIABLE ) = (void (WINAPI *)(PCONDITION_VARIABLE))&wake_all_condition_variable_using_event;
+static void (WINAPI *__TBB_destroy_condvar)( PCONDITION_VARIABLE ) = (void (WINAPI *)(PCONDITION_VARIABLE))&destroy_condvar_using_event;
+
+//! Table describing how to link the handlers.
+static const dynamic_link_descriptor CondVarLinkTable[] = {
+    DLD(InitializeConditionVariable, __TBB_init_condvar),
+    DLD(SleepConditionVariableCS,    __TBB_condvar_wait),
+    DLD(WakeConditionVariable,       __TBB_condvar_notify_one),
+    DLD(WakeAllConditionVariable,    __TBB_condvar_notify_all)
+};
+
+void init_condvar_module()
+{
+    __TBB_ASSERT( (uintptr_t)__TBB_init_condvar==(uintptr_t)&init_condvar_using_event, NULL );
+    if( dynamic_link( "Kernel32.dll", CondVarLinkTable, 4 ) )
+        __TBB_destroy_condvar = (void (WINAPI *)(PCONDITION_VARIABLE))&destroy_condvar_noop;
+}
+#endif /* _WIN32||_WIN64 */
+
+} // namespace internal
+
+#if _WIN32||_WIN64
+
+namespace interface5 {
+namespace internal {
+
+using tbb::internal::condvar_module_inited;
+using tbb::internal::__TBB_init_condvar;
+using tbb::internal::__TBB_condvar_wait;
+using tbb::internal::__TBB_condvar_notify_one;
+using tbb::internal::__TBB_condvar_notify_all;
+using tbb::internal::__TBB_destroy_condvar;
+using tbb::internal::init_condvar_module;
+
+void internal_initialize_condition_variable( condvar_impl_t& cv )
+{
+    if( condvar_module_inited!=2 ) {
+        if( condvar_module_inited==0 ) {
+            if( condvar_module_inited.compare_and_swap( 1, 0 )==0 ) {
+                init_condvar_module();
+                condvar_module_inited = 2;
+            }
+        } 
+
+        spin_wait_until_eq( condvar_module_inited, 2 );
+    }
+    __TBB_init_condvar( &cv.cv_native );
+}
+
+void internal_destroy_condition_variable( condvar_impl_t& cv )
+{
+    __TBB_destroy_condvar( &cv.cv_native );
+}
+
+void internal_condition_variable_notify_one( condvar_impl_t& cv )
+{
+    __TBB_condvar_notify_one ( &cv.cv_native );
+}
+
+void internal_condition_variable_notify_all( condvar_impl_t& cv )
+{
+    __TBB_condvar_notify_all( &cv.cv_native );
+}
+
+bool internal_condition_variable_wait( condvar_impl_t& cv, mutex* mtx, const tick_count::interval_t* i )
+{
+    DWORD duration = i ? DWORD((i->seconds()*1000)) : INFINITE;
+    mtx->set_state( mutex::INITIALIZED );
+    BOOL res = __TBB_condvar_wait( &cv.cv_native, mtx->native_handle(), duration );
+    mtx->set_state( mutex::HELD );
+    return res?true:false;
+}
+
+} // namespace internal
+} // namespace interface5
+
+#endif /* _WIN32||_WIN64 */
+
+} // namespace tbb
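On Windows this file resolves the Vista CONDITION_VARIABLE entry points at run time through dynamic_link() and keeps the event-based emulation defined above as the fallback for older systems. The sketch below shows the same resolve-or-fall-back pattern in isolation; it is illustrative only, and the names init_emulated and link_condvar are invented for the example rather than taken from TBB:

    #if defined(_WIN32)
    #include <windows.h>
    #include <cstdio>

    // A function pointer starts out aimed at a local emulation and is
    // re-pointed to the real kernel32 export when that export exists.
    typedef void (WINAPI *init_fn_t)( void* );

    static void WINAPI init_emulated( void* ) { /* event-based fallback would go here */ }
    static init_fn_t init_condvar_ptr = &init_emulated;

    static void link_condvar() {
        HMODULE k32 = GetModuleHandleA( "kernel32.dll" );
        FARPROC  p  = k32 ? GetProcAddress( k32, "InitializeConditionVariable" ) : NULL;
        if( p )
            init_condvar_ptr = (init_fn_t)p;  // Vista or later: use the native routine
    }

    int main() {
        link_condvar();
        std::printf( "using %s\n",
            init_condvar_ptr == &init_emulated ? "event-based emulation"
                                               : "native CONDITION_VARIABLE" );
        return 0;
    }
    #endif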
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/critical_section.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/critical_section.cpp
new file mode 100644 (file)
index 0000000..c67621f
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/critical_section.h"
+#include "itt_notify.h"
+
+namespace tbb {
+    namespace internal {
+
+void critical_section_v4::internal_construct() {
+    ITT_SYNC_CREATE(&my_impl, _T("ppl::critical_section"), _T(""));
+}
+}  // namespace internal
+}  // namespace tbb
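This source file only hooks critical_section up to the ITT tooling; the lock itself is implemented in tbb/critical_section.h. A minimal usage sketch, not part of the committed sources, assuming the bundled headers:

    #include "tbb/critical_section.h"
    #include <cstdio>

    static tbb::critical_section cs;   // non-recursive, PPL-compatible lock
    static long counter = 0;

    void bump() {
        // scoped_lock acquires in its constructor and releases in its destructor.
        tbb::critical_section::scoped_lock guard(cs);
        ++counter;
    }

    int main() {
        bump();
        std::printf( "%ld\n", counter );
        return 0;
    }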
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/custom_scheduler.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/custom_scheduler.h
new file mode 100644 (file)
index 0000000..9151127
--- /dev/null
@@ -0,0 +1,485 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _TBB_custom_scheduler_H
+#define _TBB_custom_scheduler_H
+
+#include "scheduler.h"
+#include "observer_proxy.h"
+#include "itt_notify.h"
+
+namespace tbb {
+namespace internal {
+
+//! Amount of time to pause between steals.
+/** The default values below were found to be best empirically for K-Means
+    on the 32-way Altix and 4-way (*2 for HT) fxqlin04. */
+#if __TBB_ipf
+static const long PauseTime = 1500;
+#else 
+static const long PauseTime = 80;
+#endif
+
+//------------------------------------------------------------------------
+//! Traits classes for scheduler
+//------------------------------------------------------------------------
+
+struct DefaultSchedulerTraits {
+    static const bool itt_possible = true;
+    static const bool has_slow_atomic = false;
+};
+
+struct IntelSchedulerTraits {
+    static const bool itt_possible = false;
+#if __TBB_x86_32||__TBB_x86_64
+    static const bool has_slow_atomic = true;
+#else
+    static const bool has_slow_atomic = false;
+#endif /* __TBB_x86_32||__TBB_x86_64 */
+};
+
+//------------------------------------------------------------------------
+// custom_scheduler
+//------------------------------------------------------------------------
+
+//! A scheduler with a customized evaluation loop.
+/** The customization can use SchedulerTraits to make decisions without needing a run-time check. */
+template<typename SchedulerTraits>
+class custom_scheduler: private generic_scheduler {
+    typedef custom_scheduler<SchedulerTraits> scheduler_type;
+
+    //! Scheduler loop that dispatches tasks.
+    /** If child is non-NULL, it is dispatched first.
+        Then, until "parent" has a reference count of 1, other task are dispatched or stolen. */
+    /*override*/
+    void local_wait_for_all( task& parent, task* child );
+
+    //! Entry point from client code to the scheduler loop that dispatches tasks. 
+    /** The method is virtual, but the *this object is used only for the sake of dispatching on the correct vtable,
+        not necessarily the correct *this object.  The correct *this object is looked up in TLS. */
+    /*override*/
+    void wait_for_all( task& parent, task* child ) {
+        static_cast<custom_scheduler*>(governor::local_scheduler())->scheduler_type::local_wait_for_all( parent, child );
+    }
+
+    //! Construct a custom_scheduler
+    custom_scheduler( arena* a, size_t index ) : generic_scheduler(a, index) {}
+
+    //! Decrements ref_count of a predecessor.
+    /** If it achieves 0, the predecessor is scheduled for execution.
+        When changing, remember that this is a hot path function. */
+    void tally_completion_of_predecessor( task& s, task*& bypass_slot ) {
+        task_prefix& p = s.prefix();
+        if( SchedulerTraits::itt_possible )
+            ITT_NOTIFY(sync_releasing, &p.ref_count);
+        if( SchedulerTraits::has_slow_atomic && p.ref_count==1 ) {
+            p.ref_count=0;
+        } else {
+            if( __TBB_FetchAndDecrementWrelease(&p.ref_count) > 1 ) // more references exist
+                return;
+        }
+        __TBB_ASSERT(p.ref_count==0, "completion of task caused predecessor's reference count to underflow");
+        if( SchedulerTraits::itt_possible )
+            ITT_NOTIFY(sync_acquired, &p.ref_count);
+#if TBB_USE_ASSERT
+        p.extra_state &= ~es_ref_count_active;
+#endif /* TBB_USE_ASSERT */
+
+        if( bypass_slot==NULL )
+            bypass_slot = &s;
+        else
+            local_spawn( s, s.prefix().next );
+    }
+
+public:
+    static generic_scheduler* allocate_scheduler( arena* a, size_t index ) {
+#if !__TBB_ARENA_PER_MASTER
+        __TBB_ASSERT( a, "missing arena" );
+#endif /* !__TBB_ARENA_PER_MASTER */
+        scheduler_type* s = (scheduler_type*)NFS_Allocate(sizeof(scheduler_type),1,NULL);
+        new( s ) scheduler_type( a, index );
+        s->assert_task_pool_valid();
+        ITT_SYNC_CREATE(s, SyncType_Scheduler, SyncObj_TaskPoolSpinning);
+        return s;
+    }
+
+    //! Try getting a task from the mailbox or stealing from another scheduler.
+    /** Returns the stolen task or NULL if all attempts fail. */
+    /* override */ task* receive_or_steal_task( reference_count&, bool );
+
+}; // class custom_scheduler<>
+
+//------------------------------------------------------------------------
+// custom_scheduler methods
+//------------------------------------------------------------------------
+
+template<typename SchedulerTraits>
+task* custom_scheduler<SchedulerTraits>::receive_or_steal_task( reference_count& completion_ref_count,
+                                                                bool return_if_no_work ) {
+    task* t = NULL;
+    inbox.set_is_idle( true );
+    // The state "failure_count==-1" is used only when itt_possible is true,
+    // and denotes that a sync_prepare has not yet been issued.
+    for( int failure_count = -static_cast<int>(SchedulerTraits::itt_possible);; ++failure_count) {
+        if( completion_ref_count==1 ) {
+            if( SchedulerTraits::itt_possible ) {
+                if( failure_count!=-1 ) {
+                    ITT_NOTIFY(sync_prepare, &completion_ref_count);
+                    // Notify Intel(R) Thread Profiler that thread has stopped spinning.
+                    ITT_NOTIFY(sync_acquired, this);
+                }
+                ITT_NOTIFY(sync_acquired, &completion_ref_count);
+            }
+            inbox.set_is_idle( false );
+            return NULL;
+        }
+#if __TBB_ARENA_PER_MASTER
+        size_t n = my_arena->my_limit;
+        __TBB_ASSERT( arena_index < n, NULL );
+#else /* !__TBB_ARENA_PER_MASTER */
+        size_t n = my_arena->prefix().limit;
+#endif /* !__TBB_ARENA_PER_MASTER */
+        if( n>1 ) {
+            if( my_affinity_id && (t=get_mailbox_task()) ) {
+                GATHER_STATISTIC( ++my_counters.mails_received );
+            }
+#if __TBB_ARENA_PER_MASTER
+            // Check if there are tasks in starvation-resistant stream.
+            // Only allowed for workers with empty stack, which is identified by return_if_no_work.
+            else if ( return_if_no_work && (t=dequeue_task()) ) {
+                // just proceed with the obtained task
+            }
+            // Check if the resource manager requires our arena to relinquish some threads 
+            else if ( return_if_no_work && (my_arena->my_num_workers_allotted < my_arena->num_workers_active()) ) {
+                if( SchedulerTraits::itt_possible ) {
+                    if( failure_count!=-1 )
+                        ITT_NOTIFY(sync_cancel, this);
+                }
+                return NULL;
+            }
+#endif /* __TBB_ARENA_PER_MASTER */
+            else {
+                // Try to steal a task from a random victim.
+                if ( !can_steal() )
+                    goto fail;
+                size_t k = random.get() % (n-1);
+                arena_slot* victim = &my_arena->slot[k];
+                // The following condition excludes the master that might have 
+                // already taken our previous place in the arena from the list
+                // of potential victims. But since such a situation can take 
+                // place only in case of significant oversubscription, keeping
+                // the checks simple seems to be preferable to complicating the code.
+                if( k >= arena_index )
+                    ++victim;               // Adjusts random distribution to exclude self
+                t = steal_task( *victim );
+                if( !t ) goto fail;
+                if( is_proxy(*t) ) {
+                    t = strip_proxy((task_proxy*)t);
+                    if( !t ) goto fail;
+                    GATHER_STATISTIC( ++my_counters.proxies_stolen );
+                }
+                t->prefix().extra_state |= es_task_is_stolen;
+                if( is_version_3_task(*t) ) {
+                    innermost_running_task = t;
+                    t->note_affinity( my_affinity_id );
+                }
+                GATHER_STATISTIC( ++my_counters.steals_committed );
+            }
+            __TBB_ASSERT(t,NULL);
+#if __TBB_SCHEDULER_OBSERVER
+            // No memory fence required for read of global_last_observer_proxy, because prior fence on steal/mailbox suffices.
+            if( local_last_observer_proxy!=global_last_observer_proxy ) {
+                notify_entry_observers();
+            }
+#endif /* __TBB_SCHEDULER_OBSERVER */
+            if( SchedulerTraits::itt_possible ) {
+                if( failure_count!=-1 ) {
+                    // FIXME - might be victim, or might be selected from a mailbox
+                    // Notify Intel(R) Thread Profiler that thread has stopped spinning.
+                    ITT_NOTIFY(sync_acquired, this);
+                }
+            }
+            inbox.set_is_idle( false );
+            break; // jumps to: return t;
+        }
+fail:
+        GATHER_STATISTIC( ++my_counters.steals_failed );
+        if( SchedulerTraits::itt_possible && failure_count==-1 ) {
+            // The first attempt to steal work failed, so notify Intel(R) Thread Profiler that
+            // the thread has started spinning.  Ideally, we would do this notification
+            // *before* the first failed attempt to steal, but at that point we do not
+            // know that the steal will fail.
+            ITT_NOTIFY(sync_prepare, this);
+            failure_count = 0;
+        }
+        // Pause, even if we are going to yield, because the yield might return immediately.
+        __TBB_Pause(PauseTime);
+        int yield_threshold = 2*int(n);
+        if( failure_count>=yield_threshold ) {
+            __TBB_Yield();
+            if( failure_count>=yield_threshold+100 ) {
+                // When a worker thread has nothing to do, return it to RML.
+                // For purposes of affinity support, the thread is considered idle while in RML.
+                if( return_if_no_work && my_arena->is_out_of_work() ) {
+                    if( SchedulerTraits::itt_possible ) {
+                        if( failure_count!=-1 )
+                            ITT_NOTIFY(sync_cancel, this);
+                    }
+                    return NULL;
+                }
+                failure_count = yield_threshold;
+            }
+        }
+    }
+    return t;
+}
+
+template<typename SchedulerTraits>
+void custom_scheduler<SchedulerTraits>::local_wait_for_all( task& parent, task* child ) {
+    __TBB_ASSERT( governor::is_set(this), NULL );
+    if( child ) {
+        child->prefix().owner = this;
+    }
+    __TBB_ASSERT( parent.ref_count() >= (child && child->parent() == &parent ? 2 : 1), "ref_count is too small" );
+    assert_task_pool_valid();
+    // Using parent's refcount in sync_prepare (in the stealing loop below) is 
+    // a workaround for TP. We need to name it here to display correctly in Ampl.
+    if( SchedulerTraits::itt_possible )
+        ITT_SYNC_CREATE(&parent.prefix().ref_count, SyncType_Scheduler, SyncObj_TaskStealingLoop);
+#if __TBB_TASK_GROUP_CONTEXT
+    __TBB_ASSERT( parent.prefix().context || (is_worker() && &parent == dummy_task), "parent task does not have context" );
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+    task* t = child;
+    // Constant all_local_work_done is an unreachable refcount value that prevents
+    // quitting the dispatch loop early. It is defined to be in the middle of the range
+    // of negative values representable by the reference_count type.
+    static const reference_count 
+        // For normal dispatch loops
+        parents_work_done = 1,
+        // For termination dispatch loops in masters
+        all_local_work_done = (reference_count)3 << (sizeof(reference_count) * 8 - 2);
+    reference_count quit_point;
+    if( innermost_running_task == dummy_task ) {
+        // We are in the outermost task dispatch loop of a master thread.
+        __TBB_ASSERT( !is_worker(), NULL );
+        quit_point = &parent == dummy_task ? all_local_work_done : parents_work_done;
+    } else {
+        quit_point = parents_work_done;
+    }
+    task* old_innermost_running_task = innermost_running_task;
+#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
+exception_was_caught:
+    try {
+#endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */
+    // Outer loop steals tasks when necessary.
+    for(;;) {
+        // Middle loop evaluates tasks that are pulled off "array".
+        do {
+            // Inner loop evaluates tasks that are handed directly to us by other tasks.
+            while(t) {
+                __TBB_ASSERT( inbox.is_idle_state(false), NULL );
+#if TBB_USE_ASSERT
+                __TBB_ASSERT(!is_proxy(*t),"unexpected proxy");
+                __TBB_ASSERT( t->prefix().owner==this, NULL );
+#if __TBB_TASK_GROUP_CONTEXT
+                if ( !t->prefix().context->my_cancellation_requested ) 
+#endif
+                    __TBB_ASSERT( 1L<<t->state() & (1L<<task::allocated|1L<<task::ready|1L<<task::reexecute), NULL );
+                assert_task_pool_valid();
+#endif /* TBB_USE_ASSERT */
+                task* t_next = NULL;
+                innermost_running_task = t;
+                t->prefix().state = task::executing;
+#if __TBB_TASK_GROUP_CONTEXT
+                if ( !t->prefix().context->my_cancellation_requested )
+#endif
+                {
+                    GATHER_STATISTIC( ++my_counters.tasks_executed );
+#if __TBB_TASK_GROUP_CONTEXT
+                    if( SchedulerTraits::itt_possible )
+                        ITT_STACK(callee_enter, t->prefix().context->itt_caller);
+#endif
+                    t_next = t->execute();
+#if __TBB_TASK_GROUP_CONTEXT
+                    if( SchedulerTraits::itt_possible )
+                        ITT_STACK(callee_leave, t->prefix().context->itt_caller);
+#endif
+                    if (t_next) {
+                        __TBB_ASSERT( t_next->state()==task::allocated,
+                                "if task::execute() returns task, it must be marked as allocated" );
+                        t_next->prefix().extra_state &= ~es_task_is_stolen;
+#if TBB_USE_ASSERT
+                        affinity_id next_affinity=t_next->prefix().affinity;
+                        if (next_affinity != 0 && next_affinity != my_affinity_id)
+                            GATHER_STATISTIC( ++my_counters.affinity_ignored );
+#endif
+                    }
+                }
+                assert_task_pool_valid();
+                switch( task::state_type(t->prefix().state) ) {
+                    case task::executing: {
+                        task* s = t->parent();
+                        __TBB_ASSERT( innermost_running_task==t, NULL );
+                        __TBB_ASSERT( t->prefix().ref_count==0, "Task still has children after it has been executed" );
+                        t->~task();
+                        if( s )
+                            tally_completion_of_predecessor(*s, t_next);
+                        free_task<no_hint>( *t );
+                        assert_task_pool_valid();
+                        break;
+                    }
+
+                    case task::recycle: // set by recycle_as_safe_continuation()
+                        t->prefix().state = task::allocated;
+                        __TBB_ASSERT( t_next != t, "a task returned from method execute() can not be recycled in another way" );
+                        t->prefix().extra_state &= ~es_task_is_stolen;
+                        // for safe continuation, need to atomically decrement ref_count;
+                        tally_completion_of_predecessor(*t, t_next);
+                        assert_task_pool_valid();
+                        break;
+
+                    case task::reexecute: // set by recycle_to_reexecute()
+                        __TBB_ASSERT( t_next, "reexecution requires that method execute() return another task" );
+                        __TBB_ASSERT( t_next != t, "a task returned from method execute() can not be recycled in another way" );
+                        t->prefix().state = task::allocated;
+                        t->prefix().extra_state &= ~es_task_is_stolen;
+                        local_spawn( *t, t->prefix().next );
+                        assert_task_pool_valid();
+                        break;
+                    case task::allocated:
+                        t->prefix().extra_state &= ~es_task_is_stolen;
+                        break;
+#if TBB_USE_ASSERT
+                    case task::ready:
+                        __TBB_ASSERT( false, "task is in READY state upon return from method execute()" );
+                        break;
+                    default:
+                        __TBB_ASSERT( false, "illegal state" );
+#else
+                    default: // just to shut up some compilation warnings
+                        break;
+#endif /* TBB_USE_ASSERT */
+                }
+
+                if( t_next ) {
+                    // The store here has a subtle secondary effect - it fetches *t_next into cache.
+                    t_next->prefix().owner = this;
+                    GATHER_STATISTIC( ++my_counters.spawns_bypassed );
+                }
+                t = t_next;
+            } // end of scheduler bypass loop
+            assert_task_pool_valid();
+
+            if ( parent.prefix().ref_count == quit_point )
+                break;
+            t = get_task();
+            __TBB_ASSERT(!t || !is_proxy(*t),"unexpected proxy");
+#if TBB_USE_ASSERT
+            assert_task_pool_valid();
+            if(t) {
+                assert_task_valid(*t);
+                __TBB_ASSERT( t->prefix().owner==this, "thread got task that it does not own" );
+            }
+#endif /* TBB_USE_ASSERT */
+        } while( t ); // end of local task array processing loop
+
+        if ( quit_point == all_local_work_done ) {
+            __TBB_ASSERT( my_arena_slot == &dummy_slot && my_arena_slot->head == 0 && my_arena_slot->tail == 0, NULL );
+            innermost_running_task = old_innermost_running_task;
+            return;
+        }
+#if __TBB_ARENA_PER_MASTER
+        __TBB_ASSERT( my_arena->my_max_num_workers > 0 || parent.prefix().ref_count == 1, "deadlock detected" );
+#else /* !__TBB_ARENA_PER_MASTER */
+        __TBB_ASSERT( my_arena->prefix().number_of_workers>0||parent.prefix().ref_count==1, "deadlock detected" );
+#endif /* !__TBB_ARENA_PER_MASTER */
+        // old_innermost_running_task is NULL *iff* a worker thread is in its "inborn" dispatch loop
+        // (i.e. its execution stack is empty), and it should return from there if no work is available.
+        t = receive_or_steal_task( parent.prefix().ref_count, !old_innermost_running_task );
+        if (!t) {
+            if( parent.prefix().ref_count==1 ) goto done;
+            __TBB_ASSERT( is_worker() && !old_innermost_running_task, "a thread exits dispatch loop prematurely" );
+            innermost_running_task = NULL;
+            return;
+        }
+        __TBB_ASSERT(t,NULL);
+        __TBB_ASSERT(!is_proxy(*t),"unexpected proxy");
+        t->prefix().owner = this;
+    } // end of stealing loop
+#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
+    } TbbCatchAll( t->prefix().context );
+
+    if( task::state_type(t->prefix().state) == task::recycle ) { // state set by recycle_as_safe_continuation()
+        t->prefix().state = task::allocated;
+        // for safe continuation, need to atomically decrement ref_count;
+        if( SchedulerTraits::itt_possible )
+            ITT_NOTIFY(sync_releasing, &t->prefix().ref_count);
+        if( __TBB_FetchAndDecrementWrelease(&t->prefix().ref_count)==1 ) {
+            if( SchedulerTraits::itt_possible )
+                ITT_NOTIFY(sync_acquired, &t->prefix().ref_count);
+        }else{
+            t = NULL;
+        }
+    }
+    goto exception_was_caught;
+#endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */
+done:
+    if ( !ConcurrentWaitsEnabled(parent) )
+        parent.prefix().ref_count = 0;
+#if TBB_USE_ASSERT
+    parent.prefix().extra_state &= ~es_ref_count_active;
+#endif /* TBB_USE_ASSERT */
+    innermost_running_task = old_innermost_running_task;
+#if __TBB_TASK_GROUP_CONTEXT
+    __TBB_ASSERT(parent.prefix().context && dummy_task->prefix().context, NULL);
+    task_group_context* parent_ctx = parent.prefix().context;
+    if ( parent_ctx->my_cancellation_requested ) {
+        task_group_context::exception_container_type *pe = parent_ctx->my_exception;
+        if ( innermost_running_task == dummy_task && parent_ctx == dummy_task->prefix().context ) {
+            // We are in the outermost task dispatch loop of a master thread, and 
+            // the whole task tree has been collapsed. So we may clear cancellation data.
+            parent_ctx->my_cancellation_requested = 0;
+            __TBB_ASSERT(dummy_task->prefix().context == parent_ctx || !CancellationInfoPresent(*dummy_task), 
+                         "Unexpected exception or cancellation data in the dummy task");
+            // If possible, add assertion that master's dummy task context does not have children
+        }
+        if ( pe )
+            pe->throw_self();
+    }
+    __TBB_ASSERT(!is_worker() || !CancellationInfoPresent(*dummy_task), 
+                 "Worker's dummy task context modified");
+    __TBB_ASSERT(innermost_running_task != dummy_task || !CancellationInfoPresent(*dummy_task), 
+                 "Unexpected exception or cancellation data in the master's dummy task");
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+    assert_task_pool_valid();
+}
+
+} // namespace internal
+} // namespace tbb
+
+#endif /* _TBB_custom_scheduler_H */
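
The dispatch loop above implements TBB's "scheduler bypass": when task::execute() returns a non-NULL task, that task is run next on the same thread instead of being re-spawned, and the recycle/reexecute states set by recycle_as_safe_continuation() and recycle_to_reexecute() are handled afterwards. A minimal user-level sketch of the bypass path (the SerialFib() helper and the cutoff of 16 are illustrative assumptions, not part of this patch):

    #include "tbb/task.h"

    long SerialFib( long n );   // assumed helper for small subproblems

    struct FibContinuation : public tbb::task {
        long* const sum;
        long x, y;
        FibContinuation( long* sum_ ) : sum(sum_) {}
        tbb::task* execute() { *sum = x + y; return NULL; }
    };

    struct FibTask : public tbb::task {
        const long n;
        long* const sum;
        FibTask( long n_, long* sum_ ) : n(n_), sum(sum_) {}
        tbb::task* execute() {
            if( n < 16 ) {
                *sum = SerialFib(n);
                return NULL;                    // no bypass: the dispatch loop frees this task
            }
            FibContinuation& c = *new( allocate_continuation() ) FibContinuation(sum);
            FibTask& a = *new( c.allocate_child() ) FibTask(n-2, &c.x);
            FibTask& b = *new( c.allocate_child() ) FibTask(n-1, &c.y);
            c.set_ref_count(2);
            spawn(b);
            return &a;                          // bypass: 'a' runs next on this thread
        }
    };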
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/dynamic_link.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/dynamic_link.cpp
new file mode 100644 (file)
index 0000000..abefa62
--- /dev/null
@@ -0,0 +1,138 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "dynamic_link.h"
+
+#ifndef LIBRARY_ASSERT
+#include "tbb/tbb_stddef.h"
+#define LIBRARY_ASSERT(x,y) __TBB_ASSERT(x,y)
+#endif /* LIBRARY_ASSERT */
+
+#if _WIN32||_WIN64
+    #include <malloc.h>     /* alloca */
+#else
+    #include <dlfcn.h>
+#if __FreeBSD__
+    #include <stdlib.h>     /* alloca */
+#else
+    #include <alloca.h>
+#endif
+#endif
+
+OPEN_INTERNAL_NAMESPACE
+
+#if __TBB_WEAK_SYMBOLS
+
+bool dynamic_link( void*, const dynamic_link_descriptor descriptors[], size_t n, size_t required )
+{
+    if ( required == ~(size_t)0 )
+        required = n;
+    LIBRARY_ASSERT( required<=n, "Number of required entry points exceeds their total number" );
+    size_t k = 0;
+    // Check if the first required entries are present in what was loaded into our process
+    while ( k < required && descriptors[k].ptr )
+        ++k;
+    if ( k < required )
+        return false;
+    // Commit all the entry points.
+    for ( k = 0; k < n; ++k )
+        *descriptors[k].handler = (pointer_to_handler) descriptors[k].ptr;
+    return true;
+}
+
+#else /* !__TBB_WEAK_SYMBOLS */
+
+bool dynamic_link( void* module, const dynamic_link_descriptor descriptors[], size_t n, size_t required )
+{
+    pointer_to_handler *h = (pointer_to_handler*)alloca(n * sizeof(pointer_to_handler));
+    if ( required == ~(size_t)0 )
+        required = n;
+    LIBRARY_ASSERT( required<=n, "Number of required entry points exceeds their total number" );
+    size_t k = 0;
+    for ( ; k < n; ++k ) {
+#if _WIN32||_WIN64
+        h[k] = pointer_to_handler(GetProcAddress( (HMODULE)module, descriptors[k].name ));
+#else
+        // Lvalue casting is used; this way icc -strict-ansi does not warn about nonstandard pointer conversion
+        (void *&)h[k] = dlsym( module, descriptors[k].name );
+#endif /* _WIN32||_WIN64 */
+        if ( !h[k] && k < required )
+            return false;
+    }
+    LIBRARY_ASSERT( k == n, "if required entries are initialized, all entries are expected to be walked");
+    // Commit the entry points.
+    // Cannot use memset here, because the writes must be atomic.
+    for( k = 0; k < n; ++k )
+        *descriptors[k].handler = h[k];
+    return true;
+}
+
+#endif /* !__TBB_WEAK_SYMBOLS */
+
+bool dynamic_link( const char* library, const dynamic_link_descriptor descriptors[], size_t n, size_t required, dynamic_link_handle* handle )
+{
+#if _WIN32||_WIN64
+    // Interpret non-NULL handle parameter as request to really link against another library.
+    if ( !handle && dynamic_link( GetModuleHandle(NULL), descriptors, n, required ) )
+        // Target library was statically linked into this executable
+        return true;
+    // Prevent Windows from displaying silly message boxes if it fails to load library
+    // (e.g. because of MS runtime problems - one of those crazy manifest related ones)
+#if _XBOX
+    dynamic_link_handle module = LoadLibrary (library);
+#else
+    UINT prev_mode = SetErrorMode (SEM_FAILCRITICALERRORS);
+    dynamic_link_handle module = LoadLibrary (library);
+    SetErrorMode (prev_mode);
+#endif /* _XBOX */
+#else
+    dynamic_link_handle module = dlopen( library, RTLD_LAZY ); 
+#endif /* _WIN32||_WIN64 */
+    if( module ) {
+        if( !dynamic_link( module, descriptors, n, required ) ) {
+            // The library was found but lacks some required entry points; unload it and report failure.
+            dynamic_unlink(module);
+            module = NULL;
+        }
+    }
+    if( handle ) 
+        *handle = module;
+    return module!=NULL;
+}
+
+void dynamic_unlink( dynamic_link_handle handle ) {
+    if( handle ) {
+#if _WIN32||_WIN64
+        FreeLibrary( handle );
+#else
+        dlclose( handle );
+#endif /* _WIN32||_WIN64 */    
+    }
+}
+
+CLOSE_INTERNAL_NAMESPACE
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/dynamic_link.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/dynamic_link.h
new file mode 100644 (file)
index 0000000..6e048b1
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_dynamic_link
+#define __TBB_dynamic_link
+
+// Support for dynamically linking to a shared library.
+// By default, the symbols defined here go in namespace tbb::internal.
+// The symbols can be put in another namespace by defining the preprocessor
+// symbols OPEN_INTERNAL_NAMESPACE and CLOSE_INTERNAL_NAMESPACE to open and
+// close the other namespace.  See default definition below for an example.
+
+#ifndef OPEN_INTERNAL_NAMESPACE
+#define OPEN_INTERNAL_NAMESPACE namespace tbb { namespace internal {
+#define CLOSE_INTERNAL_NAMESPACE }}
+#endif /* OPEN_INTERNAL_NAMESPACE */
+
+#include <stddef.h>
+#if _WIN32||_WIN64
+#include "tbb/machine/windows_api.h"
+#endif /* _WIN32||_WIN64 */
+
+OPEN_INTERNAL_NAMESPACE
+
+//! Type definition for a pointer to a function of type void somefunc(void)
+typedef void (*pointer_to_handler)();
+
+// The double cast through void* in the DLD macro is necessary to
+// prevent warnings from some compilers (g++ 4.1)
+#if __TBB_WEAK_SYMBOLS
+
+#define DLD(s,h) {(pointer_to_handler)&s, (pointer_to_handler*)(void*)(&h)}
+//! Association between a handler name and location of pointer to it.
+struct dynamic_link_descriptor {
+    //! Pointer to the handler (address of the weak symbol)
+    pointer_to_handler ptr;
+    //! Pointer to the handler
+    pointer_to_handler* handler;
+};
+
+#else /* !__TBB_WEAK_SYMBOLS */
+
+#define DLD(s,h) {#s, (pointer_to_handler*)(void*)(&h)}
+//! Association between a handler name and location of pointer to it.
+struct dynamic_link_descriptor {
+    //! Name of the handler
+    const char* name;
+    //! Pointer to the handler
+    pointer_to_handler* handler;
+};
+
+#endif /* !__TBB_WEAK_SYMBOLS */
+
+#if _WIN32||_WIN64
+typedef HMODULE dynamic_link_handle;
+#else 
+typedef void* dynamic_link_handle;
+#endif /* _WIN32||_WIN64 */
+
+//! Fill in dynamically linked handlers.
+/** 'n' is the length of the array descriptors[].
+    'required' is the number of initial entries in the array descriptors[]
+    that have to be found in order for the call to succeed. If the library and 
+    all the required handlers are found, then the corresponding handler pointers 
+    are set, and the return value is true.  Otherwise the original array of 
+    descriptors is left untouched and the return value is false. **/
+bool dynamic_link( const char* libraryname, 
+                   const dynamic_link_descriptor descriptors[], 
+                   size_t n, 
+                   size_t required = ~(size_t)0,
+                   dynamic_link_handle* handle = 0 );
+
+void dynamic_unlink( dynamic_link_handle handle );
+
+CLOSE_INTERNAL_NAMESPACE
+
+#endif /* __TBB_dynamic_link */
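
A hedged sketch of how this interface is used (the library name, entry points, and signatures below are invented for illustration; the actual use in this patch is the Cilk interoperability table in governor.cpp further down):

    #include "dynamic_link.h"

    extern "C" {
        int  example_init( void );          // assumed entry points, for illustration only
        void example_shutdown( void );
    }

    static int  (*init_handler)( void );
    static void (*shutdown_handler)( void );

    static const tbb::internal::dynamic_link_descriptor ExampleLinkTable[] = {
        DLD( example_init,     init_handler ),
        DLD( example_shutdown, shutdown_handler )
    };

    bool link_example_library() {
        // Both entry points are required (the default); no handle is kept,
        // so the library stays loaded for the lifetime of the process.
        return tbb::internal::dynamic_link( "libexample.so", ExampleLinkTable, 2 );
    }

On success the handler pointers are filled in and can be called through directly; on failure they are left untouched, exactly as documented above.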
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/governor.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/governor.cpp
new file mode 100644 (file)
index 0000000..2b97247
--- /dev/null
@@ -0,0 +1,340 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "governor.h"
+#include "tbb_main.h"
+#include "scheduler.h"
+#if __TBB_ARENA_PER_MASTER
+#include "market.h"
+#endif /* __TBB_ARENA_PER_MASTER */
+#include "arena.h"
+
+#include "tbb/task_scheduler_init.h"
+
+#if __TBB_SURVIVE_THREAD_SWITCH
+#include "dynamic_link.h"
+#endif /* __TBB_SURVIVE_THREAD_SWITCH */
+
+namespace tbb {
+namespace internal {
+
+//------------------------------------------------------------------------
+// governor
+//------------------------------------------------------------------------
+
+#if __TBB_SURVIVE_THREAD_SWITCH
+
+#if _WIN32
+#define CILKLIB_NAME "cilkrts20.dll"
+#else
+#define CILKLIB_NAME "libcilkrts.so"
+#endif
+
+//! Pointer to the Cilk runtime routine that registers a stack-op handler
+static __cilk_tbb_retcode (*watch_stack_handler)(struct __cilk_tbb_unwatch_thunk* u,
+                                                 struct __cilk_tbb_stack_op_thunk o);
+
+//! Table describing how to link the handlers.
+static const dynamic_link_descriptor CilkLinkTable[] = {
+    DLD(__cilkrts_watch_stack, watch_stack_handler)
+};
+
+void initialize_survive_thread_switch() {
+    dynamic_link( CILKLIB_NAME, CilkLinkTable, 1 );
+}
+#endif /* __TBB_SURVIVE_THREAD_SWITCH */
+
+namespace rml {
+    tbb_server* make_private_server( tbb_client& client );
+}
+
+void governor::acquire_resources () {
+#if USE_PTHREAD
+    int status = theTLS.create(auto_terminate);
+#else
+    int status = theTLS.create();
+#endif
+    if( status )
+        handle_perror(status, "TBB failed to initialize TLS storage\n");
+
+    ::rml::factory::status_type res = theRMLServerFactory.open(); 
+    UsePrivateRML = res != ::rml::factory::st_success;
+}
+
+void governor::release_resources () {
+    theRMLServerFactory.close();
+#if TBB_USE_ASSERT
+    if( __TBB_InitOnce::initialization_done() && theTLS.get() ) 
+        runtime_warning( "TBB is unloaded while tbb::task_scheduler_init object is alive?" );
+#endif
+    int status = theTLS.destroy();
+    if( status )
+        handle_perror(status, "TBB failed to destroy TLS storage");
+}
+
+rml::tbb_server* governor::create_rml_server ( rml::tbb_client& client ) {
+    rml::tbb_server* server = NULL;
+    if( !UsePrivateRML ) {
+        ::rml::factory::status_type status = theRMLServerFactory.make_server( server, client );
+        if( status != ::rml::factory::st_success ) {
+            UsePrivateRML = true;
+            runtime_warning( "rml::tbb_factorymake_server failed with status %x, falling back on private rml", status );
+        }
+    }
+    if ( !server ) {
+        __TBB_ASSERT( UsePrivateRML, NULL );
+        server = rml::make_private_server( client );
+    }
+    __TBB_ASSERT( server, "Failed to create RML server" );
+    return server;
+}
+
+#if !__TBB_ARENA_PER_MASTER
+
+arena* governor::obtain_arena( int number_of_threads, stack_size_type thread_stack_size ) {
+    mutex::scoped_lock lock( theArenaMutex );
+    arena* arena = theArena;
+    if( arena ) {
+        arena->prefix().number_of_masters += 1;
+    } else {
+        __TBB_ASSERT( number_of_threads > 0, NULL );
+        arena = arena::allocate_arena( 2*number_of_threads, number_of_threads-1,
+                                   thread_stack_size ? thread_stack_size : ThreadStackSize );
+        __TBB_ASSERT( arena->prefix().number_of_masters==1, NULL );
+        NumWorkers = arena->prefix().number_of_workers;
+
+        arena->prefix().server = create_rml_server( arena->prefix() );
+
+        // Publish the arena.  
+        // A memory release fence is not required here, because workers have not started yet,
+        // and concurrent masters inspect theArena while holding theArenaMutex.
+        __TBB_ASSERT( !theArena, NULL );
+        theArena = arena;
+    }
+    return arena;
+}
+
+void governor::finish_with_arena() {
+    mutex::scoped_lock lock( theArenaMutex );
+    arena* a = theArena;
+    __TBB_ASSERT( a, "theArena is missing" );
+    if( --(a->prefix().number_of_masters) )
+        a = NULL;
+    else {
+        theArena = NULL;
+        // Must do this while holding lock, otherwise terminate message might reach
+        // RML thread *after* initialize message reaches it for the next arena,
+        // which causes TLS to be set to new value before old one is erased!
+        a->close_arena();
+    }
+}
+#endif /* !__TBB_ARENA_PER_MASTER */
+
+void governor::sign_on(generic_scheduler* s) {
+    __TBB_ASSERT( !s->is_registered, NULL );  
+    s->is_registered = true;
+#if !__TBB_ARENA_PER_MASTER
+    __TBB_InitOnce::add_ref();
+#endif /* !__TBB_ARENA_PER_MASTER */
+    theTLS.set(s);
+#if __TBB_SURVIVE_THREAD_SWITCH
+    __cilk_tbb_stack_op_thunk o;
+    o.routine = &stack_op_handler;
+    o.data = s;
+    if( watch_stack_handler ) {
+        if( (*watch_stack_handler)(&s->my_cilk_unwatch_thunk, o) ) {
+            // Failed to register with Cilk, make sure we are clean
+            s->my_cilk_unwatch_thunk.routine = NULL;
+        }
+#if TBB_USE_ASSERT
+        else
+            s->my_cilk_state = generic_scheduler::cs_running;
+#endif /* TBB_USE_ASSERT */
+    }
+#endif /* __TBB_SURVIVE_THREAD_SWITCH */
+}
+
+void governor::sign_off(generic_scheduler* s) {
+    if( s->is_registered ) {
+        __TBB_ASSERT( theTLS.get()==s || (!s->is_worker() && !theTLS.get()), "attempt to unregister a wrong scheduler instance" );
+        theTLS.set(NULL);
+        s->is_registered = false;
+#if __TBB_SURVIVE_THREAD_SWITCH
+        __cilk_tbb_unwatch_thunk &ut = s->my_cilk_unwatch_thunk;
+        if ( ut.routine )
+           (*ut.routine)(ut.data);
+#endif /* __TBB_SURVIVE_THREAD_SWITCH */
+#if !__TBB_ARENA_PER_MASTER
+        __TBB_InitOnce::remove_ref();
+#endif /* !__TBB_ARENA_PER_MASTER */
+    }
+}
+
+generic_scheduler* governor::init_scheduler( unsigned num_threads, stack_size_type stack_size, bool auto_init ) {
+    if( !__TBB_InitOnce::initialization_done() )
+        DoOneTimeInitializations();
+    generic_scheduler* s = theTLS.get();
+    if( s ) {
+        s->ref_count += 1;
+        return s;
+    }
+    if( (int)num_threads == task_scheduler_init::automatic )
+        num_threads = default_num_threads();
+#if __TBB_ARENA_PER_MASTER
+    s = generic_scheduler::create_master( 
+            market::create_arena( num_threads - 1, stack_size ? stack_size : ThreadStackSize ) );
+#else /* !__TBB_ARENA_PER_MASTER */
+    s = generic_scheduler::create_master( *obtain_arena(num_threads, stack_size) );
+#endif /* !__TBB_ARENA_PER_MASTER */
+    __TBB_ASSERT(s, "Somehow a local scheduler creation for a master thread failed");
+    s->is_auto_initialized = auto_init;
+    return s;
+}
+
+void governor::terminate_scheduler( generic_scheduler* s ) {
+    __TBB_ASSERT( s == theTLS.get(), "Attempt to terminate non-local scheduler instance" );
+    if( !--(s->ref_count) )
+        s->cleanup_master();
+}
+
+void governor::auto_terminate(void* arg){
+    generic_scheduler* s = static_cast<generic_scheduler*>(arg);
+    if( s && s->is_auto_initialized ) {
+        if( !--(s->ref_count) ) {
+            if ( !theTLS.get() && !s->local_task_pool_empty() ) {
+                // This thread's TLS slot has already been cleared, but cleanup_master()
+                // needs the TLS slot correctly set in order to execute the remaining tasks,
+                // so we temporarily restore its value.
+                theTLS.set(s);
+                s->cleanup_master();
+                theTLS.set(NULL);
+            }
+            else
+                s->cleanup_master();
+        }
+    }
+}
+
+void governor::print_version_info () {
+    if ( UsePrivateRML )
+        PrintExtraVersionInfo( "RML", "private" );
+    else {
+        PrintExtraVersionInfo( "RML", "shared" );
+        theRMLServerFactory.call_with_server_info( PrintRMLVersionInfo, (void*)"" );
+    }
+#if __TBB_SURVIVE_THREAD_SWITCH
+    if( watch_stack_handler )
+        PrintExtraVersionInfo( "CILK", CILKLIB_NAME );
+#endif /* __TBB_SURVIVE_THREAD_SWITCH */
+}
+
+#if __TBB_SURVIVE_THREAD_SWITCH
+__cilk_tbb_retcode governor::stack_op_handler( __cilk_tbb_stack_op op, void* data ) {
+    __TBB_ASSERT(data,NULL);
+    generic_scheduler* s = static_cast<generic_scheduler*>(data);
+#if TBB_USE_ASSERT
+    void* current = theTLS.get();
+#if _WIN32||_WIN64
+    unsigned thread_id = GetCurrentThreadId();
+#else
+    unsigned thread_id = unsigned(pthread_self());
+#endif
+
+#endif /* TBB_USE_ASSERT */
+    switch( op ) {
+        default:
+            __TBB_ASSERT( 0, "invalid op" );
+        case CILK_TBB_STACK_ADOPT: {
+            __TBB_ASSERT( !current && s->my_cilk_state==generic_scheduler::cs_limbo || 
+                          current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid adoption" );
+#if TBB_USE_ASSERT
+            if( current==s ) 
+                runtime_warning( "redundant adoption of %p by thread %x\n", s, thread_id );
+            s->my_cilk_state = generic_scheduler::cs_running;
+#endif /* TBB_USE_ASSERT */
+            theTLS.set(s);
+            break;
+        }
+        case CILK_TBB_STACK_ORPHAN: {
+            __TBB_ASSERT( current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid orphaning" ); 
+#if TBB_USE_ASSERT
+            s->my_cilk_state = generic_scheduler::cs_limbo;
+#endif /* TBB_USE_ASSERT */
+            theTLS.set(NULL);
+            break;
+        }
+        case CILK_TBB_STACK_RELEASE: {
+            __TBB_ASSERT( !current && s->my_cilk_state==generic_scheduler::cs_limbo || 
+                          current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid release" );
+#if TBB_USE_ASSERT
+            s->my_cilk_state = generic_scheduler::cs_freed;
+#endif /* TBB_USE_ASSERT */
+            s->my_cilk_unwatch_thunk.routine = NULL;
+            auto_terminate( s );
+        } 
+    }
+    return 0;
+}
+#endif /* __TBB_SURVIVE_THREAD_SWITCH */
+
+} // namespace internal
+
+//------------------------------------------------------------------------
+// task_scheduler_init
+//------------------------------------------------------------------------
+
+using namespace internal;
+
+/** Left out-of-line for the sake of backward binary compatibility **/
+void task_scheduler_init::initialize( int number_of_threads ) {
+    initialize( number_of_threads, 0 );
+}
+
+void task_scheduler_init::initialize( int number_of_threads, stack_size_type thread_stack_size ) {
+    if( number_of_threads!=deferred ) {
+        __TBB_ASSERT( !my_scheduler, "task_scheduler_init already initialized" );
+        __TBB_ASSERT( number_of_threads==-1 || number_of_threads>=1,
+                    "number_of_threads for task_scheduler_init must be -1 or positive" );
+        my_scheduler = governor::init_scheduler( number_of_threads, thread_stack_size );
+    } else {
+        __TBB_ASSERT( !thread_stack_size, "deferred initialization ignores stack size setting" );
+    }
+}
+
+void task_scheduler_init::terminate() {
+    generic_scheduler* s = static_cast<generic_scheduler*>(my_scheduler);
+    my_scheduler = NULL;
+    __TBB_ASSERT( s, "task_scheduler_init::terminate without corresponding task_scheduler_init::initialize()");
+    governor::terminate_scheduler(s);
+}
+
+int task_scheduler_init::default_num_threads() {
+    return governor::default_num_threads();
+}
+
+} // namespace tbb
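
For context, a minimal sketch of how the public task_scheduler_init entry points implemented above are typically driven from user code (the thread counts and the 4 MB stack size are arbitrary choices for the example):

    #include "tbb/task_scheduler_init.h"

    int main() {
        // Eager initialization: automatic thread count, 4 MB worker stacks.
        // The destructor calls terminate() when 'init' goes out of scope.
        tbb::task_scheduler_init init( tbb::task_scheduler_init::automatic,
                                       4*1024*1024 );

        // Deferred initialization: nothing happens until initialize() is called.
        tbb::task_scheduler_init later( tbb::task_scheduler_init::deferred );
        later.initialize( tbb::task_scheduler_init::default_num_threads() );
        later.terminate();
        return 0;
    }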
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/governor.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/governor.h
new file mode 100644 (file)
index 0000000..45b82a1
--- /dev/null
@@ -0,0 +1,197 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _TBB_governor_H
+#define _TBB_governor_H
+
+#include "tbb/task_scheduler_init.h"
+#if !__TBB_ARENA_PER_MASTER
+#include "tbb/mutex.h"
+#endif /* !__TBB_ARENA_PER_MASTER */
+#include "../rml/include/rml_tbb.h"
+
+#include "tbb_misc.h" // for DetectNumberOfWorkers and ThreadStackSize
+#include "tls.h"
+
+#if __TBB_SURVIVE_THREAD_SWITCH
+#include "cilk-tbb-interop.h"
+#endif /* __TBB_SURVIVE_THREAD_SWITCH */
+
+namespace tbb {
+namespace internal {
+
+#if __TBB_ARENA_PER_MASTER
+class market;
+#else /* !__TBB_ARENA_PER_MASTER */
+class arena;
+#endif /* !__TBB_ARENA_PER_MASTER */
+class generic_scheduler;
+class __TBB_InitOnce;
+
+//------------------------------------------------------------------------
+// Class governor
+//------------------------------------------------------------------------
+
+#if __TBB_ARENA_PER_MASTER
+//! The class handles access to the single instance of market, and to TLS to keep scheduler instances.
+#else /* !__TBB_ARENA_PER_MASTER */
+//! The class handles access to the single instance of arena, and to TLS to keep scheduler instances.
+#endif /* !__TBB_ARENA_PER_MASTER */
+/** It also supports automatic on-demand initialization of the TBB scheduler.
+    The class contains only static data members and methods.*/
+class governor {
+    friend class __TBB_InitOnce;
+#if __TBB_ARENA_PER_MASTER
+    friend class market;
+#else /* !__TBB_ARENA_PER_MASTER */
+    friend void ITT_DoUnsafeOneTimeInitialization ();
+#endif /* __TBB_ARENA_PER_MASTER */
+
+    //! TLS for scheduler instances associated with individual threads
+    static basic_tls<generic_scheduler*> theTLS;
+
+#if !__TBB_ARENA_PER_MASTER
+    //! Currently active arena
+    static arena* theArena;
+
+    //! Mutex guarding creation/destruction of theArena
+    static mutex  theArenaMutex;
+
+    //! Caches the number of workers in the currently active arena
+    static unsigned NumWorkers;
+#endif /* !__TBB_ARENA_PER_MASTER */
+
+    //! Caches the maximal level of parallelism supported by the hardware
+    static unsigned DefaultNumberOfThreads;
+    
+    static rml::tbb_factory theRMLServerFactory;
+
+    static bool UsePrivateRML;
+
+    //! Create key for thread-local storage and initialize RML.
+    static void acquire_resources ();
+
+    //! Destroy the thread-local storage key and deinitialize RML.
+    static void release_resources ();
+
+    static rml::tbb_server* create_rml_server ( rml::tbb_client& );
+
+#if !__TBB_ARENA_PER_MASTER
+    //! Obtain the instance of arena to register a new master thread
+    /** If there is no active arena, create one. */
+    static arena* obtain_arena( int number_of_threads, stack_size_type thread_stack_size );
+#endif /* !__TBB_ARENA_PER_MASTER */
+
+    //! The internal routine to undo automatic initialization.
+    /** The signature is written with void* so that the routine
+        can be the destructor argument to pthread_key_create. */
+    static void auto_terminate(void* scheduler);
+
+public:
+    static unsigned default_num_threads () {
+        // No memory fence required, because at worst each invoking thread calls DetectNumberOfWorkers once.
+        return DefaultNumberOfThreads ? DefaultNumberOfThreads : 
+                                        DefaultNumberOfThreads = DetectNumberOfWorkers();
+    }
+    //! Processes scheduler initialization request (possibly nested) in a master thread
+    /** If necessary creates new instance of arena and/or local scheduler.
+        The auto_init argument specifies if the call is due to automatic initialization. **/
+    static generic_scheduler* init_scheduler( unsigned num_threads, stack_size_type stack_size, bool auto_init = false );
+
+    //! Processes scheduler termination request (possibly nested) in a master thread
+    static void terminate_scheduler( generic_scheduler* s );
+
+#if __TBB_ARENA_PER_MASTER
+    //! Returns number of worker threads in the currently active arena.
+    inline static unsigned max_number_of_workers ();
+
+#else /* !__TBB_ARENA_PER_MASTER */
+    //! Dereference arena when a master thread stops using TBB.
+    /** If no more masters in the arena, terminate workers and destroy it. */
+    static void finish_with_arena();
+
+    static unsigned max_number_of_workers() {
+        __TBB_ASSERT( theArena, "thread did not activate a task_scheduler_init object?" );
+        return NumWorkers;
+    }
+#endif /* !__TBB_ARENA_PER_MASTER */
+
+    //! Register TBB scheduler instance in thread local storage.
+    static void sign_on(generic_scheduler* s);
+
+    //! Unregister TBB scheduler instance from thread local storage.
+    static void sign_off(generic_scheduler* s);
+
+    //! Used to check validity of the local scheduler TLS contents.
+    static bool is_set ( generic_scheduler* s ) { return theTLS.get() == s; }
+
+    //! Temporarily set TLS slot to the given scheduler
+    static void assume_scheduler( generic_scheduler* s ) { 
+#if !__TBB_ARENA_PER_MASTER
+        // should be called by a Master
+        __TBB_ASSERT( !s || !theTLS.get(), "should be called by master" );
+#endif
+        theTLS.set( s ); 
+    }
+
+    //! Obtain the thread local instance of the TBB scheduler.
+    /** If the scheduler has not been initialized yet, initialization is done automatically.
+        Note that an auto-initialized scheduler instance is destroyed only when its thread terminates. **/
+    static generic_scheduler* local_scheduler () {
+        generic_scheduler* s = theTLS.get();
+        return s ? s : init_scheduler( (unsigned)task_scheduler_init::automatic, 0, true );
+    }
+
+    static generic_scheduler* local_scheduler_if_initialized () {
+        return theTLS.get();
+    }
+
+    //! Undo automatic initialization if necessary; call when a thread exits.
+    static void terminate_auto_initialized_scheduler() {
+        auto_terminate( theTLS.get() );
+    }
+
+    static void print_version_info ();
+
+#if __TBB_SURVIVE_THREAD_SWITCH
+    static __cilk_tbb_retcode stack_op_handler( __cilk_tbb_stack_op op, void* );
+#endif /* __TBB_SURVIVE_THREAD_SWITCH */
+}; // class governor
+
+} // namespace internal
+} // namespace tbb
+
+#if __TBB_ARENA_PER_MASTER
+#include "scheduler.h"
+
+inline unsigned tbb::internal::governor::max_number_of_workers () {
+    return local_scheduler()->number_of_workers_in_my_arena();
+}
+#endif /* __TBB_ARENA_PER_MASTER */
+
+#endif /* _TBB_governor_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/ia32-masm/atomic_support.asm b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/ia32-masm/atomic_support.asm
new file mode 100644 (file)
index 0000000..8c881dc
--- /dev/null
@@ -0,0 +1,196 @@
+; Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+;
+; This file is part of Threading Building Blocks.
+;
+; Threading Building Blocks is free software; you can redistribute it
+; and/or modify it under the terms of the GNU General Public License
+; version 2 as published by the Free Software Foundation.
+;
+; Threading Building Blocks is distributed in the hope that it will be
+; useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+; of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+; GNU General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with Threading Building Blocks; if not, write to the Free Software
+; Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+;
+; As a special exception, you may use this file as part of a free software
+; library without restriction.  Specifically, if other files instantiate
+; templates or use macros or inline functions from this file, or you compile
+; this file and link it with other files to produce an executable, this
+; file does not by itself cause the resulting executable to be covered by
+; the GNU General Public License.  This exception does not however
+; invalidate any other reasons why the executable file might be covered by
+; the GNU General Public License.
+
+.686
+.model flat,c
+.code 
+       ALIGN 4
+       PUBLIC c __TBB_machine_fetchadd1
+__TBB_machine_fetchadd1:
+       mov edx,4[esp]
+       mov eax,8[esp]
+       lock xadd [edx],al
+       ret
+.code 
+       ALIGN 4
+       PUBLIC c __TBB_machine_fetchstore1
+__TBB_machine_fetchstore1:
+       mov edx,4[esp]
+       mov eax,8[esp]
+       lock xchg [edx],al
+       ret
+.code 
+       ALIGN 4
+       PUBLIC c __TBB_machine_cmpswp1
+__TBB_machine_cmpswp1:
+       mov edx,4[esp]
+       mov ecx,8[esp]
+       mov eax,12[esp]
+       lock cmpxchg [edx],cl
+       ret
+.code 
+       ALIGN 4
+       PUBLIC c __TBB_machine_fetchadd2
+__TBB_machine_fetchadd2:
+       mov edx,4[esp]
+       mov eax,8[esp]
+       lock xadd [edx],ax
+       ret
+.code 
+       ALIGN 4
+       PUBLIC c __TBB_machine_fetchstore2
+__TBB_machine_fetchstore2:
+       mov edx,4[esp]
+       mov eax,8[esp]
+       lock xchg [edx],ax
+       ret
+.code 
+       ALIGN 4
+       PUBLIC c __TBB_machine_cmpswp2
+__TBB_machine_cmpswp2:
+       mov edx,4[esp]
+       mov ecx,8[esp]
+       mov eax,12[esp]
+       lock cmpxchg [edx],cx
+       ret
+.code 
+       ALIGN 4
+       PUBLIC c __TBB_machine_fetchadd4
+__TBB_machine_fetchadd4:
+       mov edx,4[esp]
+       mov eax,8[esp]
+       lock xadd [edx],eax
+       ret
+.code 
+       ALIGN 4
+       PUBLIC c __TBB_machine_fetchstore4
+__TBB_machine_fetchstore4:
+       mov edx,4[esp]
+       mov eax,8[esp]
+       lock xchg [edx],eax
+       ret
+.code 
+       ALIGN 4
+       PUBLIC c __TBB_machine_cmpswp4
+__TBB_machine_cmpswp4:
+       mov edx,4[esp]
+       mov ecx,8[esp]
+       mov eax,12[esp]
+       lock cmpxchg [edx],ecx
+       ret
+.code 
+       ALIGN 4
+       PUBLIC c __TBB_machine_fetchadd8
+__TBB_machine_fetchadd8:
+       push ebx
+       push edi
+       mov edi,12[esp]
+       mov eax,[edi]
+       mov edx,4[edi]
+__TBB_machine_fetchadd8_loop:
+       mov ebx,16[esp]
+       mov ecx,20[esp]
+       add ebx,eax
+       adc ecx,edx
+       lock cmpxchg8b qword ptr [edi]
+       jnz __TBB_machine_fetchadd8_loop
+       pop edi
+       pop ebx
+       ret
+.code 
+       ALIGN 4
+       PUBLIC c __TBB_machine_fetchstore8
+__TBB_machine_fetchstore8:
+       push ebx
+       push edi
+       mov edi,12[esp]
+       mov ebx,16[esp]
+       mov ecx,20[esp]
+       mov eax,[edi]
+       mov edx,4[edi]
+__TBB_machine_fetchstore8_loop:
+       lock cmpxchg8b qword ptr [edi]
+       jnz __TBB_machine_fetchstore8_loop
+       pop edi
+       pop ebx
+       ret
+.code 
+       ALIGN 4
+       PUBLIC c __TBB_machine_cmpswp8
+__TBB_machine_cmpswp8:
+       push ebx
+       push edi
+       mov edi,12[esp]
+       mov ebx,16[esp]
+       mov ecx,20[esp]
+       mov eax,24[esp]
+       mov edx,28[esp]
+       lock cmpxchg8b qword ptr [edi]
+       pop edi
+       pop ebx
+       ret
+.code 
+       ALIGN 4
+       PUBLIC c __TBB_machine_load8
+__TBB_machine_Load8:
+       ; If location is on stack, compiler may have failed to align it correctly, so we do dynamic check.
+       mov ecx,4[esp]
+       test ecx,7
+       jne load_slow
+       ; Load within a cache line
+       sub esp,12
+       fild qword ptr [ecx]
+       fistp qword ptr [esp]
+       mov eax,[esp]
+       mov edx,4[esp]
+       add esp,12
+       ret
+load_slow:
+       ; Load is misaligned. Use cmpxchg8b.
+       push ebx
+       push edi
+       mov edi,ecx
+       xor eax,eax
+       xor ebx,ebx
+       xor ecx,ecx
+       xor edx,edx
+       lock cmpxchg8b qword ptr [edi]
+       pop edi
+       pop ebx
+       ret
+EXTRN __TBB_machine_store8_slow:PROC
+.code 
+       ALIGN 4
+       PUBLIC c __TBB_machine_store8
+__TBB_machine_Store8:
+       ; If location is on stack, compiler may have failed to align it correctly, so we do dynamic check.
+       mov ecx,4[esp]
+       test ecx,7
+       jne __TBB_machine_store8_slow ;; tail call to tbb_misc.cpp
+       fild qword ptr 8[esp]
+       fistp qword ptr [ecx]
+       ret
+end
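
To make the stack-based calling convention above easier to follow, here is a non-atomic C++ sketch of what two of the routines compute (reference semantics only; the real implementations rely on the lock-prefixed instructions above):

    #include <stdint.h>

    // __TBB_machine_fetchadd4: returns the previous value, like lock xadd.
    int32_t fetchadd4_semantics( volatile int32_t* ptr, int32_t addend ) {
        int32_t old = *ptr;
        *ptr = old + addend;
        return old;
    }

    // __TBB_machine_cmpswp4( ptr, value, comparand ): stores 'value' only if
    // *ptr equals 'comparand'; always returns the previous value (lock cmpxchg).
    int32_t cmpswp4_semantics( volatile int32_t* ptr, int32_t value, int32_t comparand ) {
        int32_t old = *ptr;
        if( old == comparand )
            *ptr = value;
        return old;
    }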
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/ia32-masm/lock_byte.asm b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/ia32-masm/lock_byte.asm
new file mode 100644 (file)
index 0000000..be1552f
--- /dev/null
@@ -0,0 +1,46 @@
+; Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+;
+; This file is part of Threading Building Blocks.
+;
+; Threading Building Blocks is free software; you can redistribute it
+; and/or modify it under the terms of the GNU General Public License
+; version 2 as published by the Free Software Foundation.
+;
+; Threading Building Blocks is distributed in the hope that it will be
+; useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+; of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+; GNU General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with Threading Building Blocks; if not, write to the Free Software
+; Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+;
+; As a special exception, you may use this file as part of a free software
+; library without restriction.  Specifically, if other files instantiate
+; templates or use macros or inline functions from this file, or you compile
+; this file and link it with other files to produce an executable, this
+; file does not by itself cause the resulting executable to be covered by
+; the GNU General Public License.  This exception does not however
+; invalidate any other reasons why the executable file might be covered by
+; the GNU General Public License.
+
+; DO NOT EDIT - AUTOMATICALLY GENERATED FROM .s FILE
+.686
+.model flat,c
+.code 
+       ALIGN 4
+       PUBLIC c __TBB_machine_trylockbyte
+__TBB_machine_trylockbyte:
+       mov edx,4[esp]
+       mov al,[edx]
+       mov cl,1
+       test al,1
+       jnz __TBB_machine_trylockbyte_contended
+       lock cmpxchg [edx],cl
+       jne __TBB_machine_trylockbyte_contended
+       mov eax,1
+       ret
+__TBB_machine_trylockbyte_contended:
+       xor eax,eax
+       ret
+end
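
The byte lock above follows the usual test-and-test-and-set pattern; a non-atomic C++ sketch of its semantics (the atomicity comes from the lock cmpxchg instruction):

    // __TBB_machine_trylockbyte: returns 1 and sets the flag if the lock was
    // free, returns 0 if it was already held.
    typedef unsigned char flag_t;

    int trylockbyte_semantics( volatile flag_t& flag ) {
        if( flag & 1 )
            return 0;            // already held: fail fast without a locked operation
        if( flag == 0 ) {        // the assembly performs this compare-and-set atomically
            flag = 1;
            return 1;
        }
        return 0;
    }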
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/ia64-gas/atomic_support.s b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/ia64-gas/atomic_support.s
new file mode 100644 (file)
index 0000000..f7c6835
--- /dev/null
@@ -0,0 +1,678 @@
+// Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+//
+// This file is part of Threading Building Blocks.
+//
+// Threading Building Blocks is free software; you can redistribute it
+// and/or modify it under the terms of the GNU General Public License
+// version 2 as published by the Free Software Foundation.
+//
+// Threading Building Blocks is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Threading Building Blocks; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+//
+// As a special exception, you may use this file as part of a free software
+// library without restriction.  Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License.  This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
+// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
+# 1 "<stdin>"
+# 1 "<built-in>"
+# 1 "<command line>"
+# 1 "<stdin>"
+
+
+
+
+
+        .section .text
+        .align 16
+
+
+        .proc __TBB_machine_fetchadd1__TBB_full_fence#
+        .global __TBB_machine_fetchadd1__TBB_full_fence#
+__TBB_machine_fetchadd1__TBB_full_fence:
+{
+        mf
+        br __TBB_machine_fetchadd1acquire
+}
+        .endp __TBB_machine_fetchadd1__TBB_full_fence#
+
+        .proc __TBB_machine_fetchadd1acquire#
+        .global __TBB_machine_fetchadd1acquire#
+__TBB_machine_fetchadd1acquire:
+
+
+
+
+
+
+
+        ld1 r9=[r32]
+;;
+Retry_1acquire:
+        mov ar.ccv=r9
+        mov r8=r9;
+        add r10=r9,r33
+;;
+        cmpxchg1.acq r9=[r32],r10,ar.ccv
+;;
+        cmp.ne p7,p0=r8,r9
+  (p7) br.cond.dpnt Retry_1acquire
+        br.ret.sptk.many b0
+# 49 "<stdin>"
+        .endp __TBB_machine_fetchadd1acquire#
+# 62 "<stdin>"
+        .section .text
+        .align 16
+        .proc __TBB_machine_fetchstore1__TBB_full_fence#
+        .global __TBB_machine_fetchstore1__TBB_full_fence#
+__TBB_machine_fetchstore1__TBB_full_fence:
+        mf
+;;
+        xchg1 r8=[r32],r33
+        br.ret.sptk.many b0
+        .endp __TBB_machine_fetchstore1__TBB_full_fence#
+
+
+        .proc __TBB_machine_fetchstore1acquire#
+        .global __TBB_machine_fetchstore1acquire#
+__TBB_machine_fetchstore1acquire:
+        xchg1 r8=[r32],r33
+        br.ret.sptk.many b0
+        .endp __TBB_machine_fetchstore1acquire#
+# 88 "<stdin>"
+        .section .text
+        .align 16
+
+
+        .proc __TBB_machine_cmpswp1__TBB_full_fence#
+        .global __TBB_machine_cmpswp1__TBB_full_fence#
+__TBB_machine_cmpswp1__TBB_full_fence:
+{
+        mf
+        br __TBB_machine_cmpswp1acquire
+}
+        .endp __TBB_machine_cmpswp1__TBB_full_fence#
+
+        .proc __TBB_machine_cmpswp1acquire#
+        .global __TBB_machine_cmpswp1acquire#
+__TBB_machine_cmpswp1acquire:
+
+        zxt1 r34=r34
+;;
+
+        mov ar.ccv=r34
+;;
+        cmpxchg1.acq r8=[r32],r33,ar.ccv
+        br.ret.sptk.many b0
+        .endp __TBB_machine_cmpswp1acquire#
+// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
+# 1 "<stdin>"
+# 1 "<built-in>"
+# 1 "<command line>"
+# 1 "<stdin>"
+
+
+
+
+
+        .section .text
+        .align 16
+
+
+        .proc __TBB_machine_fetchadd2__TBB_full_fence#
+        .global __TBB_machine_fetchadd2__TBB_full_fence#
+__TBB_machine_fetchadd2__TBB_full_fence:
+{
+        mf
+        br __TBB_machine_fetchadd2acquire
+}
+        .endp __TBB_machine_fetchadd2__TBB_full_fence#
+
+        .proc __TBB_machine_fetchadd2acquire#
+        .global __TBB_machine_fetchadd2acquire#
+__TBB_machine_fetchadd2acquire:
+
+
+
+
+
+
+
+        ld2 r9=[r32]
+;;
+Retry_2acquire:
+        mov ar.ccv=r9
+        mov r8=r9;
+        add r10=r9,r33
+;;
+        cmpxchg2.acq r9=[r32],r10,ar.ccv
+;;
+        cmp.ne p7,p0=r8,r9
+  (p7) br.cond.dpnt Retry_2acquire
+        br.ret.sptk.many b0
+# 49 "<stdin>"
+        .endp __TBB_machine_fetchadd2acquire#
+# 62 "<stdin>"
+        .section .text
+        .align 16
+        .proc __TBB_machine_fetchstore2__TBB_full_fence#
+        .global __TBB_machine_fetchstore2__TBB_full_fence#
+__TBB_machine_fetchstore2__TBB_full_fence:
+        mf
+;;
+        xchg2 r8=[r32],r33
+        br.ret.sptk.many b0
+        .endp __TBB_machine_fetchstore2__TBB_full_fence#
+
+
+        .proc __TBB_machine_fetchstore2acquire#
+        .global __TBB_machine_fetchstore2acquire#
+__TBB_machine_fetchstore2acquire:
+        xchg2 r8=[r32],r33
+        br.ret.sptk.many b0
+        .endp __TBB_machine_fetchstore2acquire#
+# 88 "<stdin>"
+        .section .text
+        .align 16
+
+
+        .proc __TBB_machine_cmpswp2__TBB_full_fence#
+        .global __TBB_machine_cmpswp2__TBB_full_fence#
+__TBB_machine_cmpswp2__TBB_full_fence:
+{
+        mf
+        br __TBB_machine_cmpswp2acquire
+}
+        .endp __TBB_machine_cmpswp2__TBB_full_fence#
+
+        .proc __TBB_machine_cmpswp2acquire#
+        .global __TBB_machine_cmpswp2acquire#
+__TBB_machine_cmpswp2acquire:
+
+        zxt2 r34=r34
+;;
+
+        mov ar.ccv=r34
+;;
+        cmpxchg2.acq r8=[r32],r33,ar.ccv
+        br.ret.sptk.many b0
+        .endp __TBB_machine_cmpswp2acquire#
+// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
+# 1 "<stdin>"
+# 1 "<built-in>"
+# 1 "<command line>"
+# 1 "<stdin>"
+
+
+
+
+
+        .section .text
+        .align 16
+
+
+        .proc __TBB_machine_fetchadd4__TBB_full_fence#
+        .global __TBB_machine_fetchadd4__TBB_full_fence#
+__TBB_machine_fetchadd4__TBB_full_fence:
+{
+        mf
+        br __TBB_machine_fetchadd4acquire
+}
+        .endp __TBB_machine_fetchadd4__TBB_full_fence#
+
+        .proc __TBB_machine_fetchadd4acquire#
+        .global __TBB_machine_fetchadd4acquire#
+__TBB_machine_fetchadd4acquire:
+
+        cmp.eq p6,p0=1,r33
+        cmp.eq p8,p0=-1,r33
+  (p6) br.cond.dptk Inc_4acquire
+  (p8) br.cond.dpnt Dec_4acquire
+;;
+
+        ld4 r9=[r32]
+;;
+Retry_4acquire:
+        mov ar.ccv=r9
+        mov r8=r9;
+        add r10=r9,r33
+;;
+        cmpxchg4.acq r9=[r32],r10,ar.ccv
+;;
+        cmp.ne p7,p0=r8,r9
+  (p7) br.cond.dpnt Retry_4acquire
+        br.ret.sptk.many b0
+
+Inc_4acquire:
+        fetchadd4.acq r8=[r32],1
+        br.ret.sptk.many b0
+Dec_4acquire:
+        fetchadd4.acq r8=[r32],-1
+        br.ret.sptk.many b0
+
+        .endp __TBB_machine_fetchadd4acquire#
+# 62 "<stdin>"
+        .section .text
+        .align 16
+        .proc __TBB_machine_fetchstore4__TBB_full_fence#
+        .global __TBB_machine_fetchstore4__TBB_full_fence#
+__TBB_machine_fetchstore4__TBB_full_fence:
+        mf
+;;
+        xchg4 r8=[r32],r33
+        br.ret.sptk.many b0
+        .endp __TBB_machine_fetchstore4__TBB_full_fence#
+
+
+        .proc __TBB_machine_fetchstore4acquire#
+        .global __TBB_machine_fetchstore4acquire#
+__TBB_machine_fetchstore4acquire:
+        xchg4 r8=[r32],r33
+        br.ret.sptk.many b0
+        .endp __TBB_machine_fetchstore4acquire#
+# 88 "<stdin>"
+        .section .text
+        .align 16
+
+
+        .proc __TBB_machine_cmpswp4__TBB_full_fence#
+        .global __TBB_machine_cmpswp4__TBB_full_fence#
+__TBB_machine_cmpswp4__TBB_full_fence:
+{
+        mf
+        br __TBB_machine_cmpswp4acquire
+}
+        .endp __TBB_machine_cmpswp4__TBB_full_fence#
+
+        .proc __TBB_machine_cmpswp4acquire#
+        .global __TBB_machine_cmpswp4acquire#
+__TBB_machine_cmpswp4acquire:
+
+        zxt4 r34=r34
+;;
+
+        mov ar.ccv=r34
+;;
+        cmpxchg4.acq r8=[r32],r33,ar.ccv
+        br.ret.sptk.many b0
+        .endp __TBB_machine_cmpswp4acquire#
+// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
+# 1 "<stdin>"
+# 1 "<built-in>"
+# 1 "<command line>"
+# 1 "<stdin>"
+
+
+
+
+
+        .section .text
+        .align 16
+
+
+        .proc __TBB_machine_fetchadd8__TBB_full_fence#
+        .global __TBB_machine_fetchadd8__TBB_full_fence#
+__TBB_machine_fetchadd8__TBB_full_fence:
+{
+        mf
+        br __TBB_machine_fetchadd8acquire
+}
+        .endp __TBB_machine_fetchadd8__TBB_full_fence#
+
+        .proc __TBB_machine_fetchadd8acquire#
+        .global __TBB_machine_fetchadd8acquire#
+__TBB_machine_fetchadd8acquire:
+
+        cmp.eq p6,p0=1,r33
+        cmp.eq p8,p0=-1,r33
+  (p6) br.cond.dptk Inc_8acquire
+  (p8) br.cond.dpnt Dec_8acquire
+;;
+
+        ld8 r9=[r32]
+;;
+Retry_8acquire:
+        mov ar.ccv=r9
+        mov r8=r9;
+        add r10=r9,r33
+;;
+        cmpxchg8.acq r9=[r32],r10,ar.ccv
+;;
+        cmp.ne p7,p0=r8,r9
+  (p7) br.cond.dpnt Retry_8acquire
+        br.ret.sptk.many b0
+
+Inc_8acquire:
+        fetchadd8.acq r8=[r32],1
+        br.ret.sptk.many b0
+Dec_8acquire:
+        fetchadd8.acq r8=[r32],-1
+        br.ret.sptk.many b0
+
+        .endp __TBB_machine_fetchadd8acquire#
+# 62 "<stdin>"
+        .section .text
+        .align 16
+        .proc __TBB_machine_fetchstore8__TBB_full_fence#
+        .global __TBB_machine_fetchstore8__TBB_full_fence#
+__TBB_machine_fetchstore8__TBB_full_fence:
+        mf
+;;
+        xchg8 r8=[r32],r33
+        br.ret.sptk.many b0
+        .endp __TBB_machine_fetchstore8__TBB_full_fence#
+
+
+        .proc __TBB_machine_fetchstore8acquire#
+        .global __TBB_machine_fetchstore8acquire#
+__TBB_machine_fetchstore8acquire:
+        xchg8 r8=[r32],r33
+        br.ret.sptk.many b0
+        .endp __TBB_machine_fetchstore8acquire#
+# 88 "<stdin>"
+        .section .text
+        .align 16
+
+
+        .proc __TBB_machine_cmpswp8__TBB_full_fence#
+        .global __TBB_machine_cmpswp8__TBB_full_fence#
+__TBB_machine_cmpswp8__TBB_full_fence:
+{
+        mf
+        br __TBB_machine_cmpswp8acquire
+}
+        .endp __TBB_machine_cmpswp8__TBB_full_fence#
+
+        .proc __TBB_machine_cmpswp8acquire#
+        .global __TBB_machine_cmpswp8acquire#
+__TBB_machine_cmpswp8acquire:
+
+
+
+
+        mov ar.ccv=r34
+;;
+        cmpxchg8.acq r8=[r32],r33,ar.ccv
+        br.ret.sptk.many b0
+        .endp __TBB_machine_cmpswp8acquire#
+// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
+# 1 "<stdin>"
+# 1 "<built-in>"
+# 1 "<command line>"
+# 1 "<stdin>"
+
+
+
+
+
+        .section .text
+        .align 16
+# 19 "<stdin>"
+        .proc __TBB_machine_fetchadd1release#
+        .global __TBB_machine_fetchadd1release#
+__TBB_machine_fetchadd1release:
+
+
+
+
+
+
+
+        ld1 r9=[r32]
+;;
+Retry_1release:
+        mov ar.ccv=r9
+        mov r8=r9;
+        add r10=r9,r33
+;;
+        cmpxchg1.rel r9=[r32],r10,ar.ccv
+;;
+        cmp.ne p7,p0=r8,r9
+  (p7) br.cond.dpnt Retry_1release
+        br.ret.sptk.many b0
+# 49 "<stdin>"
+        .endp __TBB_machine_fetchadd1release#
+# 62 "<stdin>"
+        .section .text
+        .align 16
+        .proc __TBB_machine_fetchstore1release#
+        .global __TBB_machine_fetchstore1release#
+__TBB_machine_fetchstore1release:
+        mf
+;;
+        xchg1 r8=[r32],r33
+        br.ret.sptk.many b0
+        .endp __TBB_machine_fetchstore1release#
+# 88 "<stdin>"
+        .section .text
+        .align 16
+# 101 "<stdin>"
+        .proc __TBB_machine_cmpswp1release#
+        .global __TBB_machine_cmpswp1release#
+__TBB_machine_cmpswp1release:
+
+        zxt1 r34=r34
+;;
+
+        mov ar.ccv=r34
+;;
+        cmpxchg1.rel r8=[r32],r33,ar.ccv
+        br.ret.sptk.many b0
+        .endp __TBB_machine_cmpswp1release#
+// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
+# 1 "<stdin>"
+# 1 "<built-in>"
+# 1 "<command line>"
+# 1 "<stdin>"
+
+
+
+
+
+        .section .text
+        .align 16
+# 19 "<stdin>"
+        .proc __TBB_machine_fetchadd2release#
+        .global __TBB_machine_fetchadd2release#
+__TBB_machine_fetchadd2release:
+
+
+
+
+
+
+
+        ld2 r9=[r32]
+;;
+Retry_2release:
+        mov ar.ccv=r9
+        mov r8=r9;
+        add r10=r9,r33
+;;
+        cmpxchg2.rel r9=[r32],r10,ar.ccv
+;;
+        cmp.ne p7,p0=r8,r9
+  (p7) br.cond.dpnt Retry_2release
+        br.ret.sptk.many b0
+# 49 "<stdin>"
+        .endp __TBB_machine_fetchadd2release#
+# 62 "<stdin>"
+        .section .text
+        .align 16
+        .proc __TBB_machine_fetchstore2release#
+        .global __TBB_machine_fetchstore2release#
+__TBB_machine_fetchstore2release:
+        mf
+;;
+        xchg2 r8=[r32],r33
+        br.ret.sptk.many b0
+        .endp __TBB_machine_fetchstore2release#
+# 88 "<stdin>"
+        .section .text
+        .align 16
+# 101 "<stdin>"
+        .proc __TBB_machine_cmpswp2release#
+        .global __TBB_machine_cmpswp2release#
+__TBB_machine_cmpswp2release:
+
+        zxt2 r34=r34
+;;
+
+        mov ar.ccv=r34
+;;
+        cmpxchg2.rel r8=[r32],r33,ar.ccv
+        br.ret.sptk.many b0
+        .endp __TBB_machine_cmpswp2release#
+// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
+# 1 "<stdin>"
+# 1 "<built-in>"
+# 1 "<command line>"
+# 1 "<stdin>"
+
+
+
+
+
+        .section .text
+        .align 16
+# 19 "<stdin>"
+        .proc __TBB_machine_fetchadd4release#
+        .global __TBB_machine_fetchadd4release#
+__TBB_machine_fetchadd4release:
+
+        cmp.eq p6,p0=1,r33
+        cmp.eq p8,p0=-1,r33
+  (p6) br.cond.dptk Inc_4release
+  (p8) br.cond.dpnt Dec_4release
+;;
+
+        ld4 r9=[r32]
+;;
+Retry_4release:
+        mov ar.ccv=r9
+        mov r8=r9;
+        add r10=r9,r33
+;;
+        cmpxchg4.rel r9=[r32],r10,ar.ccv
+;;
+        cmp.ne p7,p0=r8,r9
+  (p7) br.cond.dpnt Retry_4release
+        br.ret.sptk.many b0
+
+Inc_4release:
+        fetchadd4.rel r8=[r32],1
+        br.ret.sptk.many b0
+Dec_4release:
+        fetchadd4.rel r8=[r32],-1
+        br.ret.sptk.many b0
+
+        .endp __TBB_machine_fetchadd4release#
+# 62 "<stdin>"
+        .section .text
+        .align 16
+        .proc __TBB_machine_fetchstore4release#
+        .global __TBB_machine_fetchstore4release#
+__TBB_machine_fetchstore4release:
+        mf
+;;
+        xchg4 r8=[r32],r33
+        br.ret.sptk.many b0
+        .endp __TBB_machine_fetchstore4release#
+# 88 "<stdin>"
+        .section .text
+        .align 16
+# 101 "<stdin>"
+        .proc __TBB_machine_cmpswp4release#
+        .global __TBB_machine_cmpswp4release#
+__TBB_machine_cmpswp4release:
+
+        zxt4 r34=r34
+;;
+
+        mov ar.ccv=r34
+;;
+        cmpxchg4.rel r8=[r32],r33,ar.ccv
+        br.ret.sptk.many b0
+        .endp __TBB_machine_cmpswp4release#
+// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh
+# 1 "<stdin>"
+# 1 "<built-in>"
+# 1 "<command line>"
+# 1 "<stdin>"
+
+
+
+
+
+        .section .text
+        .align 16
+# 19 "<stdin>"
+        .proc __TBB_machine_fetchadd8release#
+        .global __TBB_machine_fetchadd8release#
+__TBB_machine_fetchadd8release:
+
+        cmp.eq p6,p0=1,r33
+        cmp.eq p8,p0=-1,r33
+  (p6) br.cond.dptk Inc_8release
+  (p8) br.cond.dpnt Dec_8release
+;;
+
+        ld8 r9=[r32]
+;;
+Retry_8release:
+        mov ar.ccv=r9
+        mov r8=r9;
+        add r10=r9,r33
+;;
+        cmpxchg8.rel r9=[r32],r10,ar.ccv
+;;
+        cmp.ne p7,p0=r8,r9
+  (p7) br.cond.dpnt Retry_8release
+        br.ret.sptk.many b0
+
+Inc_8release:
+        fetchadd8.rel r8=[r32],1
+        br.ret.sptk.many b0
+Dec_8release:
+        fetchadd8.rel r8=[r32],-1
+        br.ret.sptk.many b0
+
+        .endp __TBB_machine_fetchadd8release#
+# 62 "<stdin>"
+        .section .text
+        .align 16
+        .proc __TBB_machine_fetchstore8release#
+        .global __TBB_machine_fetchstore8release#
+__TBB_machine_fetchstore8release:
+        mf
+;;
+        xchg8 r8=[r32],r33
+        br.ret.sptk.many b0
+        .endp __TBB_machine_fetchstore8release#
+# 88 "<stdin>"
+        .section .text
+        .align 16
+# 101 "<stdin>"
+        .proc __TBB_machine_cmpswp8release#
+        .global __TBB_machine_cmpswp8release#
+__TBB_machine_cmpswp8release:
+
+
+
+
+        mov ar.ccv=r34
+;;
+        cmpxchg8.rel r8=[r32],r33,ar.ccv
+        br.ret.sptk.many b0
+        .endp __TBB_machine_cmpswp8release#
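The generated fetch-and-add routines above all follow one pattern: the 4- and 8-byte variants dispatch an addend of +1 or -1 straight to fetchadd4/fetchadd8 (the Inc_*/Dec_* fast paths), while the 1- and 2-byte variants, and any other addend, fall into a Retry_* loop that re-reads the location and retries a compare-and-exchange until it observes the value it expected. A minimal C++ sketch of that retry loop, using a GCC/Clang __sync builtin as a stand-in for the cmpxchg primitive (illustrative only, not TBB code):

    #include <cstdint>

    // Stand-in for the cmpxchg4 primitive: atomically replace *ptr with
    // new_value if it still equals expected, returning the previous contents.
    static int32_t cas32(volatile int32_t* ptr, int32_t new_value, int32_t expected) {
        return __sync_val_compare_and_swap(ptr, expected, new_value);
    }

    // General fetch-and-add built on compare-and-swap, mirroring the Retry_*
    // loops above; the +/-1 fetchadd fast paths are omitted.
    int32_t fetch_and_add32(volatile int32_t* ptr, int32_t addend) {
        int32_t old_value = *ptr;
        for (;;) {
            int32_t seen = cas32(ptr, old_value + addend, old_value);
            if (seen == old_value)
                return old_value;   // CAS succeeded; return the prior value
            old_value = seen;       // lost the race; retry with the fresh value
        }
    }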
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/ia64-gas/ia64_misc.s b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/ia64-gas/ia64_misc.s
new file mode 100644 (file)
index 0000000..d4233d2
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+//
+// This file is part of Threading Building Blocks.
+//
+// Threading Building Blocks is free software; you can redistribute it
+// and/or modify it under the terms of the GNU General Public License
+// version 2 as published by the Free Software Foundation.
+//
+// Threading Building Blocks is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Threading Building Blocks; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+//
+// As a special exception, you may use this file as part of a free software
+// library without restriction.  Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License.  This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
+       // RSE backing store pointer retrieval
+    .section .text
+    .align 16
+    .proc __TBB_get_bsp#
+    .global __TBB_get_bsp#
+__TBB_get_bsp:
+        mov r8=ar.bsp
+        br.ret.sptk.many b0
+    .endp __TBB_get_bsp#
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/ia64-gas/lock_byte.s b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/ia64-gas/lock_byte.s
new file mode 100644 (file)
index 0000000..932bd37
--- /dev/null
@@ -0,0 +1,54 @@
+// Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+//
+// This file is part of Threading Building Blocks.
+//
+// Threading Building Blocks is free software; you can redistribute it
+// and/or modify it under the terms of the GNU General Public License
+// version 2 as published by the Free Software Foundation.
+//
+// Threading Building Blocks is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Threading Building Blocks; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+//
+// As a special exception, you may use this file as part of a free software
+// library without restriction.  Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License.  This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
+       // Support for class TinyLock
+       .section .text
+       .align 16
+       // unsigned int __TBB_machine_trylockbyte( byte& flag );
+       // r32 = address of flag 
+       .proc  __TBB_machine_trylockbyte#
+       .global __TBB_machine_trylockbyte#
+ADDRESS_OF_FLAG=r32
+RETCODE=r8
+FLAG=r9
+BUSY=r10
+SCRATCH=r11
+__TBB_machine_trylockbyte:
+        ld1.acq FLAG=[ADDRESS_OF_FLAG]
+        mov BUSY=1
+        mov RETCODE=0
+;;
+        cmp.ne p6,p0=0,FLAG
+        mov ar.ccv=r0
+(p6)    br.ret.sptk.many b0
+;;
+        cmpxchg1.acq SCRATCH=[ADDRESS_OF_FLAG],BUSY,ar.ccv  // Try to acquire lock
+;;
+        cmp.eq p6,p0=0,SCRATCH
+;;
+(p6)    mov RETCODE=1
+       br.ret.sptk.many b0     
+       .endp __TBB_machine_trylockbyte#
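__TBB_machine_trylockbyte is a non-blocking try-lock: it returns 1 only if the flag byte was atomically changed from 0 to 1, and returns 0 early if the flag is already set. A hedged C++ sketch of a spin lock layered on such a primitive, with GCC/Clang builtins standing in for the assembly (the wrapper class is illustrative, not TBB's lock class):

    // Stand-in for __TBB_machine_trylockbyte: true iff the flag byte was
    // atomically changed from 0 to 1.
    static bool trylockbyte(volatile unsigned char& flag) {
        return __sync_bool_compare_and_swap(&flag, 0, 1);
    }

    // Illustrative spin lock built on the try-lock primitive.
    struct tiny_spin_lock {
        volatile unsigned char flag;        // 0 = free, 1 = held

        void lock() {
            while (!trylockbyte(flag)) {
                // spin; a production lock would pause and back off here
            }
        }
        void unlock() {
            __sync_lock_release(&flag);     // store 0 with release semantics
        }
    };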
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/ia64-gas/log2.s b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/ia64-gas/log2.s
new file mode 100644 (file)
index 0000000..ed07b98
--- /dev/null
@@ -0,0 +1,67 @@
+// Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+//
+// This file is part of Threading Building Blocks.
+//
+// Threading Building Blocks is free software; you can redistribute it
+// and/or modify it under the terms of the GNU General Public License
+// version 2 as published by the Free Software Foundation.
+//
+// Threading Building Blocks is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Threading Building Blocks; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+//
+// As a special exception, you may use this file as part of a free software
+// library without restriction.  Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License.  This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
+       // Support for class ConcurrentVector
+       .section .text
+       .align 16
+       // unsigned long __TBB_machine_lg( unsigned long x );
+       // r32 = x
+       .proc  __TBB_machine_lg#
+       .global __TBB_machine_lg#
+__TBB_machine_lg:
+        shr r16=r32,1  // .x
+;;
+        shr r17=r32,2  // ..x
+       or r32=r32,r16  // xx
+;;
+       shr r16=r32,3   // ...xx
+       or r32=r32,r17  // xxx
+;;
+       shr r17=r32,5   // .....xxx
+       or r32=r32,r16  // xxxxx
+;;
+       shr r16=r32,8   // ........xxxxx
+       or r32=r32,r17  // xxxxxxxx
+;;
+       shr r17=r32,13
+       or r32=r32,r16  // 13x
+;;
+       shr r16=r32,21
+       or r32=r32,r17  // 21x
+;;
+       shr r17=r32,34  
+       or r32=r32,r16  // 34x
+;;
+       shr r16=r32,55
+       or r32=r32,r17  // 55x
+;;
+       or r32=r32,r16  // 64x
+;;
+       popcnt r8=r32
+;;
+       add r8=-1,r8
+       br.ret.sptk.many b0     
+       .endp __TBB_machine_lg#
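__TBB_machine_lg computes the floor of the base-2 logarithm: the chain of shifts and ORs smears the most significant set bit of r32 into every lower bit position (the trailing comments track how many low bits are known to be set), popcnt then counts those bits, and subtracting one yields the bit index of the original MSB. The assembly uses an irregular shift schedule to overlap work across instruction groups; a conventional C++ equivalent of the same idea (illustrative, not TBB code):

    #include <cstdint>

    // Floor of log2(x) for x > 0: smear the MSB downward, count the ones,
    // subtract one. __builtin_popcountll plays the role of popcnt above.
    uint64_t machine_lg(uint64_t x) {
        x |= x >> 1;
        x |= x >> 2;
        x |= x >> 4;
        x |= x >> 8;
        x |= x >> 16;
        x |= x >> 32;       // every bit below the original MSB is now set
        return static_cast<uint64_t>(__builtin_popcountll(x)) - 1;
    }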
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/ia64-gas/pause.s b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/ia64-gas/pause.s
new file mode 100644 (file)
index 0000000..45c2bb7
--- /dev/null
@@ -0,0 +1,41 @@
+// Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+//
+// This file is part of Threading Building Blocks.
+//
+// Threading Building Blocks is free software; you can redistribute it
+// and/or modify it under the terms of the GNU General Public License
+// version 2 as published by the Free Software Foundation.
+//
+// Threading Building Blocks is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Threading Building Blocks; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+//
+// As a special exception, you may use this file as part of a free software
+// library without restriction.  Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License.  This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
+       .section .text
+       .align 16
+       // void __TBB_machine_pause( long count );
+       // r32 = count
+       .proc  __TBB_machine_pause#
+       .global __TBB_machine_pause#
+count = r32
+__TBB_machine_pause:
+        hint.m 0
+       add count=-1,count
+;;
+       cmp.eq p6,p7=0,count
+(p7)   br.cond.dpnt __TBB_machine_pause
+(p6)           br.ret.sptk.many b0     
+       .endp __TBB_machine_pause#
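__TBB_machine_pause executes the requested number of hint.m 0 operations (the IA64 spin-loop hint) before returning, so spinning callers can yield pipeline resources between retries. A typical use is exponential back-off inside a spin-wait loop; a minimal sketch under that assumption (the back-off class and the busy-loop stand-in are illustrative, not TBB's helpers):

    // Stand-in for __TBB_machine_pause: burn roughly 'count' iterations
    // (the real primitive issues 'count' pause hints instead).
    static void machine_pause(long count) {
        for (volatile long i = 0; i < count; ++i) { /* spin */ }
    }

    // Illustrative exponential back-off for a spin-wait loop.
    class spin_backoff {
        long count;
    public:
        spin_backoff() : count(1) {}
        void pause() {
            machine_pause(count);
            if (count < 16)         // cap the delay so waiters stay responsive
                count *= 2;
        }
    };

    // Typical call site: spin_backoff b; while (!ready) b.pause();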
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/ibm_aix51/atomic_support.c b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/ibm_aix51/atomic_support.c
new file mode 100644 (file)
index 0000000..b72fe7a
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include <stdint.h>
+#include <sys/atomic_op.h>
+
+/* This file must be compiled with gcc.  The IBM compiler doesn't seem to
+   support inline assembly statements (October 2007). */
+
+#ifdef __GNUC__
+
+int32_t __TBB_machine_cas_32 (volatile void* ptr, int32_t value, int32_t comparand) { 
+    __asm__ __volatile__ ("sync\n");  /* memory release operation */
+    compare_and_swap ((atomic_p) ptr, &comparand, value);
+    __asm__ __volatile__ ("sync\n");  /* memory acquire operation */
+    return comparand;
+}
+
+int64_t __TBB_machine_cas_64 (volatile void* ptr, int64_t value, int64_t comparand) { 
+    __asm__ __volatile__ ("sync\n");  /* memory release operation */
+    compare_and_swaplp ((atomic_l) ptr, &comparand, value);
+    __asm__ __volatile__ ("sync\n");  /* memory acquire operation */
+    return comparand;
+}
+
+void __TBB_machine_flush () { 
+    __asm__ __volatile__ ("sync\n");
+}
+
+#endif /* __GNUC__ */
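The AIX port supplies only a full-fence compare-and-swap (wrapping the system compare_and_swap/compare_and_swaplp services between sync barriers) plus a standalone flush; other read-modify-write operations can be layered on top with the usual CAS retry loop. For instance, a hedged sketch of an atomic exchange built on the 32-bit primitive above (the wrapper name is hypothetical and not part of this file):

    #include <stdint.h>

    /* Declared from the definition above; repeated here for a self-contained sketch. */
    extern int32_t __TBB_machine_cas_32(volatile void* ptr, int32_t value, int32_t comparand);

    /* Illustrative atomic exchange (fetch-and-store) layered on the CAS. */
    static int32_t aix_fetch_and_store_32(volatile int32_t* ptr, int32_t value) {
        int32_t old_value = *ptr;
        for (;;) {
            int32_t seen = __TBB_machine_cas_32(ptr, value, old_value);
            if (seen == old_value)
                return old_value;   /* swap happened; return what was there */
            old_value = seen;       /* *ptr changed under us; try again */
        }
    }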
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/index.html b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/index.html
new file mode 100644 (file)
index 0000000..7ac305c
--- /dev/null
@@ -0,0 +1,32 @@
+<HTML>
+<BODY>
+
+<H2>Overview</H2>
+This directory contains the source code of the TBB core components.
+
+<H2>Directories</H2>
+<DL>
+<DT><A HREF="tools_api">tools_api</A>
+<DD>Source code of the interface components provided by the Intel&reg; Parallel Studio tools.
+<DT><A HREF="intel64-masm">intel64-masm</A>
+<DD>Assembly code for the Intel&reg; 64 architecture.
+<DT><A HREF="ia32-masm">ia32-masm</A>
+<DD>Assembly code for IA32 architecture.
+<DT><A HREF="ia64-gas">ia64-gas</A>
+<DD>Assembly code for IA64 architecture.
+<DT><A HREF="ibm_aix51">ibm_aix51</A>
+<DD>C source implementing atomic operations support for the AIX 5.1 port.
+</DL>
+
+<HR>
+<A HREF="../index.html">Up to parent directory</A>
+<p></p>
+Copyright &copy; 2005-2010 Intel Corporation.  All Rights Reserved.
+<p></p>
+Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are 
+registered trademarks or trademarks of Intel Corporation or its 
+subsidiaries in the United States and other countries. 
+<p></p>
+* Other names and brands may be claimed as the property of others.
+</BODY>
+</HTML>
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/intel64-masm/atomic_support.asm b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/intel64-masm/atomic_support.asm
new file mode 100644 (file)
index 0000000..0431221
--- /dev/null
@@ -0,0 +1,80 @@
+; Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+;
+; This file is part of Threading Building Blocks.
+;
+; Threading Building Blocks is free software; you can redistribute it
+; and/or modify it under the terms of the GNU General Public License
+; version 2 as published by the Free Software Foundation.
+;
+; Threading Building Blocks is distributed in the hope that it will be
+; useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+; of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+; GNU General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with Threading Building Blocks; if not, write to the Free Software
+; Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+;
+; As a special exception, you may use this file as part of a free software
+; library without restriction.  Specifically, if other files instantiate
+; templates or use macros or inline functions from this file, or you compile
+; this file and link it with other files to produce an executable, this
+; file does not by itself cause the resulting executable to be covered by
+; the GNU General Public License.  This exception does not however
+; invalidate any other reasons why the executable file might be covered by
+; the GNU General Public License.
+
+; DO NOT EDIT - AUTOMATICALLY GENERATED FROM .s FILE
+.code 
+       ALIGN 8
+       PUBLIC __TBB_machine_fetchadd1
+__TBB_machine_fetchadd1:
+       mov rax,rdx
+       lock xadd [rcx],al
+       ret
+.code 
+       ALIGN 8
+       PUBLIC __TBB_machine_fetchstore1
+__TBB_machine_fetchstore1:
+       mov rax,rdx
+       lock xchg [rcx],al
+       ret
+.code 
+       ALIGN 8
+       PUBLIC __TBB_machine_cmpswp1
+__TBB_machine_cmpswp1:
+       mov rax,r8
+       lock cmpxchg [rcx],dl
+       ret
+.code 
+       ALIGN 8
+       PUBLIC __TBB_machine_fetchadd2
+__TBB_machine_fetchadd2:
+       mov rax,rdx
+       lock xadd [rcx],ax
+       ret
+.code 
+       ALIGN 8
+       PUBLIC __TBB_machine_fetchstore2
+__TBB_machine_fetchstore2:
+       mov rax,rdx
+       lock xchg [rcx],ax
+       ret
+.code 
+       ALIGN 8
+       PUBLIC __TBB_machine_cmpswp2
+__TBB_machine_cmpswp2:
+       mov rax,r8
+       lock cmpxchg [rcx],dx
+       ret
+.code
+        ALIGN 8
+        PUBLIC __TBB_machine_pause
+__TBB_machine_pause:
+L1:
+        dw 090f3H; pause
+        add ecx,-1
+        jne L1
+        ret
+end
+
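Under the Microsoft x64 calling convention the first three integer arguments arrive in rcx, rdx and r8 and the result is returned in rax (or its al/ax sub-registers), so the routines above correspond to C-level prototypes of roughly the following shape. This is an inference from the register usage, not a copy of a TBB header, and the exact declarations in the library may differ:

    #include <cstdint>

    // Inferred signatures: rcx = ptr, rdx = addend/value, r8 = comparand.
    extern "C" {
        int8_t  __TBB_machine_fetchadd1  (volatile void* ptr, int8_t  addend);
        int8_t  __TBB_machine_fetchstore1(volatile void* ptr, int8_t  value);
        int8_t  __TBB_machine_cmpswp1    (volatile void* ptr, int8_t  value, int8_t  comparand);
        int16_t __TBB_machine_fetchadd2  (volatile void* ptr, int16_t addend);
        int16_t __TBB_machine_fetchstore2(volatile void* ptr, int16_t value);
        int16_t __TBB_machine_cmpswp2    (volatile void* ptr, int16_t value, int16_t comparand);
        void    __TBB_machine_pause      (int32_t delay);   // loops on 'pause' delay times
    }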
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/intrusive_list.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/intrusive_list.h
new file mode 100644 (file)
index 0000000..ed1bf1d
--- /dev/null
@@ -0,0 +1,255 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _TBB_intrusive_list_H
+#define _TBB_intrusive_list_H
+
+#include "tbb/tbb_stddef.h"
+
+#if __TBB_ARENA_PER_MASTER
+
+namespace tbb {
+namespace internal {
+
+//! Data structure to be inherited by the types that can form intrusive lists.
+/** An intrusive list is formed by means of the intrusive_list<T> or
+    memptr_intrusive_list<T,U,NodePtr> template classes defined below. For
+    intrusive_list<T>, type T must derive from intrusive_list_node, either
+    publicly or while declaring the list instantiation as a friend.
+    The list classes implement a limited subset of the std::list interface. **/
+struct intrusive_list_node {
+    intrusive_list_node *my_prev_node,
+                        *my_next_node;
+#if TBB_USE_ASSERT
+    intrusive_list_node () { my_prev_node = my_next_node = this; }
+#endif /* TBB_USE_ASSERT */
+};
+
+//! List of elements of type T, where T is derived from intrusive_list_node
+/** The class is not thread safe. **/
+template <class List, class T>
+class intrusive_list_base {
+    //! Sentinel node serving as the head of the list
+    intrusive_list_node my_head;
+
+    //! Number of list elements
+    size_t my_size;
+
+    static intrusive_list_node& node ( T& item ) { return List::node(item); }
+
+    static T& item ( intrusive_list_node* node ) { return List::item(node); }
+
+    template<class Iterator>
+    class iterator_impl {
+        Iterator& self () { return *static_cast<Iterator*>(this); }
+
+        //! Pointer to the head of the list being iterated
+        intrusive_list_node *my_list_head;
+
+        //! Node the iterator points to at the moment
+        intrusive_list_node *my_pos;
+
+    protected:
+        iterator_impl ( intrusive_list_node* head, intrusive_list_node* pos )
+            : my_list_head(head), my_pos(pos)
+        {}
+
+        T& item () const {
+            //return *reinterpret_cast<T*>((char*)my_pos - ((ptrdiff_t)&(reinterpret_cast<T*>(0x1000)->*NodePtr) - 0x1000));
+            return intrusive_list_base::item(my_pos);
+        }
+
+    public:
+        iterator_impl () : my_list_head(NULL), my_pos(NULL) {}
+        
+        bool operator == ( const Iterator& it ) const {
+            return my_pos == it.my_pos;
+        }
+
+        bool operator != ( const Iterator& it ) const {
+            return my_pos != it.my_pos;
+        }
+
+        Iterator& operator++ () {
+            my_pos = my_pos->my_next_node;
+            return self();
+        }
+
+        Iterator& operator-- () {
+            my_pos = my_pos->my_prev_node;
+            return self();
+        }
+
+        Iterator operator++ ( int ) {
+            Iterator result = self();
+            ++(*this);
+            return result;
+        }
+
+        Iterator operator-- ( int ) {
+            Iterator result = self();
+            --(*this);
+            return result;
+        }
+    }; // intrusive_list_base::iterator_impl
+
+    void assert_ok () const {
+        __TBB_ASSERT( (my_head.my_prev_node == &my_head && !my_size) || 
+                      (my_head.my_next_node != &my_head && my_size >0), "intrusive_list_base corrupted" );
+#if TBB_USE_ASSERT >= 2
+        size_t i = 0;
+        for ( intrusive_list_node *n = my_head.my_next_node; n != &my_head; n = n->my_next_node )
+            ++i;
+        __TBB_ASSERT( my_size == i, "Wrong size" );
+#endif /* TBB_USE_ASSERT >= 2 */
+    }
+
+public:
+    class iterator : public iterator_impl<iterator> {
+        template <class U, class V> friend class intrusive_list_base;
+
+        iterator ( intrusive_list_node* head, intrusive_list_node* pos )
+            : iterator_impl<iterator>( head, pos )
+        {}
+    public:
+        iterator () {}
+        
+        T* operator-> () const { return &this->item(); }
+
+        T& operator* () const { return this->item(); }
+    }; // class iterator
+
+    class const_iterator : public iterator_impl<const_iterator> {
+        template <class U, class V> friend class intrusive_list_base;
+
+        const_iterator ( const intrusive_list_node* head, const intrusive_list_node* pos )
+            : iterator_impl<const_iterator>( const_cast<intrusive_list_node*>(head), const_cast<intrusive_list_node*>(pos) )
+        {}
+    public:
+        const_iterator () {}
+        
+        const T* operator-> () const { return &this->item(); }
+
+        const T& operator* () const { return this->item(); }
+    }; // class const_iterator
+
+    intrusive_list_base () : my_size(0) {
+        my_head.my_prev_node = &my_head;
+        my_head.my_next_node = &my_head;
+    }
+
+    bool empty () const { return my_head.my_next_node == &my_head; }
+
+    size_t size () const { return my_size; }
+
+    iterator begin () { return iterator(&my_head, my_head.my_next_node); }
+
+    iterator end () { return iterator(&my_head, &my_head); }
+
+    const_iterator begin () const { return const_iterator(&my_head, my_head.my_next_node); }
+
+    const_iterator end () const { return const_iterator(&my_head, &my_head); }
+
+    void push_front ( T& val ) {
+        __TBB_ASSERT( node(val).my_prev_node == &node(val) && node(val).my_next_node == &node(val), 
+                    "Object with intrusive list node can be part of only one intrusive list simultaneously" );
+        // An object can be part of only one intrusive list at the given moment via the given node member 
+        node(val).my_prev_node = &my_head;
+        node(val).my_next_node = my_head.my_next_node;
+        my_head.my_next_node->my_prev_node = &node(val);
+        my_head.my_next_node = &node(val);
+        ++my_size;
+        assert_ok();
+    }
+
+    void remove( T& val ) {
+        __TBB_ASSERT( node(val).my_prev_node != &node(val) && node(val).my_next_node != &node(val), "Element to remove is not in the list" );
+        --my_size;
+        node(val).my_next_node->my_prev_node = node(val).my_prev_node;
+        node(val).my_prev_node->my_next_node = node(val).my_next_node;
+#if TBB_USE_ASSERT
+        node(val).my_prev_node = node(val).my_next_node = &node(val);
+#endif
+        assert_ok();
+    }
+
+    iterator erase ( iterator it ) {
+        T& val = *it;
+        ++it;
+        remove( val );
+        return it;
+    }
+
+}; // intrusive_list_base
+
+
+//! Doubly linked list of items of type T, each containing a member of type intrusive_list_node.
+/** NodePtr is a member pointer to the node data field. Class U is either T or
+    a base class of T containing the node member. Default values exist for the sake
+    of a partial specialization that handles the inheritance case.
+
+    The list does not take ownership of its items. Its purpose is to avoid dynamic
+    memory allocation when forming lists of existing objects.
+
+    The class is not thread safe. **/
+template <class T, class U, intrusive_list_node U::*NodePtr>
+class memptr_intrusive_list : public intrusive_list_base<memptr_intrusive_list<T, U, NodePtr>, T>
+{
+    friend class intrusive_list_base<memptr_intrusive_list<T, U, NodePtr>, T>;
+
+    static intrusive_list_node& node ( T& val ) { return val.*NodePtr; }
+
+    static T& item ( intrusive_list_node* node ) {
+        // Cannot use the __TBB_offsetof (and consequently __TBB_get_object_ref) macro
+        // with a *NodePtr argument because gcc refuses to interpret the pasted "->" and "*"
+        // as the member pointer dereferencing operator, and explicit usage of ## in the
+        // __TBB_offsetof implementation breaks operations with normal member names.
+        return *reinterpret_cast<T*>((char*)node - ((ptrdiff_t)&(reinterpret_cast<T*>(0x1000)->*NodePtr) - 0x1000));
+    }
+}; // memptr_intrusive_list<T, U, NodePtr>
+
+//! Double linked list of items of type T that is derived from intrusive_list_node class.
+/** The list does not have ownership of its items. Its purpose is to avoid dynamic 
+    memory allocation when forming lists of existing objects.
+
+    The class is not thread safe. **/
+template <class T>
+class intrusive_list : public intrusive_list_base<intrusive_list<T>, T>
+{
+    friend class intrusive_list_base<intrusive_list<T>, T>;
+
+    static intrusive_list_node& node ( T& val ) { return val; }
+
+    static T& item ( intrusive_list_node* node ) { return *static_cast<T*>(node); }
+}; // intrusive_list<T>
+
+} // namespace internal
+} // namespace tbb
+
+#endif /* __TBB_ARENA_PER_MASTER */
+
+#endif /* _TBB_intrusive_list_H */
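For the derivation-based variant, an element type simply inherits from intrusive_list_node and can then be linked into an intrusive_list<T> with no dynamic allocation. A usage sketch, assuming a build with __TBB_ARENA_PER_MASTER enabled (the element type and call site are made up for illustration; the real users live in the scheduler sources):

    #include "intrusive_list.h"

    using tbb::internal::intrusive_list;
    using tbb::internal::intrusive_list_node;

    // Hypothetical element type: participates in the list by deriving from the node.
    struct item_t : intrusive_list_node {
        int id;
        explicit item_t(int i) : id(i) {}
    };

    void example() {
        item_t a(1), b(2);                         // no heap allocation involved
        intrusive_list<item_t> list;
        list.push_front(a);
        list.push_front(b);
        for (intrusive_list<item_t>::iterator it = list.begin(); it != list.end(); ++it)
            (void)it->id;                          // visits b, then a
        list.remove(b);
        list.remove(a);                            // unlink elements before they go away
    }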
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/itt_notify.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/itt_notify.cpp
new file mode 100644 (file)
index 0000000..9c52838
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#if DO_ITT_NOTIFY
+
+#if _WIN32||_WIN64
+    #ifndef UNICODE
+        #define UNICODE
+    #endif
+#endif /* WIN */
+
+extern "C" void ITT_DoOneTimeInitialization();
+
+#define ITT_SIMPLE_INIT 1
+#define __itt_init_ittlib_name(x,y) (ITT_DoOneTimeInitialization(), true)
+
+#include "tools_api/ittnotify_static.c"
+
+namespace tbb {
+namespace internal {
+int __TBB_load_ittnotify() {
+    return __itt_init_ittlib(NULL, __itt_group_none);
+}
+
+}} // namespaces
+
+#endif /* DO_ITT_NOTIFY */
+
+#define __TBB_NO_IMPLICIT_LINKAGE 1
+#include "itt_notify.h"
+
+namespace tbb {
+
+#if DO_ITT_NOTIFY
+    const tchar 
+            *SyncType_GlobalLock = _T("TbbGlobalLock"),
+            *SyncType_Scheduler = _T("%Constant")
+            ;
+    const tchar 
+            *SyncObj_SchedulerInitialization = _T("TbbSchedulerInitialization"),
+            *SyncObj_SchedulersList = _T("TbbSchedulersList"),
+            *SyncObj_WorkerLifeCycleMgmt = _T("TBB Scheduler"),
+            *SyncObj_TaskStealingLoop = _T("TBB Scheduler"),
+            *SyncObj_WorkerTaskPool = _T("TBB Scheduler"),
+            *SyncObj_MasterTaskPool = _T("TBB Scheduler"),
+            *SyncObj_TaskPoolSpinning = _T("TBB Scheduler"),
+            *SyncObj_Mailbox = _T("TBB Scheduler"),
+            *SyncObj_TaskReturnList = _T("TBB Scheduler"),
+            *SyncObj_TaskStream = _T("TBB Scheduler"),
+            *SyncObj_ContextsList = _T("TBB Scheduler")
+            ;
+#endif /* DO_ITT_NOTIFY */
+
+} // namespace tbb
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/itt_notify.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/itt_notify.h
new file mode 100644 (file)
index 0000000..38cafd8
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _TBB_ITT_NOTIFY
+#define _TBB_ITT_NOTIFY
+
+#include "tbb/tbb_stddef.h"
+
+#if DO_ITT_NOTIFY
+
+#if _WIN32||_WIN64
+    #ifndef UNICODE
+        #define UNICODE
+    #endif
+#endif /* WIN */
+
+#include "tools_api/ittnotify.h"
+#include "tools_api/legacy/ittnotify.h"
+#include "tools_api/internal/ittnotify.h"
+
+#if _WIN32||_WIN64
+    #undef _T
+    #undef __itt_event_create
+    #define __itt_event_create __itt_event_createA
+#endif /* WIN */
+
+
+#endif /* DO_ITT_NOTIFY */
+
+#if !ITT_CALLER_NULL
+#define ITT_CALLER_NULL ((__itt_caller)0)
+#endif
+
+namespace tbb {
+//! Unicode support
+#if (_WIN32||_WIN64) && !__MINGW32__
+    //! Unicode character type. Always wchar_t on Windows.
+    /** We do not use typedefs from the Windows TCHAR family, to keep consistency with the TBB coding style. **/
+    typedef wchar_t tchar;
+    //! Standard Windows macro to mark up string literals.
+    #define _T(string_literal) L ## string_literal
+#else /* !WIN */
+    typedef char tchar;
+    //! Standard Windows-style macro to mark up string literals.
+    #define _T(string_literal) string_literal
+#endif /* !WIN */
+} // namespace tbb
+
+#if DO_ITT_NOTIFY
+namespace tbb {
+    //! Display names of internal synchronization types
+    extern const tchar 
+            *SyncType_GlobalLock,
+            *SyncType_Scheduler;
+    //! Display names of internal synchronization components/scenarios
+    extern const tchar 
+            *SyncObj_SchedulerInitialization,
+            *SyncObj_SchedulersList,
+            *SyncObj_WorkerLifeCycleMgmt,
+            *SyncObj_TaskStealingLoop,
+            *SyncObj_WorkerTaskPool,
+            *SyncObj_MasterTaskPool,
+            *SyncObj_TaskPoolSpinning,
+            *SyncObj_Mailbox,
+            *SyncObj_TaskReturnList,
+            *SyncObj_TaskStream,
+            *SyncObj_ContextsList
+            ;
+
+    namespace internal {
+        void __TBB_EXPORTED_FUNC itt_set_sync_name_v3( void* obj, const tchar* name); 
+
+    } // namespace internal
+
+} // namespace tbb
+
+// const_cast<void*>() is necessary to cast off volatility
+#define ITT_NOTIFY(name,obj)            __itt_notify_##name(const_cast<void*>(static_cast<volatile void*>(obj)))
+#define ITT_THREAD_SET_NAME(name)       __itt_thread_set_name(name)
+#define ITT_SYNC_CREATE(obj, type, name) __itt_sync_create(obj, type, name, 2)
+#define ITT_SYNC_RENAME(obj, name)      __itt_sync_rename(obj, name)
+#define ITT_STACK_CREATE(obj)           obj = __itt_stack_caller_create()
+#define ITT_STACK(name, obj)            __itt_stack_##name(obj)
+
+#else /* !DO_ITT_NOTIFY */
+
+#define ITT_NOTIFY(name,obj)            ((void)0)
+#define ITT_THREAD_SET_NAME(name)       ((void)0)
+#define ITT_SYNC_CREATE(obj, type, name) ((void)0)
+#define ITT_SYNC_RENAME(obj, name)      ((void)0)
+#define ITT_STACK_CREATE(obj)           ((void)0)
+#define ITT_STACK(name, obj)            ((void)0)
+
+#endif /* !DO_ITT_NOTIFY */
+
+namespace tbb {
+namespace internal {
+int __TBB_load_ittnotify();
+}}
+
+#endif /* _TBB_ITT_NOTIFY */
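All of the ITT_* macros collapse to ((void)0) unless DO_ITT_NOTIFY is defined, so call sites can use them unconditionally. A hedged sketch of the intended pattern around a synchronization object (the lock type and call site are invented for illustration; sync_prepare/sync_acquired/sync_releasing are the standard ITT notification points exposed through ITT_NOTIFY):

    #include "itt_notify.h"

    // Hypothetical lock type, only to show where the notifications go.
    struct demo_lock {
        void acquire() {}
        void release() {}
    };

    void locked_update(demo_lock& lock) {
        // Name the object for the analysis tools (normally done once, at creation).
        ITT_SYNC_CREATE(&lock, tbb::SyncType_GlobalLock, _T("DemoLock"));

        ITT_NOTIFY(sync_prepare, &lock);     // about to wait for the lock
        lock.acquire();
        ITT_NOTIFY(sync_acquired, &lock);    // ownership obtained

        /* ... critical section ... */

        ITT_NOTIFY(sync_releasing, &lock);   // about to give up ownership
        lock.release();
    }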
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/lin32-tbb-export.def b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/lin32-tbb-export.def
new file mode 100644 (file)
index 0000000..dadf982
--- /dev/null
@@ -0,0 +1,375 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/tbb_config.h"
+
+{
+global:
+
+/* cache_aligned_allocator.cpp */
+_ZN3tbb8internal12NFS_AllocateEjjPv;
+_ZN3tbb8internal15NFS_GetLineSizeEv;
+_ZN3tbb8internal8NFS_FreeEPv;
+_ZN3tbb8internal23allocate_via_handler_v3Ej;
+_ZN3tbb8internal25deallocate_via_handler_v3EPv;
+_ZN3tbb8internal17is_malloc_used_v3Ev;
+
+/* task.cpp v3 */
+_ZN3tbb4task13note_affinityEt;
+_ZN3tbb4task22internal_set_ref_countEi;
+_ZN3tbb4task28internal_decrement_ref_countEv;
+_ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE;
+_ZN3tbb4task4selfEv;
+_ZN3tbb10interface58internal9task_base7destroyERNS_4taskE;
+_ZNK3tbb4task26is_owned_by_current_threadEv;
+_ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE;
+_ZN3tbb8internal19allocate_root_proxy8allocateEj;
+_ZN3tbb8internal28affinity_partitioner_base_v36resizeEj;
+_ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE;
+_ZNK3tbb8internal20allocate_child_proxy8allocateEj;
+_ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE;
+_ZNK3tbb8internal27allocate_continuation_proxy8allocateEj;
+_ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE;
+_ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEj;
+_ZTIN3tbb4taskE;
+_ZTSN3tbb4taskE;
+_ZTVN3tbb4taskE;
+_ZN3tbb19task_scheduler_init19default_num_threadsEv;
+_ZN3tbb19task_scheduler_init10initializeEij;
+_ZN3tbb19task_scheduler_init10initializeEi;
+_ZN3tbb19task_scheduler_init9terminateEv;
+_ZN3tbb8internal26task_scheduler_observer_v37observeEb;
+_ZN3tbb10empty_task7executeEv;
+_ZN3tbb10empty_taskD0Ev;
+_ZN3tbb10empty_taskD1Ev;
+_ZTIN3tbb10empty_taskE;
+_ZTSN3tbb10empty_taskE;
+_ZTVN3tbb10empty_taskE;
+
+#if !TBB_NO_LEGACY
+/* task_v2.cpp */
+_ZN3tbb4task7destroyERS0_;
+#endif /* !TBB_NO_LEGACY */
+
+/* Exception handling in task scheduler */
+#if __TBB_TASK_GROUP_CONTEXT
+_ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEj;
+_ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE;
+_ZNK3tbb18task_group_context28is_group_execution_cancelledEv;
+_ZN3tbb18task_group_context22cancel_group_executionEv;
+_ZN3tbb18task_group_context26register_pending_exceptionEv;
+_ZN3tbb18task_group_context5resetEv;
+_ZN3tbb18task_group_context4initEv;
+_ZN3tbb18task_group_contextD1Ev;
+_ZN3tbb18task_group_contextD2Ev;
+_ZNK3tbb18captured_exception4nameEv;
+_ZNK3tbb18captured_exception4whatEv;
+_ZN3tbb18captured_exception10throw_selfEv;
+_ZN3tbb18captured_exception3setEPKcS2_;
+_ZN3tbb18captured_exception4moveEv;
+_ZN3tbb18captured_exception5clearEv;
+_ZN3tbb18captured_exception7destroyEv;
+_ZN3tbb18captured_exception8allocateEPKcS2_;
+_ZN3tbb18captured_exceptionD0Ev;
+_ZN3tbb18captured_exceptionD1Ev;
+_ZTIN3tbb18captured_exceptionE;
+_ZTSN3tbb18captured_exceptionE;
+_ZTVN3tbb18captured_exceptionE;
+_ZN3tbb13tbb_exceptionD2Ev;
+_ZTIN3tbb13tbb_exceptionE;
+_ZTSN3tbb13tbb_exceptionE;
+_ZTVN3tbb13tbb_exceptionE;
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+/* Symbols for exceptions thrown from TBB */
+_ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev;
+_ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE;
+_ZN3tbb14bad_last_allocD0Ev;
+_ZN3tbb14bad_last_allocD1Ev;
+_ZNK3tbb14bad_last_alloc4whatEv;
+_ZTIN3tbb14bad_last_allocE;
+_ZTSN3tbb14bad_last_allocE;
+_ZTVN3tbb14bad_last_allocE;
+_ZN3tbb12missing_waitD0Ev;
+_ZN3tbb12missing_waitD1Ev;
+_ZNK3tbb12missing_wait4whatEv;
+_ZTIN3tbb12missing_waitE;
+_ZTSN3tbb12missing_waitE;
+_ZTVN3tbb12missing_waitE;
+_ZN3tbb27invalid_multiple_schedulingD0Ev;
+_ZN3tbb27invalid_multiple_schedulingD1Ev;
+_ZNK3tbb27invalid_multiple_scheduling4whatEv;
+_ZTIN3tbb27invalid_multiple_schedulingE;
+_ZTSN3tbb27invalid_multiple_schedulingE;
+_ZTVN3tbb27invalid_multiple_schedulingE;
+_ZN3tbb13improper_lockD0Ev;
+_ZN3tbb13improper_lockD1Ev;
+_ZNK3tbb13improper_lock4whatEv;
+_ZTIN3tbb13improper_lockE;
+_ZTSN3tbb13improper_lockE;
+_ZTVN3tbb13improper_lockE;
+
+/* tbb_misc.cpp */
+_ZN3tbb17assertion_failureEPKciS1_S1_;
+_ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E;
+_ZN3tbb8internal36get_initial_auto_partitioner_divisorEv;
+_ZN3tbb8internal13handle_perrorEiPKc;
+_ZN3tbb8internal15runtime_warningEPKcz;
+__TBB_machine_store8_slow_perf_warning;
+__TBB_machine_store8_slow;
+TBB_runtime_interface_version;
+
+/* itt_notify.cpp */
+_ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv;
+_ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_;
+_ZN3tbb8internal20itt_set_sync_name_v3EPvPKc;
+_ZN3tbb8internal19itt_load_pointer_v3EPKv;
+
+/* pipeline.cpp */
+_ZTIN3tbb6filterE;
+_ZTSN3tbb6filterE;
+_ZTVN3tbb6filterE;
+_ZN3tbb6filterD2Ev;
+_ZN3tbb8pipeline10add_filterERNS_6filterE;
+_ZN3tbb8pipeline12inject_tokenERNS_4taskE;
+_ZN3tbb8pipeline13remove_filterERNS_6filterE;
+_ZN3tbb8pipeline3runEj;
+#if __TBB_TASK_GROUP_CONTEXT
+_ZN3tbb8pipeline3runEjRNS_18task_group_contextE;
+#endif
+_ZN3tbb8pipeline5clearEv;
+_ZN3tbb19thread_bound_filter12process_itemEv;
+_ZN3tbb19thread_bound_filter16try_process_itemEv;
+_ZTIN3tbb8pipelineE;
+_ZTSN3tbb8pipelineE;
+_ZTVN3tbb8pipelineE;
+_ZN3tbb8pipelineC1Ev;
+_ZN3tbb8pipelineC2Ev;
+_ZN3tbb8pipelineD0Ev;
+_ZN3tbb8pipelineD1Ev;
+_ZN3tbb8pipelineD2Ev;
+
+/* queuing_rw_mutex.cpp */
+_ZN3tbb16queuing_rw_mutex18internal_constructEv;
+_ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv;
+_ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv;
+_ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b;
+_ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv;
+_ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b;
+
+/* reader_writer_lock.cpp */
+_ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv;
+_ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_;
+_ZN3tbb10interface518reader_writer_lock13try_lock_readEv;
+_ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv;
+_ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_;
+_ZN3tbb10interface518reader_writer_lock16internal_destroyEv;
+_ZN3tbb10interface518reader_writer_lock18internal_constructEv;
+_ZN3tbb10interface518reader_writer_lock4lockEv;
+_ZN3tbb10interface518reader_writer_lock6unlockEv;
+_ZN3tbb10interface518reader_writer_lock8try_lockEv;
+_ZN3tbb10interface518reader_writer_lock9lock_readEv;
+
+#if !TBB_NO_LEGACY
+/* spin_rw_mutex.cpp v2 */
+_ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_;
+_ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_;
+_ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_;
+_ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_;
+_ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_;
+_ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_;
+_ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_;
+_ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_;
+_ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_;
+#endif
+
+/* spin_rw_mutex v3 */
+_ZN3tbb16spin_rw_mutex_v318internal_constructEv;
+_ZN3tbb16spin_rw_mutex_v316internal_upgradeEv;
+_ZN3tbb16spin_rw_mutex_v318internal_downgradeEv;
+_ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv;
+_ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv;
+_ZN3tbb16spin_rw_mutex_v323internal_release_readerEv;
+_ZN3tbb16spin_rw_mutex_v323internal_release_writerEv;
+_ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv;
+_ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv;
+
+/* spin_mutex.cpp */
+_ZN3tbb10spin_mutex18internal_constructEv;
+_ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_;
+_ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv;
+_ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_;
+
+/* mutex.cpp */
+_ZN3tbb5mutex11scoped_lock16internal_acquireERS0_;
+_ZN3tbb5mutex11scoped_lock16internal_releaseEv;
+_ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_;
+_ZN3tbb5mutex16internal_destroyEv;
+_ZN3tbb5mutex18internal_constructEv;
+
+/* recursive_mutex.cpp */
+_ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_;
+_ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv;
+_ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_;
+_ZN3tbb15recursive_mutex16internal_destroyEv;
+_ZN3tbb15recursive_mutex18internal_constructEv;
+
+/* QueuingMutex.cpp */
+_ZN3tbb13queuing_mutex18internal_constructEv;
+_ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_;
+_ZN3tbb13queuing_mutex11scoped_lock7releaseEv;
+_ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_;
+
+/* critical_section.cpp */
+_ZN3tbb8internal19critical_section_v418internal_constructEv;
+
+#if !TBB_NO_LEGACY
+/* concurrent_hash_map */
+_ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv;
+
+/* concurrent_queue.cpp v2 */
+_ZN3tbb8internal21concurrent_queue_base12internal_popEPv;
+_ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv;
+_ZN3tbb8internal21concurrent_queue_base21internal_set_capacityEij;
+_ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv;
+_ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv;
+_ZN3tbb8internal21concurrent_queue_baseC2Ej;
+_ZN3tbb8internal21concurrent_queue_baseD2Ev;
+_ZTIN3tbb8internal21concurrent_queue_baseE;
+_ZTSN3tbb8internal21concurrent_queue_baseE;
+_ZTVN3tbb8internal21concurrent_queue_baseE;
+_ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_;
+_ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv;
+_ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE;
+_ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev;
+_ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv;
+#endif
+
+/* concurrent_queue v3 */
+/* constructors */
+_ZN3tbb8internal24concurrent_queue_base_v3C2Ej;
+_ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E;
+_ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Ej;
+/* destructors */
+_ZN3tbb8internal24concurrent_queue_base_v3D2Ev;
+_ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev;
+/* typeinfo */
+_ZTIN3tbb8internal24concurrent_queue_base_v3E;
+_ZTSN3tbb8internal24concurrent_queue_base_v3E;
+/* vtable */
+_ZTVN3tbb8internal24concurrent_queue_base_v3E;
+/* methods */
+_ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv;
+_ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_;
+_ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv;
+_ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv;
+_ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv;
+_ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv;
+_ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityEij;
+_ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv;
+_ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv;
+_ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv;
+_ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv;
+_ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_;
+
+#if !TBB_NO_LEGACY
+/* concurrent_vector.cpp v2 */
+_ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_jPFvPvPKvjE;
+_ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvjEb;
+_ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_jPFvPvjEPFvS4_PKvjESA_;
+_ZN3tbb8internal22concurrent_vector_base16internal_grow_byEjjPFvPvjE;
+_ZN3tbb8internal22concurrent_vector_base16internal_reserveEjjj;
+_ZN3tbb8internal22concurrent_vector_base18internal_push_backEjRj;
+_ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEjjPFvPvjE;
+_ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv;
+#endif
+
+/* concurrent_vector v3 */
+_ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_jPFvPvPKvjE;
+_ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvjE;
+_ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_jPFvPvjEPFvS4_PKvjESA_;
+_ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEjjPFvPvPKvjES4_;
+_ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEjjj;
+_ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEjRj;
+_ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEjjPFvPvPKvjES4_;
+_ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv;
+_ZN3tbb8internal25concurrent_vector_base_v316internal_compactEjPvPFvS2_jEPFvS2_PKvjE;
+_ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_;
+_ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEj;
+_ZN3tbb8internal25concurrent_vector_base_v3D2Ev;
+_ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEjjjPKvPFvPvjEPFvS4_S3_jE;
+_ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEjjPFvPvPKvjES4_;
+
+/* tbb_thread */
+#if __MINGW32__
+_ZN3tbb8internal13tbb_thread_v314internal_startEPFjPvES2_;
+#else
+_ZN3tbb8internal13tbb_thread_v314internal_startEPFPvS2_ES2_;
+#endif
+_ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv;
+_ZN3tbb8internal13tbb_thread_v34joinEv;
+_ZN3tbb8internal13tbb_thread_v36detachEv;
+_ZN3tbb8internal15free_closure_v3EPv;
+_ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE;
+_ZN3tbb8internal15thread_yield_v3Ev;
+_ZN3tbb8internal16thread_get_id_v3Ev;
+_ZN3tbb8internal19allocate_closure_v3Ej;
+_ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_;
+
+#if __MINGW32__
+/* condition_variable */
+_ZN3tbb10interface58internal32internal_condition_variable_waitERNS1_14condvar_impl_tEPNS_5mutexEPKNS_10tick_count10interval_tE;
+_ZN3tbb10interface58internal35internal_destroy_condition_variableERNS1_14condvar_impl_tE;
+_ZN3tbb10interface58internal38internal_condition_variable_notify_allERNS1_14condvar_impl_tE;
+_ZN3tbb10interface58internal38internal_condition_variable_notify_oneERNS1_14condvar_impl_tE;
+_ZN3tbb10interface58internal38internal_initialize_condition_variableERNS1_14condvar_impl_tE;
+#endif
+
+local:
+
+/* TBB symbols */
+*3tbb*;
+*__TBB*;
+
+/* ITT symbols */
+__itt_*;
+
+/* Intel Compiler (libirc) symbols */
+__intel_*;
+_intel_*;
+get_memcpy_largest_cachelinesize;
+get_memcpy_largest_cache_size;
+get_mem_ops_method;
+init_mem_ops_method;
+irc__get_msg;
+irc__print;
+override_mem_ops_method;
+set_memcpy_largest_cachelinesize;
+set_memcpy_largest_cache_size;
+
+};
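After the C preprocessor runs over it, this file is effectively a GNU ld version script: names listed under global: are exported from the shared library, while the patterns under local: keep internal and third-party symbols hidden. The entries are Itanium-ABI mangled C++ names; note that this 32-bit list encodes size-type parameters as j (unsigned int) where the 64-bit list that follows uses m (unsigned long) -- compare _ZN3tbb8internal12NFS_AllocateEjjPv here with _ZN3tbb8internal12NFS_AllocateEmmPv below. A small self-contained way to check what any given entry demangles to, via the cxxabi interface shipped with GCC:

    #include <cxxabi.h>
    #include <cstdio>
    #include <cstdlib>

    int main() {
        // Any symbol from the export list can be substituted here.
        const char* mangled = "_ZN3tbb8internal12NFS_AllocateEjjPv";
        int status = 0;
        char* demangled = abi::__cxa_demangle(mangled, NULL, NULL, &status);
        if (status == 0 && demangled) {
            // Prints: tbb::internal::NFS_Allocate(unsigned int, unsigned int, void*)
            std::printf("%s\n", demangled);
            std::free(demangled);
        }
        return 0;
    }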
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/lin64-tbb-export.def b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/lin64-tbb-export.def
new file mode 100644 (file)
index 0000000..159aec6
--- /dev/null
@@ -0,0 +1,357 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/tbb_config.h"
+
+{
+global:
+
+/* cache_aligned_allocator.cpp */
+_ZN3tbb8internal12NFS_AllocateEmmPv;
+_ZN3tbb8internal15NFS_GetLineSizeEv;
+_ZN3tbb8internal8NFS_FreeEPv;
+_ZN3tbb8internal23allocate_via_handler_v3Em;
+_ZN3tbb8internal25deallocate_via_handler_v3EPv;
+_ZN3tbb8internal17is_malloc_used_v3Ev;
+
+/* task.cpp v3 */
+_ZN3tbb4task13note_affinityEt;
+_ZN3tbb4task22internal_set_ref_countEi;
+_ZN3tbb4task28internal_decrement_ref_countEv;
+_ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE;
+_ZN3tbb4task4selfEv;
+_ZN3tbb10interface58internal9task_base7destroyERNS_4taskE;
+_ZNK3tbb4task26is_owned_by_current_threadEv;
+_ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE;
+_ZN3tbb8internal19allocate_root_proxy8allocateEm;
+_ZN3tbb8internal28affinity_partitioner_base_v36resizeEj;
+_ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE;
+_ZNK3tbb8internal20allocate_child_proxy8allocateEm;
+_ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE;
+_ZNK3tbb8internal27allocate_continuation_proxy8allocateEm;
+_ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE;
+_ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEm;
+_ZTIN3tbb4taskE;
+_ZTSN3tbb4taskE;
+_ZTVN3tbb4taskE;
+_ZN3tbb19task_scheduler_init19default_num_threadsEv;
+_ZN3tbb19task_scheduler_init10initializeEim;
+_ZN3tbb19task_scheduler_init10initializeEi;
+_ZN3tbb19task_scheduler_init9terminateEv;
+_ZN3tbb8internal26task_scheduler_observer_v37observeEb;
+_ZN3tbb10empty_task7executeEv;
+_ZN3tbb10empty_taskD0Ev;
+_ZN3tbb10empty_taskD1Ev;
+_ZTIN3tbb10empty_taskE;
+_ZTSN3tbb10empty_taskE;
+_ZTVN3tbb10empty_taskE;
+
+#if !TBB_NO_LEGACY
+/* task_v2.cpp */
+_ZN3tbb4task7destroyERS0_;
+#endif /* !TBB_NO_LEGACY */
+
+/* Exception handling in task scheduler */
+#if __TBB_TASK_GROUP_CONTEXT
+_ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEm;
+_ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE;
+_ZNK3tbb18task_group_context28is_group_execution_cancelledEv;
+_ZN3tbb18task_group_context22cancel_group_executionEv;
+_ZN3tbb18task_group_context26register_pending_exceptionEv;
+_ZN3tbb18task_group_context5resetEv;
+_ZN3tbb18task_group_context4initEv;
+_ZN3tbb18task_group_contextD1Ev;
+_ZN3tbb18task_group_contextD2Ev;
+_ZNK3tbb18captured_exception4nameEv;
+_ZNK3tbb18captured_exception4whatEv;
+_ZN3tbb18captured_exception10throw_selfEv;
+_ZN3tbb18captured_exception3setEPKcS2_;
+_ZN3tbb18captured_exception4moveEv;
+_ZN3tbb18captured_exception5clearEv;
+_ZN3tbb18captured_exception7destroyEv;
+_ZN3tbb18captured_exception8allocateEPKcS2_;
+_ZN3tbb18captured_exceptionD0Ev;
+_ZN3tbb18captured_exceptionD1Ev;
+_ZTIN3tbb18captured_exceptionE;
+_ZTSN3tbb18captured_exceptionE;
+_ZTVN3tbb18captured_exceptionE;
+_ZN3tbb13tbb_exceptionD2Ev;
+_ZTIN3tbb13tbb_exceptionE;
+_ZTSN3tbb13tbb_exceptionE;
+_ZTVN3tbb13tbb_exceptionE;
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+/* Symbols for exceptions thrown from TBB */
+_ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev;
+_ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE;
+_ZN3tbb14bad_last_allocD0Ev;
+_ZN3tbb14bad_last_allocD1Ev;
+_ZNK3tbb14bad_last_alloc4whatEv;
+_ZTIN3tbb14bad_last_allocE;
+_ZTSN3tbb14bad_last_allocE;
+_ZTVN3tbb14bad_last_allocE;
+_ZN3tbb12missing_waitD0Ev;
+_ZN3tbb12missing_waitD1Ev;
+_ZNK3tbb12missing_wait4whatEv;
+_ZTIN3tbb12missing_waitE;
+_ZTSN3tbb12missing_waitE;
+_ZTVN3tbb12missing_waitE;
+_ZN3tbb27invalid_multiple_schedulingD0Ev;
+_ZN3tbb27invalid_multiple_schedulingD1Ev;
+_ZNK3tbb27invalid_multiple_scheduling4whatEv;
+_ZTIN3tbb27invalid_multiple_schedulingE;
+_ZTSN3tbb27invalid_multiple_schedulingE;
+_ZTVN3tbb27invalid_multiple_schedulingE;
+_ZN3tbb13improper_lockD0Ev;
+_ZN3tbb13improper_lockD1Ev;
+_ZNK3tbb13improper_lock4whatEv;
+_ZTIN3tbb13improper_lockE;
+_ZTSN3tbb13improper_lockE;
+_ZTVN3tbb13improper_lockE;
+
+/* tbb_misc.cpp */
+_ZN3tbb17assertion_failureEPKciS1_S1_;
+_ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E;
+_ZN3tbb8internal36get_initial_auto_partitioner_divisorEv;
+_ZN3tbb8internal13handle_perrorEiPKc;
+_ZN3tbb8internal15runtime_warningEPKcz;
+TBB_runtime_interface_version;
+
+/* itt_notify.cpp */
+_ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv;
+_ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_;
+_ZN3tbb8internal20itt_set_sync_name_v3EPvPKc;
+_ZN3tbb8internal19itt_load_pointer_v3EPKv;
+
+/* pipeline.cpp */
+_ZTIN3tbb6filterE;
+_ZTSN3tbb6filterE;
+_ZTVN3tbb6filterE;
+_ZN3tbb6filterD2Ev;
+_ZN3tbb8pipeline10add_filterERNS_6filterE;
+_ZN3tbb8pipeline12inject_tokenERNS_4taskE;
+_ZN3tbb8pipeline13remove_filterERNS_6filterE;
+_ZN3tbb8pipeline3runEm;
+#if __TBB_TASK_GROUP_CONTEXT
+_ZN3tbb8pipeline3runEmRNS_18task_group_contextE;
+#endif
+_ZN3tbb8pipeline5clearEv;
+_ZN3tbb19thread_bound_filter12process_itemEv;
+_ZN3tbb19thread_bound_filter16try_process_itemEv;
+_ZTIN3tbb8pipelineE;
+_ZTSN3tbb8pipelineE;
+_ZTVN3tbb8pipelineE;
+_ZN3tbb8pipelineC1Ev;
+_ZN3tbb8pipelineC2Ev;
+_ZN3tbb8pipelineD0Ev;
+_ZN3tbb8pipelineD1Ev;
+_ZN3tbb8pipelineD2Ev;
+
+/* queuing_rw_mutex.cpp */
+_ZN3tbb16queuing_rw_mutex18internal_constructEv;
+_ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv;
+_ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv;
+_ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b;
+_ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv;
+_ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b;
+
+/* reader_writer_lock.cpp */
+_ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv;
+_ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_;
+_ZN3tbb10interface518reader_writer_lock13try_lock_readEv;
+_ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv;
+_ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_;
+_ZN3tbb10interface518reader_writer_lock16internal_destroyEv;
+_ZN3tbb10interface518reader_writer_lock18internal_constructEv;
+_ZN3tbb10interface518reader_writer_lock4lockEv;
+_ZN3tbb10interface518reader_writer_lock6unlockEv;
+_ZN3tbb10interface518reader_writer_lock8try_lockEv;
+_ZN3tbb10interface518reader_writer_lock9lock_readEv;
+
+#if !TBB_NO_LEGACY
+/* spin_rw_mutex.cpp v2 */
+_ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_;
+_ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_;
+_ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_;
+_ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_;
+_ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_;
+_ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_;
+_ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_;
+_ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_;
+_ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_;
+#endif
+
+/* spin_rw_mutex v3 */
+_ZN3tbb16spin_rw_mutex_v318internal_constructEv;
+_ZN3tbb16spin_rw_mutex_v316internal_upgradeEv;
+_ZN3tbb16spin_rw_mutex_v318internal_downgradeEv;
+_ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv;
+_ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv;
+_ZN3tbb16spin_rw_mutex_v323internal_release_readerEv;
+_ZN3tbb16spin_rw_mutex_v323internal_release_writerEv;
+_ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv;
+_ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv;
+
+/* spin_mutex.cpp */
+_ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_;
+_ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv;
+_ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_;
+_ZN3tbb10spin_mutex18internal_constructEv;
+
+/* mutex.cpp */
+_ZN3tbb5mutex11scoped_lock16internal_acquireERS0_;
+_ZN3tbb5mutex11scoped_lock16internal_releaseEv;
+_ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_;
+_ZN3tbb5mutex16internal_destroyEv;
+_ZN3tbb5mutex18internal_constructEv;
+
+/* recursive_mutex.cpp */
+_ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_;
+_ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv;
+_ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_;
+_ZN3tbb15recursive_mutex16internal_destroyEv;
+_ZN3tbb15recursive_mutex18internal_constructEv;
+
+/* queuing_mutex.cpp */
+_ZN3tbb13queuing_mutex18internal_constructEv;
+_ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_;
+_ZN3tbb13queuing_mutex11scoped_lock7releaseEv;
+_ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_;
+
+/* critical_section.cpp */
+_ZN3tbb8internal19critical_section_v418internal_constructEv;
+
+#if !TBB_NO_LEGACY
+/* concurrent_hash_map */
+_ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv;
+
+/* concurrent_queue.cpp v2 */
+_ZN3tbb8internal21concurrent_queue_base12internal_popEPv;
+_ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv;
+_ZN3tbb8internal21concurrent_queue_base21internal_set_capacityElm;
+_ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv;
+_ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv;
+_ZN3tbb8internal21concurrent_queue_baseC2Em;
+_ZN3tbb8internal21concurrent_queue_baseD2Ev;
+_ZTIN3tbb8internal21concurrent_queue_baseE;
+_ZTSN3tbb8internal21concurrent_queue_baseE;
+_ZTVN3tbb8internal21concurrent_queue_baseE;
+_ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_;
+_ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv;
+_ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE;
+_ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev;
+_ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv;
+#endif
+
+/* concurrent_queue v3 */
+/* constructors */
+_ZN3tbb8internal24concurrent_queue_base_v3C2Em;
+_ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E;
+_ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Em;
+/* destructors */
+_ZN3tbb8internal24concurrent_queue_base_v3D2Ev;
+_ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev;
+/* typeinfo */
+_ZTIN3tbb8internal24concurrent_queue_base_v3E;
+_ZTSN3tbb8internal24concurrent_queue_base_v3E;
+/* vtable */
+_ZTVN3tbb8internal24concurrent_queue_base_v3E;
+/* methods */
+_ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_;
+_ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv;
+_ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv;
+_ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv;
+_ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv;
+_ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv;
+_ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv;
+_ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityElm;
+_ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv;
+_ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv;
+_ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv;
+_ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_;
+
+#if !TBB_NO_LEGACY
+/* concurrent_vector.cpp v2 */
+_ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_mPFvPvPKvmE;
+_ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvmEb;
+_ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_;
+_ZN3tbb8internal22concurrent_vector_base16internal_grow_byEmmPFvPvmE;
+_ZN3tbb8internal22concurrent_vector_base16internal_reserveEmmm;
+_ZN3tbb8internal22concurrent_vector_base18internal_push_backEmRm;
+_ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEmmPFvPvmE;
+_ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv;
+#endif
+
+/* concurrent_vector v3 */
+_ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_mPFvPvPKvmE;
+_ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvmE;
+_ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_;
+_ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEmmPFvPvPKvmES4_;
+_ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEmmm;
+_ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEmRm;
+_ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEmmPFvPvPKvmES4_;
+_ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv;
+_ZN3tbb8internal25concurrent_vector_base_v316internal_compactEmPvPFvS2_mEPFvS2_PKvmE;
+_ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_;
+_ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEm;
+_ZN3tbb8internal25concurrent_vector_base_v3D2Ev;
+_ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEmmmPKvPFvPvmEPFvS4_S3_mE;
+_ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEmmPFvPvPKvmES4_;
+
+/* tbb_thread */
+_ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv;
+_ZN3tbb8internal13tbb_thread_v36detachEv;
+_ZN3tbb8internal16thread_get_id_v3Ev;
+_ZN3tbb8internal15free_closure_v3EPv;
+_ZN3tbb8internal13tbb_thread_v34joinEv;
+_ZN3tbb8internal13tbb_thread_v314internal_startEPFPvS2_ES2_;
+_ZN3tbb8internal19allocate_closure_v3Em;
+_ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_;
+_ZN3tbb8internal15thread_yield_v3Ev;
+_ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE;
+
+local:
+
+/* TBB symbols */
+*3tbb*;
+*__TBB*;
+
+/* ITT symbols */
+__itt_*;
+
+/* Intel Compiler (libirc) symbols */
+__intel_*;
+_intel_*;
+get_msg_buf;
+get_text_buf;
+message_catalog;
+print_buf;
+irc__get_msg;
+irc__print;
+
+};
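The export definition just closed above is laid out as a GNU ld version script: it is run through the C preprocessor first (hence the #include and the #if/#endif guards), the mangled C++ names listed under "global:" form the public interface of libtbb, and the wildcard patterns under "local:" hide the remaining TBB, ITT and libirc symbols. A minimal sketch, assuming a GCC toolchain that follows the Itanium C++ ABI, of how one of the listed names can be decoded back into a readable signature:

// Decode a mangled name from the export list above (illustration only).
#include <cxxabi.h>
#include <cstdio>
#include <cstdlib>

int main() {
    // Taken from the cache_aligned_allocator.cpp group of the version script.
    const char* mangled = "_ZN3tbb8internal12NFS_AllocateEmmPv";
    int status = 0;
    char* readable = abi::__cxa_demangle(mangled, NULL, NULL, &status);
    if (status == 0) {
        // Prints: tbb::internal::NFS_Allocate(unsigned long, unsigned long, void*)
        std::printf("%s\n", readable);
        std::free(readable);
    }
    return 0;
}

The same decoding applies to every entry in the export files that follow; only the platform conventions around the names change.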
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/lin64ipf-tbb-export.def b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/lin64ipf-tbb-export.def
new file mode 100644 (file)
index 0000000..ba5a0be
--- /dev/null
@@ -0,0 +1,401 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/tbb_config.h"
+
+{
+global:
+
+/* cache_aligned_allocator.cpp */
+_ZN3tbb8internal12NFS_AllocateEmmPv;
+_ZN3tbb8internal15NFS_GetLineSizeEv;
+_ZN3tbb8internal8NFS_FreeEPv;
+_ZN3tbb8internal23allocate_via_handler_v3Em;
+_ZN3tbb8internal25deallocate_via_handler_v3EPv;
+_ZN3tbb8internal17is_malloc_used_v3Ev;
+
+/* task.cpp v3 */
+_ZN3tbb4task13note_affinityEt;
+_ZN3tbb4task22internal_set_ref_countEi;
+_ZN3tbb4task28internal_decrement_ref_countEv;
+_ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE;
+_ZN3tbb4task4selfEv;
+_ZN3tbb10interface58internal9task_base7destroyERNS_4taskE;
+_ZNK3tbb4task26is_owned_by_current_threadEv;
+_ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE;
+_ZN3tbb8internal19allocate_root_proxy8allocateEm;
+_ZN3tbb8internal28affinity_partitioner_base_v36resizeEj;
+_ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE;
+_ZNK3tbb8internal20allocate_child_proxy8allocateEm;
+_ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE;
+_ZNK3tbb8internal27allocate_continuation_proxy8allocateEm;
+_ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE;
+_ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEm;
+_ZTIN3tbb4taskE;
+_ZTSN3tbb4taskE;
+_ZTVN3tbb4taskE;
+_ZN3tbb19task_scheduler_init19default_num_threadsEv;
+_ZN3tbb19task_scheduler_init10initializeEim;
+_ZN3tbb19task_scheduler_init10initializeEi;
+_ZN3tbb19task_scheduler_init9terminateEv;
+_ZN3tbb8internal26task_scheduler_observer_v37observeEb;
+_ZN3tbb10empty_task7executeEv;
+_ZN3tbb10empty_taskD0Ev;
+_ZN3tbb10empty_taskD1Ev;
+_ZTIN3tbb10empty_taskE;
+_ZTSN3tbb10empty_taskE;
+_ZTVN3tbb10empty_taskE;
+
+#if !TBB_NO_LEGACY
+/* task_v2.cpp */
+_ZN3tbb4task7destroyERS0_;
+#endif /* !TBB_NO_LEGACY */
+
+/* Exception handling in task scheduler */
+#if __TBB_TASK_GROUP_CONTEXT
+_ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEm;
+_ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE;
+_ZNK3tbb18task_group_context28is_group_execution_cancelledEv;
+_ZN3tbb18task_group_context22cancel_group_executionEv;
+_ZN3tbb18task_group_context26register_pending_exceptionEv;
+_ZN3tbb18task_group_context5resetEv;
+_ZN3tbb18task_group_context4initEv;
+_ZN3tbb18task_group_contextD1Ev;
+_ZN3tbb18task_group_contextD2Ev;
+_ZNK3tbb18captured_exception4nameEv;
+_ZNK3tbb18captured_exception4whatEv;
+_ZN3tbb18captured_exception10throw_selfEv;
+_ZN3tbb18captured_exception3setEPKcS2_;
+_ZN3tbb18captured_exception4moveEv;
+_ZN3tbb18captured_exception5clearEv;
+_ZN3tbb18captured_exception7destroyEv;
+_ZN3tbb18captured_exception8allocateEPKcS2_;
+_ZN3tbb18captured_exceptionD0Ev;
+_ZN3tbb18captured_exceptionD1Ev;
+_ZTIN3tbb18captured_exceptionE;
+_ZTSN3tbb18captured_exceptionE;
+_ZTVN3tbb18captured_exceptionE;
+_ZN3tbb13tbb_exceptionD2Ev;
+_ZTIN3tbb13tbb_exceptionE;
+_ZTSN3tbb13tbb_exceptionE;
+_ZTVN3tbb13tbb_exceptionE;
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+/* Symbols for exceptions thrown from TBB */
+_ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev;
+_ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE;
+_ZN3tbb14bad_last_allocD0Ev;
+_ZN3tbb14bad_last_allocD1Ev;
+_ZNK3tbb14bad_last_alloc4whatEv;
+_ZTIN3tbb14bad_last_allocE;
+_ZTSN3tbb14bad_last_allocE;
+_ZTVN3tbb14bad_last_allocE;
+_ZN3tbb12missing_waitD0Ev;
+_ZN3tbb12missing_waitD1Ev;
+_ZNK3tbb12missing_wait4whatEv;
+_ZTIN3tbb12missing_waitE;
+_ZTSN3tbb12missing_waitE;
+_ZTVN3tbb12missing_waitE;
+_ZN3tbb27invalid_multiple_schedulingD0Ev;
+_ZN3tbb27invalid_multiple_schedulingD1Ev;
+_ZNK3tbb27invalid_multiple_scheduling4whatEv;
+_ZTIN3tbb27invalid_multiple_schedulingE;
+_ZTSN3tbb27invalid_multiple_schedulingE;
+_ZTVN3tbb27invalid_multiple_schedulingE;
+_ZN3tbb13improper_lockD0Ev;
+_ZN3tbb13improper_lockD1Ev;
+_ZNK3tbb13improper_lock4whatEv;
+_ZTIN3tbb13improper_lockE;
+_ZTSN3tbb13improper_lockE;
+_ZTVN3tbb13improper_lockE;
+
+/* tbb_misc.cpp */
+_ZN3tbb17assertion_failureEPKciS1_S1_;
+_ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E;
+_ZN3tbb8internal36get_initial_auto_partitioner_divisorEv;
+_ZN3tbb8internal13handle_perrorEiPKc;
+_ZN3tbb8internal15runtime_warningEPKcz;
+TBB_runtime_interface_version;
+
+/* itt_notify.cpp */
+_ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv;
+_ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_;
+_ZN3tbb8internal20itt_set_sync_name_v3EPvPKc;
+_ZN3tbb8internal19itt_load_pointer_v3EPKv;
+
+/* pipeline.cpp */
+_ZTIN3tbb6filterE;
+_ZTSN3tbb6filterE;
+_ZTVN3tbb6filterE;
+_ZN3tbb6filterD2Ev;
+_ZN3tbb8pipeline10add_filterERNS_6filterE;
+_ZN3tbb8pipeline12inject_tokenERNS_4taskE;
+_ZN3tbb8pipeline13remove_filterERNS_6filterE;
+_ZN3tbb8pipeline3runEm;
+#if __TBB_TASK_GROUP_CONTEXT
+_ZN3tbb8pipeline3runEmRNS_18task_group_contextE;
+#endif
+_ZN3tbb8pipeline5clearEv;
+_ZN3tbb19thread_bound_filter12process_itemEv;
+_ZN3tbb19thread_bound_filter16try_process_itemEv;
+_ZTIN3tbb8pipelineE;
+_ZTSN3tbb8pipelineE;
+_ZTVN3tbb8pipelineE;
+_ZN3tbb8pipelineC1Ev;
+_ZN3tbb8pipelineC2Ev;
+_ZN3tbb8pipelineD0Ev;
+_ZN3tbb8pipelineD1Ev;
+_ZN3tbb8pipelineD2Ev;
+
+/* queuing_rw_mutex.cpp */
+_ZN3tbb16queuing_rw_mutex18internal_constructEv;
+_ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv;
+_ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv;
+_ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b;
+_ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv;
+_ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b;
+
+/* reader_writer_lock.cpp */
+_ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv;
+_ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_;
+_ZN3tbb10interface518reader_writer_lock13try_lock_readEv;
+_ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv;
+_ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_;
+_ZN3tbb10interface518reader_writer_lock16internal_destroyEv;
+_ZN3tbb10interface518reader_writer_lock18internal_constructEv;
+_ZN3tbb10interface518reader_writer_lock4lockEv;
+_ZN3tbb10interface518reader_writer_lock6unlockEv;
+_ZN3tbb10interface518reader_writer_lock8try_lockEv;
+_ZN3tbb10interface518reader_writer_lock9lock_readEv;
+
+#if !TBB_NO_LEGACY
+/* spin_rw_mutex.cpp v2 */
+_ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_;
+_ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_;
+_ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_;
+_ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_;
+_ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_;
+_ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_;
+_ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_;
+_ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_;
+_ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_;
+#endif
+
+/* spin_rw_mutex v3 */
+_ZN3tbb16spin_rw_mutex_v318internal_constructEv;
+_ZN3tbb16spin_rw_mutex_v316internal_upgradeEv;
+_ZN3tbb16spin_rw_mutex_v318internal_downgradeEv;
+_ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv;
+_ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv;
+_ZN3tbb16spin_rw_mutex_v323internal_release_readerEv;
+_ZN3tbb16spin_rw_mutex_v323internal_release_writerEv;
+_ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv;
+_ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv;
+
+/* spin_mutex.cpp */
+_ZN3tbb10spin_mutex18internal_constructEv;
+_ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_;
+_ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv;
+_ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_;
+
+/* mutex.cpp */
+_ZN3tbb5mutex11scoped_lock16internal_acquireERS0_;
+_ZN3tbb5mutex11scoped_lock16internal_releaseEv;
+_ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_;
+_ZN3tbb5mutex16internal_destroyEv;
+_ZN3tbb5mutex18internal_constructEv;
+
+/* recursive_mutex.cpp */
+_ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_;
+_ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv;
+_ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_;
+_ZN3tbb15recursive_mutex16internal_destroyEv;
+_ZN3tbb15recursive_mutex18internal_constructEv;
+
+/* queuing_mutex.cpp */
+_ZN3tbb13queuing_mutex18internal_constructEv;
+_ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_;
+_ZN3tbb13queuing_mutex11scoped_lock7releaseEv;
+_ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_;
+
+/* critical_section.cpp */
+_ZN3tbb8internal19critical_section_v418internal_constructEv;
+
+#if !TBB_NO_LEGACY
+/* concurrent_hash_map */
+_ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv;
+
+/* concurrent_queue.cpp v2 */
+_ZN3tbb8internal21concurrent_queue_base12internal_popEPv;
+_ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv;
+_ZN3tbb8internal21concurrent_queue_base21internal_set_capacityElm;
+_ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv;
+_ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv;
+_ZN3tbb8internal21concurrent_queue_baseC2Em;
+_ZN3tbb8internal21concurrent_queue_baseD2Ev;
+_ZTIN3tbb8internal21concurrent_queue_baseE;
+_ZTSN3tbb8internal21concurrent_queue_baseE;
+_ZTVN3tbb8internal21concurrent_queue_baseE;
+_ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_;
+_ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv;
+_ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE;
+_ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev;
+_ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv;
+#endif
+
+/* concurrent_queue v3 */
+/* constructors */
+_ZN3tbb8internal24concurrent_queue_base_v3C2Em;
+_ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E;
+_ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Em;
+/* destructors */
+_ZN3tbb8internal24concurrent_queue_base_v3D2Ev;
+_ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev;
+/* typeinfo */
+_ZTIN3tbb8internal24concurrent_queue_base_v3E;
+_ZTSN3tbb8internal24concurrent_queue_base_v3E;
+/* vtable */
+_ZTVN3tbb8internal24concurrent_queue_base_v3E;
+/* methods */
+_ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_;
+_ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv;
+_ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv;
+_ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv;
+_ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv;
+_ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv;
+_ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv;
+_ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityElm;
+_ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv;
+_ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv;
+_ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv;
+_ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_;
+
+#if !TBB_NO_LEGACY
+/* concurrent_vector.cpp v2 */
+_ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_mPFvPvPKvmE;
+_ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvmEb;
+_ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_;
+_ZN3tbb8internal22concurrent_vector_base16internal_grow_byEmmPFvPvmE;
+_ZN3tbb8internal22concurrent_vector_base16internal_reserveEmmm;
+_ZN3tbb8internal22concurrent_vector_base18internal_push_backEmRm;
+_ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEmmPFvPvmE;
+_ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv;
+#endif
+
+/* concurrent_vector v3 */
+_ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_mPFvPvPKvmE;
+_ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvmE;
+_ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_;
+_ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEmmPFvPvPKvmES4_;
+_ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEmmm;
+_ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEmRm;
+_ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEmmPFvPvPKvmES4_;
+_ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv;
+_ZN3tbb8internal25concurrent_vector_base_v316internal_compactEmPvPFvS2_mEPFvS2_PKvmE;
+_ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_;
+_ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEm;
+_ZN3tbb8internal25concurrent_vector_base_v3D2Ev;
+_ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEmmmPKvPFvPvmEPFvS4_S3_mE;
+_ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEmmPFvPvPKvmES4_;
+
+/* tbb_thread */
+_ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv;
+_ZN3tbb8internal13tbb_thread_v36detachEv;
+_ZN3tbb8internal16thread_get_id_v3Ev;
+_ZN3tbb8internal15free_closure_v3EPv;
+_ZN3tbb8internal13tbb_thread_v34joinEv;
+_ZN3tbb8internal13tbb_thread_v314internal_startEPFPvS2_ES2_;
+_ZN3tbb8internal19allocate_closure_v3Em;
+_ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_;
+_ZN3tbb8internal15thread_yield_v3Ev;
+_ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE;
+
+/* asm functions */
+__TBB_machine_fetchadd1__TBB_full_fence;
+__TBB_machine_fetchadd2__TBB_full_fence;
+__TBB_machine_fetchadd4__TBB_full_fence;
+__TBB_machine_fetchadd8__TBB_full_fence;
+__TBB_machine_fetchstore1__TBB_full_fence;
+__TBB_machine_fetchstore2__TBB_full_fence;
+__TBB_machine_fetchstore4__TBB_full_fence;
+__TBB_machine_fetchstore8__TBB_full_fence;
+__TBB_machine_fetchadd1acquire;
+__TBB_machine_fetchadd1release;
+__TBB_machine_fetchadd2acquire;
+__TBB_machine_fetchadd2release;
+__TBB_machine_fetchadd4acquire;
+__TBB_machine_fetchadd4release;
+__TBB_machine_fetchadd8acquire;
+__TBB_machine_fetchadd8release;
+__TBB_machine_fetchstore1acquire;
+__TBB_machine_fetchstore1release;
+__TBB_machine_fetchstore2acquire;
+__TBB_machine_fetchstore2release;
+__TBB_machine_fetchstore4acquire;
+__TBB_machine_fetchstore4release;
+__TBB_machine_fetchstore8acquire;
+__TBB_machine_fetchstore8release;
+__TBB_machine_cmpswp1acquire;
+__TBB_machine_cmpswp1release;
+__TBB_machine_cmpswp1__TBB_full_fence;
+__TBB_machine_cmpswp2acquire;
+__TBB_machine_cmpswp2release;
+__TBB_machine_cmpswp2__TBB_full_fence;
+__TBB_machine_cmpswp4acquire;
+__TBB_machine_cmpswp4release;
+__TBB_machine_cmpswp4__TBB_full_fence;
+__TBB_machine_cmpswp8acquire;
+__TBB_machine_cmpswp8release;
+__TBB_machine_cmpswp8__TBB_full_fence;
+__TBB_machine_lg;
+__TBB_machine_lockbyte;
+__TBB_machine_pause;
+__TBB_machine_trylockbyte;
+
+local:
+
+/* TBB symbols */
+*3tbb*;
+*__TBB*;
+
+/* ITT symbols */
+__itt_*;
+
+/* Intel Compiler (libirc) symbols */
+__intel_*;
+_intel_*;
+?0_memcopyA;
+?0_memcopyDu;
+?0_memcpyD;
+?1__memcpy;
+?1__memmove;
+?1__serial_memmove;
+memcpy;
+memset;
+
+};
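Relative to the previous script, lin64ipf-tbb-export.def additionally exports the hand-coded IA-64 atomic primitives under "asm functions": fetch-and-add, fetch-and-store and compare-and-swap for 1-, 2-, 4- and 8-byte operands, each in acquire, release and full-fence flavours, plus the byte-lock and pause helpers. The suffix of each name encodes the memory-ordering guarantee the caller gets. As an illustration only (this is not TBB's implementation), the same semantics expressed with the std::atomic interface that C++11 later standardised:

#include <atomic>
#include <cstdint>

// Analogue of __TBB_machine_fetchadd8acquire: returns the previous value and
// keeps later loads on this thread from being reordered before the operation.
int64_t fetch_add_acquire(std::atomic<int64_t>& counter, int64_t addend) {
    return counter.fetch_add(addend, std::memory_order_acquire);
}

// Analogue of __TBB_machine_cmpswp8__TBB_full_fence: a sequentially
// consistent compare-and-swap on an 8-byte operand.
bool cas_full_fence(std::atomic<int64_t>& target, int64_t expected, int64_t desired) {
    return target.compare_exchange_strong(expected, desired,
                                          std::memory_order_seq_cst);
}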
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/mac32-tbb-export.def b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/mac32-tbb-export.def
new file mode 100644 (file)
index 0000000..8725d9d
--- /dev/null
@@ -0,0 +1,343 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+# cache_aligned_allocator.cpp
+__ZN3tbb8internal12NFS_AllocateEmmPv
+__ZN3tbb8internal15NFS_GetLineSizeEv
+__ZN3tbb8internal8NFS_FreeEPv
+__ZN3tbb8internal23allocate_via_handler_v3Em
+__ZN3tbb8internal25deallocate_via_handler_v3EPv
+__ZN3tbb8internal17is_malloc_used_v3Ev
+
+# task.cpp v3
+__ZN3tbb4task13note_affinityEt
+__ZN3tbb4task22internal_set_ref_countEi
+__ZN3tbb4task28internal_decrement_ref_countEv
+__ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE
+__ZN3tbb4task4selfEv
+__ZN3tbb10interface58internal9task_base7destroyERNS_4taskE
+__ZNK3tbb4task26is_owned_by_current_threadEv
+__ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE
+__ZN3tbb8internal19allocate_root_proxy8allocateEm
+__ZN3tbb8internal28affinity_partitioner_base_v36resizeEj
+__ZN3tbb8internal36get_initial_auto_partitioner_divisorEv
+__ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE
+__ZNK3tbb8internal20allocate_child_proxy8allocateEm
+__ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE
+__ZNK3tbb8internal27allocate_continuation_proxy8allocateEm
+__ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE
+__ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEm
+__ZTIN3tbb4taskE
+__ZTSN3tbb4taskE
+__ZTVN3tbb4taskE
+__ZN3tbb19task_scheduler_init19default_num_threadsEv
+__ZN3tbb19task_scheduler_init10initializeEim
+__ZN3tbb19task_scheduler_init10initializeEi
+__ZN3tbb19task_scheduler_init9terminateEv
+__ZN3tbb8internal26task_scheduler_observer_v37observeEb
+__ZN3tbb10empty_task7executeEv
+__ZN3tbb10empty_taskD0Ev
+__ZN3tbb10empty_taskD1Ev
+__ZTIN3tbb10empty_taskE
+__ZTSN3tbb10empty_taskE
+__ZTVN3tbb10empty_taskE
+
+#if !TBB_NO_LEGACY
+# task_v2.cpp
+__ZN3tbb4task7destroyERS0_
+#endif
+
+# Exception handling in task scheduler
+__ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEm
+__ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE
+__ZNK3tbb18task_group_context28is_group_execution_cancelledEv
+__ZN3tbb18task_group_context22cancel_group_executionEv
+__ZN3tbb18task_group_context26register_pending_exceptionEv
+__ZN3tbb18task_group_context5resetEv
+__ZN3tbb18task_group_context4initEv
+__ZN3tbb18task_group_contextD1Ev
+__ZN3tbb18task_group_contextD2Ev
+__ZNK3tbb18captured_exception4nameEv
+__ZNK3tbb18captured_exception4whatEv
+__ZN3tbb18captured_exception10throw_selfEv
+__ZN3tbb18captured_exception3setEPKcS2_
+__ZN3tbb18captured_exception4moveEv
+__ZN3tbb18captured_exception5clearEv
+__ZN3tbb18captured_exception7destroyEv
+__ZN3tbb18captured_exception8allocateEPKcS2_
+__ZN3tbb18captured_exceptionD0Ev
+__ZN3tbb18captured_exceptionD1Ev
+__ZTIN3tbb18captured_exceptionE
+__ZTSN3tbb18captured_exceptionE
+__ZTVN3tbb18captured_exceptionE
+__ZTIN3tbb13tbb_exceptionE
+__ZTSN3tbb13tbb_exceptionE
+__ZTVN3tbb13tbb_exceptionE
+
+# Symbols for exceptions thrown from TBB
+__ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev
+__ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE
+__ZNSt13runtime_errorD1Ev
+__ZTISt13runtime_error
+__ZTSSt13runtime_error
+__ZNSt16invalid_argumentD1Ev
+__ZTISt16invalid_argument
+__ZTSSt16invalid_argument
+__ZNSt11range_errorD1Ev
+__ZTISt11range_error
+__ZTSSt11range_error
+__ZNSt12length_errorD1Ev
+__ZTISt12length_error
+__ZTSSt12length_error
+__ZNSt12out_of_rangeD1Ev
+__ZTISt12out_of_range
+__ZTSSt12out_of_range
+__ZN3tbb14bad_last_allocD0Ev
+__ZN3tbb14bad_last_allocD1Ev
+__ZNK3tbb14bad_last_alloc4whatEv
+__ZTIN3tbb14bad_last_allocE
+__ZTSN3tbb14bad_last_allocE
+__ZTVN3tbb14bad_last_allocE
+__ZN3tbb12missing_waitD0Ev
+__ZN3tbb12missing_waitD1Ev
+__ZNK3tbb12missing_wait4whatEv
+__ZTIN3tbb12missing_waitE
+__ZTSN3tbb12missing_waitE
+__ZTVN3tbb12missing_waitE
+__ZN3tbb27invalid_multiple_schedulingD0Ev
+__ZN3tbb27invalid_multiple_schedulingD1Ev
+__ZNK3tbb27invalid_multiple_scheduling4whatEv
+__ZTIN3tbb27invalid_multiple_schedulingE
+__ZTSN3tbb27invalid_multiple_schedulingE
+__ZTVN3tbb27invalid_multiple_schedulingE
+__ZN3tbb13improper_lockD0Ev
+__ZN3tbb13improper_lockD1Ev
+__ZNK3tbb13improper_lock4whatEv
+__ZTIN3tbb13improper_lockE
+__ZTSN3tbb13improper_lockE
+__ZTVN3tbb13improper_lockE
+
+# tbb_misc.cpp
+__ZN3tbb17assertion_failureEPKciS1_S1_
+__ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E
+__ZN3tbb8internal13handle_perrorEiPKc
+__ZN3tbb8internal15runtime_warningEPKcz
+#ifndef __POWERPC__
+___TBB_machine_store8_slow_perf_warning
+___TBB_machine_store8_slow
+#endif
+_TBB_runtime_interface_version
+
+# itt_notify.cpp
+__ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv
+__ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_
+__ZN3tbb8internal19itt_load_pointer_v3EPKv
+__ZN3tbb8internal20itt_set_sync_name_v3EPvPKc
+
+# pipeline.cpp
+__ZTIN3tbb6filterE
+__ZTSN3tbb6filterE
+__ZTVN3tbb6filterE
+__ZN3tbb6filterD2Ev
+__ZN3tbb8pipeline10add_filterERNS_6filterE
+__ZN3tbb8pipeline12inject_tokenERNS_4taskE
+__ZN3tbb8pipeline13remove_filterERNS_6filterE
+__ZN3tbb8pipeline3runEm
+__ZN3tbb8pipeline3runEmRNS_18task_group_contextE
+__ZN3tbb8pipeline5clearEv
+__ZN3tbb19thread_bound_filter12process_itemEv
+__ZN3tbb19thread_bound_filter16try_process_itemEv
+__ZN3tbb8pipelineC1Ev
+__ZN3tbb8pipelineC2Ev
+__ZN3tbb8pipelineD0Ev
+__ZN3tbb8pipelineD1Ev
+__ZN3tbb8pipelineD2Ev
+__ZTIN3tbb8pipelineE
+__ZTSN3tbb8pipelineE
+__ZTVN3tbb8pipelineE
+
+# queuing_rw_mutex.cpp
+__ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv
+__ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv
+__ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b
+__ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv
+__ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b
+__ZN3tbb16queuing_rw_mutex18internal_constructEv
+
+# reader_writer_lock.cpp
+__ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv
+__ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_
+__ZN3tbb10interface518reader_writer_lock13try_lock_readEv
+__ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv
+__ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_
+__ZN3tbb10interface518reader_writer_lock16internal_destroyEv
+__ZN3tbb10interface518reader_writer_lock18internal_constructEv
+__ZN3tbb10interface518reader_writer_lock4lockEv
+__ZN3tbb10interface518reader_writer_lock6unlockEv
+__ZN3tbb10interface518reader_writer_lock8try_lockEv
+__ZN3tbb10interface518reader_writer_lock9lock_readEv
+
+#if !TBB_NO_LEGACY
+# spin_rw_mutex.cpp v2
+__ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_
+__ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_
+__ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_
+__ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_
+__ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_
+__ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_
+__ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_
+__ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_
+__ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_
+#endif
+
+# spin_rw_mutex v3
+__ZN3tbb16spin_rw_mutex_v316internal_upgradeEv
+__ZN3tbb16spin_rw_mutex_v318internal_downgradeEv
+__ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv
+__ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv
+__ZN3tbb16spin_rw_mutex_v323internal_release_readerEv
+__ZN3tbb16spin_rw_mutex_v323internal_release_writerEv
+__ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv
+__ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv
+__ZN3tbb16spin_rw_mutex_v318internal_constructEv
+
+# spin_mutex.cpp
+__ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_
+__ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv
+__ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_
+__ZN3tbb10spin_mutex18internal_constructEv
+
+# mutex.cpp
+__ZN3tbb5mutex11scoped_lock16internal_acquireERS0_
+__ZN3tbb5mutex11scoped_lock16internal_releaseEv
+__ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_
+__ZN3tbb5mutex16internal_destroyEv
+__ZN3tbb5mutex18internal_constructEv
+
+# recursive_mutex.cpp
+__ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_
+__ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv
+__ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_
+__ZN3tbb15recursive_mutex16internal_destroyEv
+__ZN3tbb15recursive_mutex18internal_constructEv
+
+# queuing_mutex.cpp
+__ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_
+__ZN3tbb13queuing_mutex11scoped_lock7releaseEv
+__ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_
+__ZN3tbb13queuing_mutex18internal_constructEv
+
+# critical_section.cpp
+__ZN3tbb8internal19critical_section_v418internal_constructEv
+
+#if !TBB_NO_LEGACY
+# concurrent_hash_map
+__ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv
+
+# concurrent_queue.cpp v2
+__ZN3tbb8internal21concurrent_queue_base12internal_popEPv
+__ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv
+__ZN3tbb8internal21concurrent_queue_base21internal_set_capacityEim
+__ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv
+__ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv
+__ZN3tbb8internal21concurrent_queue_baseC2Em
+__ZN3tbb8internal21concurrent_queue_baseD2Ev
+__ZTIN3tbb8internal21concurrent_queue_baseE
+__ZTSN3tbb8internal21concurrent_queue_baseE
+__ZTVN3tbb8internal21concurrent_queue_baseE
+__ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_
+__ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv
+__ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE
+__ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev
+__ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv
+#endif
+
+# concurrent_queue v3
+# constructors
+__ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E
+__ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Em
+__ZN3tbb8internal24concurrent_queue_base_v3C2Em
+# destructors
+__ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev
+__ZN3tbb8internal24concurrent_queue_base_v3D2Ev
+# typeinfo
+__ZTIN3tbb8internal24concurrent_queue_base_v3E
+__ZTSN3tbb8internal24concurrent_queue_base_v3E
+# vtable
+__ZTVN3tbb8internal24concurrent_queue_base_v3E
+# methods
+__ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv
+__ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_
+__ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv
+__ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv
+__ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv
+__ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv
+__ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityEim
+__ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv
+__ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv
+__ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv
+__ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv
+__ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_
+
+#if !TBB_NO_LEGACY
+# concurrent_vector.cpp v2
+__ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_mPFvPvPKvmE
+__ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvmEb
+__ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_
+__ZN3tbb8internal22concurrent_vector_base16internal_grow_byEmmPFvPvmE
+__ZN3tbb8internal22concurrent_vector_base16internal_reserveEmmm
+__ZN3tbb8internal22concurrent_vector_base18internal_push_backEmRm
+__ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEmmPFvPvmE
+__ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv
+#endif
+
+# concurrent_vector v3
+__ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_mPFvPvPKvmE
+__ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvmE
+__ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_
+__ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEmmPFvPvPKvmES4_
+__ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEmmm
+__ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEmRm
+__ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEmmPFvPvPKvmES4_
+__ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv
+__ZN3tbb8internal25concurrent_vector_base_v316internal_compactEmPvPFvS2_mEPFvS2_PKvmE
+__ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_
+__ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEm
+__ZN3tbb8internal25concurrent_vector_base_v3D2Ev
+__ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEmmmPKvPFvPvmEPFvS4_S3_mE
+__ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEmmPFvPvPKvmES4_
+
+# tbb_thread
+__ZN3tbb8internal13tbb_thread_v314internal_startEPFPvS2_ES2_
+__ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv
+__ZN3tbb8internal13tbb_thread_v34joinEv
+__ZN3tbb8internal13tbb_thread_v36detachEv
+__ZN3tbb8internal15free_closure_v3EPv
+__ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE
+__ZN3tbb8internal15thread_yield_v3Ev
+__ZN3tbb8internal16thread_get_id_v3Ev
+__ZN3tbb8internal19allocate_closure_v3Em
+__ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_
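mac32-tbb-export.def follows the Mach-O conventions instead: it is a flat exported-symbols list rather than a version script, there is no global:/local: split, and every name carries an extra leading underscore (so TBB_runtime_interface_version appears as _TBB_runtime_interface_version). The #if !TBB_NO_LEGACY and #ifndef __POWERPC__ guards indicate the list is still preprocessed before the linker sees it. That single extern "C" entry is also the easiest one to exercise from client code; a small usage sketch, assuming the usual declarations from tbb/tbb_stddef.h:

#include <cstdio>
#include "tbb/tbb_stddef.h"   // TBB_INTERFACE_VERSION and TBB_runtime_interface_version()

int main() {
    // Compares the interface version the program was compiled against with
    // the one reported by the loaded libtbb.
    std::printf("headers: %d, runtime: %d\n",
                TBB_INTERFACE_VERSION, TBB_runtime_interface_version());
    return 0;
}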
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/mac64-tbb-export.def b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/mac64-tbb-export.def
new file mode 100644 (file)
index 0000000..9d8615c
--- /dev/null
@@ -0,0 +1,339 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+# cache_aligned_allocator.cpp
+__ZN3tbb8internal12NFS_AllocateEmmPv
+__ZN3tbb8internal15NFS_GetLineSizeEv
+__ZN3tbb8internal8NFS_FreeEPv
+__ZN3tbb8internal23allocate_via_handler_v3Em
+__ZN3tbb8internal25deallocate_via_handler_v3EPv
+__ZN3tbb8internal17is_malloc_used_v3Ev
+
+# task.cpp v3
+__ZN3tbb4task13note_affinityEt
+__ZN3tbb4task22internal_set_ref_countEi
+__ZN3tbb4task28internal_decrement_ref_countEv
+__ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE
+__ZN3tbb4task4selfEv
+__ZN3tbb10interface58internal9task_base7destroyERNS_4taskE
+__ZNK3tbb4task26is_owned_by_current_threadEv
+__ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE
+__ZN3tbb8internal19allocate_root_proxy8allocateEm
+__ZN3tbb8internal28affinity_partitioner_base_v36resizeEj
+__ZN3tbb8internal36get_initial_auto_partitioner_divisorEv
+__ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE
+__ZNK3tbb8internal20allocate_child_proxy8allocateEm
+__ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE
+__ZNK3tbb8internal27allocate_continuation_proxy8allocateEm
+__ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE
+__ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEm
+__ZTIN3tbb4taskE
+__ZTSN3tbb4taskE
+__ZTVN3tbb4taskE
+__ZN3tbb19task_scheduler_init19default_num_threadsEv
+__ZN3tbb19task_scheduler_init10initializeEim
+__ZN3tbb19task_scheduler_init10initializeEi
+__ZN3tbb19task_scheduler_init9terminateEv
+__ZN3tbb8internal26task_scheduler_observer_v37observeEb
+__ZN3tbb10empty_task7executeEv
+__ZN3tbb10empty_taskD0Ev
+__ZN3tbb10empty_taskD1Ev
+__ZTIN3tbb10empty_taskE
+__ZTSN3tbb10empty_taskE
+__ZTVN3tbb10empty_taskE
+
+#if !TBB_NO_LEGACY
+# task_v2.cpp
+__ZN3tbb4task7destroyERS0_
+#endif
+
+# Exception handling in task scheduler
+__ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEm
+__ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE
+__ZNK3tbb18task_group_context28is_group_execution_cancelledEv
+__ZN3tbb18task_group_context22cancel_group_executionEv
+__ZN3tbb18task_group_context26register_pending_exceptionEv
+__ZN3tbb18task_group_context5resetEv
+__ZN3tbb18task_group_context4initEv
+__ZN3tbb18task_group_contextD1Ev
+__ZN3tbb18task_group_contextD2Ev
+__ZNK3tbb18captured_exception4nameEv
+__ZNK3tbb18captured_exception4whatEv
+__ZN3tbb18captured_exception10throw_selfEv
+__ZN3tbb18captured_exception3setEPKcS2_
+__ZN3tbb18captured_exception4moveEv
+__ZN3tbb18captured_exception5clearEv
+__ZN3tbb18captured_exception7destroyEv
+__ZN3tbb18captured_exception8allocateEPKcS2_
+__ZN3tbb18captured_exceptionD0Ev
+__ZN3tbb18captured_exceptionD1Ev
+__ZTIN3tbb18captured_exceptionE
+__ZTSN3tbb18captured_exceptionE
+__ZTVN3tbb18captured_exceptionE
+__ZTIN3tbb13tbb_exceptionE
+__ZTSN3tbb13tbb_exceptionE
+__ZTVN3tbb13tbb_exceptionE
+
+# Symbols for exceptions thrown from TBB
+__ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev
+__ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE
+__ZNSt13runtime_errorD1Ev
+__ZTISt13runtime_error
+__ZTSSt13runtime_error
+__ZNSt16invalid_argumentD1Ev
+__ZTISt16invalid_argument
+__ZTSSt16invalid_argument
+__ZNSt11range_errorD1Ev
+__ZTISt11range_error
+__ZTSSt11range_error
+__ZNSt12length_errorD1Ev
+__ZTISt12length_error
+__ZTSSt12length_error
+__ZNSt12out_of_rangeD1Ev
+__ZTISt12out_of_range
+__ZTSSt12out_of_range
+__ZN3tbb14bad_last_allocD0Ev
+__ZN3tbb14bad_last_allocD1Ev
+__ZNK3tbb14bad_last_alloc4whatEv
+__ZTIN3tbb14bad_last_allocE
+__ZTSN3tbb14bad_last_allocE
+__ZTVN3tbb14bad_last_allocE
+__ZN3tbb12missing_waitD0Ev
+__ZN3tbb12missing_waitD1Ev
+__ZNK3tbb12missing_wait4whatEv
+__ZTIN3tbb12missing_waitE
+__ZTSN3tbb12missing_waitE
+__ZTVN3tbb12missing_waitE
+__ZN3tbb27invalid_multiple_schedulingD0Ev
+__ZN3tbb27invalid_multiple_schedulingD1Ev
+__ZNK3tbb27invalid_multiple_scheduling4whatEv
+__ZTIN3tbb27invalid_multiple_schedulingE
+__ZTSN3tbb27invalid_multiple_schedulingE
+__ZTVN3tbb27invalid_multiple_schedulingE
+__ZN3tbb13improper_lockD0Ev
+__ZN3tbb13improper_lockD1Ev
+__ZNK3tbb13improper_lock4whatEv
+__ZTIN3tbb13improper_lockE
+__ZTSN3tbb13improper_lockE
+__ZTVN3tbb13improper_lockE
+
+# tbb_misc.cpp
+__ZN3tbb17assertion_failureEPKciS1_S1_
+__ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E
+__ZN3tbb8internal13handle_perrorEiPKc
+__ZN3tbb8internal15runtime_warningEPKcz
+_TBB_runtime_interface_version
+
+# itt_notify.cpp
+__ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv
+__ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_
+__ZN3tbb8internal19itt_load_pointer_v3EPKv
+__ZN3tbb8internal20itt_set_sync_name_v3EPvPKc
+
+# pipeline.cpp
+__ZTIN3tbb6filterE
+__ZTSN3tbb6filterE
+__ZTVN3tbb6filterE
+__ZN3tbb6filterD2Ev
+__ZN3tbb8pipeline10add_filterERNS_6filterE
+__ZN3tbb8pipeline12inject_tokenERNS_4taskE
+__ZN3tbb8pipeline13remove_filterERNS_6filterE
+__ZN3tbb8pipeline3runEm
+__ZN3tbb8pipeline3runEmRNS_18task_group_contextE
+__ZN3tbb8pipeline5clearEv
+__ZN3tbb19thread_bound_filter12process_itemEv
+__ZN3tbb19thread_bound_filter16try_process_itemEv
+__ZN3tbb8pipelineC1Ev
+__ZN3tbb8pipelineC2Ev
+__ZN3tbb8pipelineD0Ev
+__ZN3tbb8pipelineD1Ev
+__ZN3tbb8pipelineD2Ev
+__ZTIN3tbb8pipelineE
+__ZTSN3tbb8pipelineE
+__ZTVN3tbb8pipelineE
+
+# queuing_rw_mutex.cpp
+__ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv
+__ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv
+__ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b
+__ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv
+__ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b
+__ZN3tbb16queuing_rw_mutex18internal_constructEv
+
+# reader_writer_lock.cpp
+__ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv
+__ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_
+__ZN3tbb10interface518reader_writer_lock13try_lock_readEv
+__ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv
+__ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_
+__ZN3tbb10interface518reader_writer_lock16internal_destroyEv
+__ZN3tbb10interface518reader_writer_lock18internal_constructEv
+__ZN3tbb10interface518reader_writer_lock4lockEv
+__ZN3tbb10interface518reader_writer_lock6unlockEv
+__ZN3tbb10interface518reader_writer_lock8try_lockEv
+__ZN3tbb10interface518reader_writer_lock9lock_readEv
+
+#if !TBB_NO_LEGACY
+# spin_rw_mutex.cpp v2
+__ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_
+__ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_
+__ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_
+__ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_
+__ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_
+__ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_
+__ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_
+__ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_
+__ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_
+#endif
+
+# spin_rw_mutex v3
+__ZN3tbb16spin_rw_mutex_v316internal_upgradeEv
+__ZN3tbb16spin_rw_mutex_v318internal_downgradeEv
+__ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv
+__ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv
+__ZN3tbb16spin_rw_mutex_v323internal_release_readerEv
+__ZN3tbb16spin_rw_mutex_v323internal_release_writerEv
+__ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv
+__ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv
+__ZN3tbb16spin_rw_mutex_v318internal_constructEv
+
+# spin_mutex.cpp
+__ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_
+__ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv
+__ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_
+__ZN3tbb10spin_mutex18internal_constructEv
+
+# mutex.cpp
+__ZN3tbb5mutex11scoped_lock16internal_acquireERS0_
+__ZN3tbb5mutex11scoped_lock16internal_releaseEv
+__ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_
+__ZN3tbb5mutex16internal_destroyEv
+__ZN3tbb5mutex18internal_constructEv
+
+# recursive_mutex.cpp
+__ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_
+__ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv
+__ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_
+__ZN3tbb15recursive_mutex16internal_destroyEv
+__ZN3tbb15recursive_mutex18internal_constructEv
+
+# queuing_mutex.cpp
+__ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_
+__ZN3tbb13queuing_mutex11scoped_lock7releaseEv
+__ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_
+__ZN3tbb13queuing_mutex18internal_constructEv
+
+# critical_section.cpp
+__ZN3tbb8internal19critical_section_v418internal_constructEv
+
+#if !TBB_NO_LEGACY
+# concurrent_hash_map
+__ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv
+
+# concurrent_queue.cpp v2
+__ZN3tbb8internal21concurrent_queue_base12internal_popEPv
+__ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv
+__ZN3tbb8internal21concurrent_queue_base21internal_set_capacityElm
+__ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv
+__ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv
+__ZN3tbb8internal21concurrent_queue_baseC2Em
+__ZN3tbb8internal21concurrent_queue_baseD2Ev
+__ZTIN3tbb8internal21concurrent_queue_baseE
+__ZTSN3tbb8internal21concurrent_queue_baseE
+__ZTVN3tbb8internal21concurrent_queue_baseE
+__ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_
+__ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv
+__ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE
+__ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev
+__ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv
+#endif
+
+# concurrent_queue v3
+# constructors
+__ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E
+__ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Em
+__ZN3tbb8internal24concurrent_queue_base_v3C2Em
+# destructors
+__ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev
+__ZN3tbb8internal24concurrent_queue_base_v3D2Ev
+# typeinfo
+__ZTIN3tbb8internal24concurrent_queue_base_v3E
+__ZTSN3tbb8internal24concurrent_queue_base_v3E
+# vtable
+__ZTVN3tbb8internal24concurrent_queue_base_v3E
+# methods
+__ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_
+__ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv
+__ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv
+__ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv
+__ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv
+__ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv
+__ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv
+__ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityElm
+__ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv
+__ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv
+__ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv
+__ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_
+
+#if !TBB_NO_LEGACY
+# concurrent_vector.cpp v2
+__ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_mPFvPvPKvmE
+__ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvmEb
+__ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_
+__ZN3tbb8internal22concurrent_vector_base16internal_grow_byEmmPFvPvmE
+__ZN3tbb8internal22concurrent_vector_base16internal_reserveEmmm
+__ZN3tbb8internal22concurrent_vector_base18internal_push_backEmRm
+__ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEmmPFvPvmE
+__ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv
+#endif
+
+# concurrent_vector v3
+__ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_mPFvPvPKvmE
+__ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvmE
+__ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_
+__ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEmmPFvPvPKvmES4_
+__ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEmmm
+__ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEmRm
+__ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEmmPFvPvPKvmES4_
+__ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv
+__ZN3tbb8internal25concurrent_vector_base_v316internal_compactEmPvPFvS2_mEPFvS2_PKvmE
+__ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_
+__ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEm
+__ZN3tbb8internal25concurrent_vector_base_v3D2Ev
+__ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEmmmPKvPFvPvmEPFvS4_S3_mE
+__ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEmmPFvPvPKvmES4_
+
+# tbb_thread
+__ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv
+__ZN3tbb8internal13tbb_thread_v36detachEv
+__ZN3tbb8internal16thread_get_id_v3Ev
+__ZN3tbb8internal15free_closure_v3EPv
+__ZN3tbb8internal13tbb_thread_v34joinEv
+__ZN3tbb8internal13tbb_thread_v314internal_startEPFPvS2_ES2_
+__ZN3tbb8internal19allocate_closure_v3Em
+__ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_
+__ZN3tbb8internal15thread_yield_v3Ev
+__ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE
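mac64-tbb-export.def is essentially the 64-bit twin of the previous list, dropping the 32-bit-only ___TBB_machine_store8 helpers. Common to all four export lists are the exception classes (captured_exception, bad_last_alloc, missing_wait, improper_lock) whose typeinfo, vtable and what() symbols must stay visible so that exceptions thrown inside the task scheduler can be caught in user code. A minimal client-side sketch, assuming the standard tbb/tbb_exception.h and parallel_for declarations (illustration only, not part of this commit):

#include <cstdio>
#include <stdexcept>
#include "tbb/tbb_exception.h"
#include "tbb/parallel_for.h"
#include "tbb/blocked_range.h"

struct ThrowingBody {
    void operator()(const tbb::blocked_range<int>& r) const {
        if (r.begin() == 0)
            throw std::runtime_error("boom");   // thrown inside a worker task
    }
};

int main() {
    try {
        tbb::parallel_for(tbb::blocked_range<int>(0, 100), ThrowingBody());
    } catch (tbb::captured_exception& e) {
        // Without exact exception propagation TBB rethrows a captured_exception
        // that carries the original type name and message.
        std::printf("caught %s: %s\n", e.name(), e.what());
    } catch (std::exception& e) {
        // With exact propagation the original std::runtime_error arrives here.
        std::printf("caught std::exception: %s\n", e.what());
    }
    return 0;
}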
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/mailbox.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/mailbox.h
new file mode 100644 (file)
index 0000000..1edd9e9
--- /dev/null
@@ -0,0 +1,191 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _TBB_mailbox_H
+#define _TBB_mailbox_H
+
+#include "tbb/tbb_stddef.h"
+#include "tbb/cache_aligned_allocator.h"
+
+#include "scheduler_common.h"
+
+namespace tbb {
+namespace internal {
+
+class mail_outbox;
+
+struct task_proxy : public task {
+    static const intptr_t pool_bit = 1;
+    static const intptr_t mailbox_bit = 2;
+    /* All but two low-order bits represent a (task*).
+       Two low-order bits mean:
+       1 = proxy is/was/will be in task pool
+       2 = proxy is/was/will be in mailbox */
+    intptr_t task_and_tag;
+
+    //! Pointer to next task_proxy in a mailbox
+    task_proxy* next_in_mailbox;
+
+    //! Mailbox to which this was mailed.
+    mail_outbox* outbox;
+};
+
+//! Internal representation of mail_outbox, without padding.
+class unpadded_mail_outbox {
+protected:
+    //! Pointer to first task_proxy in mailbox, or NULL if box is empty. 
+    task_proxy* my_first;
+
+    //! Pointer to pointer that will point to next item in the queue.  Never NULL.
+    task_proxy** my_last;
+
+    //! Owner of mailbox is not executing a task, and has drained its own task pool.
+    bool my_is_idle;
+};
+
+//! Class representing where mail is put.
+/** Padded to occupy a cache line. */
+class mail_outbox: unpadded_mail_outbox {
+    char pad[NFS_MaxLineSize-sizeof(unpadded_mail_outbox)];
+
+    task_proxy* internal_pop() {
+        //! No fence on load of my_first, because if it is NULL, there's nothing further to read from another thread.
+        task_proxy* first = my_first;
+        if( first ) {
+            // There is a first item in the mailbox.  See if there is a second.
+            if( task_proxy* second = __TBB_load_with_acquire(first->next_in_mailbox) ) {
+                // There are at least two items, so first item can be popped easily.
+                __TBB_store_with_release( my_first, second );
+            } else {
+                // There is only one item.  Some care is required to pop it.
+                my_first = NULL;
+                if( (task_proxy**)__TBB_CompareAndSwapW(&my_last, (intptr_t)&my_first,
+                    (intptr_t)&first->next_in_mailbox)==&first->next_in_mailbox ) 
+                {
+                    // Successfully transitioned mailbox from having one item to having none.
+                    __TBB_ASSERT(!first->next_in_mailbox,NULL);
+                } else {
+                    // Some other thread updated my_last but has not yet filled in first->next_in_mailbox.
+                    // Wait until first item points to second item.
+                    atomic_backoff backoff;
+                    while( !(second=const_cast<volatile task_proxy*>(first)->next_in_mailbox) ) 
+                        backoff.pause();
+                    my_first = second;
+                } 
+            }
+        }
+        return first;
+    }
+public:
+    friend class mail_inbox;
+
+    //! Push task_proxy onto the mailbox queue of another thread.
+    /** Implementation is wait-free. */
+    void push( task_proxy& t ) {
+        __TBB_ASSERT(&t, NULL);
+        t.next_in_mailbox = NULL; 
+        task_proxy** link = (task_proxy**)__TBB_FetchAndStoreW(&my_last,(intptr_t)&t.next_in_mailbox);
+        // No release fence required for the next store, because there are no memory operations 
+        // between the previous fully fenced atomic operation and the store.
+        *link = &t;
+    }
+
+    //! Construct *this as a mailbox from zeroed memory.
+    /** Raise assertion if *this is not previously zeroed, or sizeof(*this) is wrong.
+        This method is provided instead of a full constructor since we know the object
+        will be constructed in zeroed memory. */
+    void construct() {
+        __TBB_ASSERT( sizeof(*this)==NFS_MaxLineSize, NULL );
+        __TBB_ASSERT( !my_first, NULL );
+        __TBB_ASSERT( !my_last, NULL );
+        __TBB_ASSERT( !my_is_idle, NULL );
+        my_last=&my_first;
+    }
+
+    //! Drain the mailbox 
+    intptr_t drain() {
+        intptr_t k = 0;
+        // No fences here because other threads have already quit.
+        for( ; task_proxy* t = my_first; ++k ) {
+            my_first = t->next_in_mailbox;
+            NFS_Free((char*)t - task_prefix_reservation_size);
+        }
+        return k;  
+    }
+
+    //! True if thread that owns this mailbox is looking for work.
+    bool recipient_is_idle() {
+        return my_is_idle;
+    }
+}; // class mail_outbox
+
+//! Class representing source of mail.
+class mail_inbox {
+    //! Corresponding sink where mail that we receive will be put.
+    mail_outbox* my_putter;
+public:
+    //! Construct unattached inbox
+    mail_inbox() : my_putter(NULL) {}
+
+    //! Attach inbox to a corresponding outbox. 
+    void attach( mail_outbox& putter ) {
+        __TBB_ASSERT(!my_putter,"already attached");
+        my_putter = &putter;
+    }
+    //! Detach inbox from its outbox
+    void detach() {
+        __TBB_ASSERT(my_putter,"not attached");
+        my_putter = NULL;
+    }
+    //! Get next piece of mail, or NULL if mailbox is empty.
+    task_proxy* pop() {
+        return my_putter->internal_pop();
+    }
+    //! Indicate whether thread that reads this mailbox is idle.
+    /** Raises assertion failure if mailbox is redundantly marked as not idle. */
+    void set_is_idle( bool value ) {
+        if( my_putter ) {
+            __TBB_ASSERT( my_putter->my_is_idle || value, "attempt to redundantly mark mailbox as not idle" );
+            my_putter->my_is_idle = value;
+        }
+    }
+    //! Check whether the thread that reads this mailbox is in the given idle state.
+    bool is_idle_state ( bool value ) const {
+        return !my_putter || my_putter->my_is_idle == value;
+    }
+
+#if DO_ITT_NOTIFY
+    //! Get pointer to corresponding outbox used for ITT_NOTIFY calls.
+    void* outbox() const {return my_putter;}
+#endif /* DO_ITT_NOTIFY */ 
+}; // class mail_inbox
+
+} // namespace internal
+} // namespace tbb
+
+#endif /* _TBB_mailbox_H */
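The push()/internal_pop() pair above forms a wait-free multiple-producer, single-consumer queue: a producer atomically swings my_last onto its node's next field and then fills the old tail slot, while the consumer resolves the delicate one-item case with a compare-and-swap plus a short spin. A minimal standalone C++11 sketch of the same idea, assuming std::atomic in place of __TBB_FetchAndStoreW/__TBB_CompareAndSwapW (names are illustrative, not part of the TBB sources):

#include <atomic>
#include <cstddef>
#include <cstdio>

// A trimmed-down analogue of mail_outbox/mail_inbox: wait-free multi-producer
// push, single-consumer pop.  Illustrative only.
struct node {
    int value;
    std::atomic<node*> next;
    explicit node( int v ) : value(v), next((node*)NULL) {}
};

class mpsc_mailbox {
    std::atomic<node*> first;                  // head, popped by the single consumer
    std::atomic<std::atomic<node*>*> last;     // slot where the next push will land
public:
    mpsc_mailbox() : first((node*)NULL), last(&first) {}

    // Wait-free push: atomically claim the current tail slot, then fill it.
    void push( node& n ) {
        n.next.store(NULL, std::memory_order_relaxed);
        std::atomic<node*>* slot = last.exchange(&n.next, std::memory_order_acq_rel);
        slot->store(&n, std::memory_order_release);
    }

    // Pop by the single consumer.  Mirrors internal_pop: the one-item case
    // needs a CAS on 'last', plus a spin if a producer is mid-push.
    node* pop() {
        node* n = first.load(std::memory_order_acquire);
        if( !n ) return NULL;
        node* second = n->next.load(std::memory_order_acquire);
        if( second ) {
            first.store(second, std::memory_order_relaxed);
        } else {
            first.store(NULL, std::memory_order_relaxed);
            std::atomic<node*>* expected = &n->next;
            if( !last.compare_exchange_strong(expected, &first,
                                              std::memory_order_acq_rel) ) {
                // A concurrent push claimed n->next but has not filled it yet:
                // wait until it does (the real code uses atomic_backoff).
                while( !(second = n->next.load(std::memory_order_acquire)) ) {}
                first.store(second, std::memory_order_relaxed);
            }
        }
        return n;
    }
};

int main() {
    mpsc_mailbox box;
    node a(1), b(2);
    box.push(a);
    box.push(b);
    while( node* n = box.pop() )
        std::printf("popped %d\n", n->value);
    return 0;
}

The essential property is that push() performs exactly one atomic exchange and one store, so a producer is never blocked by the consumer or by other producers.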
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/market.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/market.cpp
new file mode 100644 (file)
index 0000000..124f9ac
--- /dev/null
@@ -0,0 +1,304 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/tbb_stddef.h"
+
+#if __TBB_ARENA_PER_MASTER
+
+#include "market.h"
+#include "tbb_main.h"
+#include "governor.h"
+#include "scheduler.h"
+#include "itt_notify.h"
+
+namespace tbb {
+namespace internal {
+
+//------------------------------------------------------------------------
+// market
+//------------------------------------------------------------------------
+
+market::market ( unsigned max_num_workers, size_t stack_size )
+    : my_ref_count(1)
+    , my_stack_size(stack_size)
+    , my_max_num_workers(max_num_workers)
+{
+    my_next_arena = my_arenas.begin();
+
+    // Once created RML server will start initializing workers that will need 
+    // global market instance to get worker stack size
+    my_server = governor::create_rml_server( *this );
+    __TBB_ASSERT( my_server, "Failed to create RML server" );
+}
+
+
+market& market::global_market ( unsigned max_num_workers, size_t stack_size ) {
+    global_market_mutex_type::scoped_lock lock( theMarketMutex );
+    market *m = theMarket;
+    if ( m ) {
+        ++m->my_ref_count;
+        if ( m->my_stack_size < stack_size )
+            runtime_warning( "Newer master request for larger stack cannot be satisfied\n" );
+    }
+    else {
+        max_num_workers = max( governor::default_num_threads() - 1, max_num_workers );
+        // at least 1 worker is required to support starvation resistant tasks
+        if( max_num_workers==0 ) max_num_workers = 1;
+        // Create the global market instance
+        size_t size = sizeof(market);
+#if __TBB_TASK_GROUP_CONTEXT
+        __TBB_ASSERT( __TBB_offsetof(market, my_workers) + sizeof(generic_scheduler*) == sizeof(market),
+                      "my_workers must be the last data field of the market class");
+        size += sizeof(generic_scheduler*) * (max_num_workers - 1);
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+        __TBB_InitOnce::add_ref();
+        void* storage = NFS_Allocate(size, 1, NULL);
+        memset( storage, 0, size );
+        // Initialize and publish global market
+        m = new (storage) market( max_num_workers, stack_size );
+        theMarket = m;
+    }
+    return *m;
+}
+
+void market::destroy () {
+#if __TBB_COUNT_TASK_NODES
+    if ( my_task_node_count )
+        runtime_warning( "Leaked %ld task objects\n", (intptr_t)my_task_node_count );
+#endif /* __TBB_COUNT_TASK_NODES */
+    this->~market();
+    NFS_Free( this );
+    __TBB_InitOnce::remove_ref();
+}
+
+void market::release () {
+    __TBB_ASSERT( theMarket == this, "Global market instance was destroyed prematurely?" );
+    bool do_release = false;
+    {
+        global_market_mutex_type::scoped_lock lock(theMarketMutex);
+        if ( --my_ref_count == 0 ) {
+            do_release = true;
+            theMarket = NULL;
+        }
+    }
+    if( do_release )
+        my_server->request_close_connection();
+}
+
+arena& market::create_arena ( unsigned max_num_workers, size_t stack_size ) {
+    market &m = global_market( max_num_workers, stack_size ); // increases market's ref count
+    arena& a = arena::allocate_arena( m, min(max_num_workers, m.my_max_num_workers) );
+    // Add newly created arena into the existing market's list.
+    spin_mutex::scoped_lock lock(m.my_arenas_list_mutex);
+    m.my_arenas.push_front( a );
+    if ( m.my_arenas.size() == 1 )
+        m.my_next_arena = m.my_arenas.begin();
+    return a;
+}
+
+void market::detach_arena ( arena& a ) {
+    __TBB_ASSERT( theMarket == this, "Global market instance was destroyed prematurely?" );
+    spin_mutex::scoped_lock lock(my_arenas_list_mutex);
+    __TBB_ASSERT( my_next_arena != my_arenas.end(), NULL );
+    if ( &*my_next_arena == &a )
+        if ( ++my_next_arena == my_arenas.end() && my_arenas.size() > 1 )
+            my_next_arena = my_arenas.begin();
+    my_arenas.remove( a );
+}
+
+arena* market::arena_in_need () {
+    spin_mutex::scoped_lock lock(my_arenas_list_mutex);
+    if ( my_arenas.empty() )
+        return NULL;
+    __TBB_ASSERT( my_next_arena != my_arenas.end(), NULL );
+    arena_list_type::iterator it = my_next_arena;
+    do {
+        arena& a = *it;
+        if ( ++it == my_arenas.end() )
+            it = my_arenas.begin();
+        if ( a.num_workers_active() < a.my_num_workers_allotted ) {
+            ++a.my_num_threads_active;
+            my_next_arena = it;
+            return &a;
+        }
+    } while ( it != my_next_arena );
+    return NULL;
+}
+
+void market::update_allotment ( int max_workers ) {
+    unsigned carry = 0;
+    spin_mutex::scoped_lock lock(my_arenas_list_mutex);
+    arena_list_type::iterator it = my_arenas.begin();
+    int total_demand = my_total_demand;
+    max_workers = min(max_workers, total_demand);
+    if ( total_demand > 0 ) {
+        for ( ; it != my_arenas.end(); ++it ) {
+            arena& a = *it;
+            int tmp = a.my_num_workers_requested * max_workers + carry;
+            int allotted = tmp / total_demand;
+            carry = tmp % total_demand;
+            a.my_num_workers_allotted = min( allotted, (int)a.my_max_num_workers );
+        }
+    }
+    else {
+        for ( ; it != my_arenas.end(); ++it ) {
+            it->my_num_workers_allotted = 0;
+        }
+    }
+}
+
+/** The balancing algorithm is subject to data races. However, the aberrations
+    caused by the races are not fatal and generally only temporarily affect the
+    fairness of the distribution of workers among arenas. **/
+void market::adjust_demand ( arena& a, int delta ) {
+    __TBB_ASSERT( theMarket, "market instance was destroyed prematurely?" );
+    a.my_num_workers_requested += delta;
+    my_total_demand += delta;
+    update_allotment( my_max_num_workers );
+    // Must be called outside of any locks
+    my_server->adjust_job_count_estimate( delta );
+    GATHER_STATISTIC( governor::local_scheduler_if_initialized() ? ++governor::local_scheduler_if_initialized()->my_counters.gate_switches : 0 );
+}
+
+void market::process( job& j ) {
+    generic_scheduler& s = static_cast<generic_scheduler&>(j);
+    while ( arena *a = arena_in_need() )
+        a->process(s);
+    GATHER_STATISTIC( ++s.my_counters.market_roundtrips );
+}
+
+void market::cleanup( job& j ) {
+    __TBB_ASSERT( theMarket != this, NULL );
+    generic_scheduler& s = static_cast<generic_scheduler&>(j);
+    generic_scheduler* mine = governor::local_scheduler_if_initialized();
+    __TBB_ASSERT( !mine || mine->arena_index!=0, NULL );
+    if( mine!=&s ) {
+        governor::assume_scheduler( &s );
+        generic_scheduler::cleanup_worker( &s, mine!=NULL );
+        governor::assume_scheduler( mine );
+    } else {
+        generic_scheduler::cleanup_worker( &s, true );
+    }
+}
+
+void market::acknowledge_close_connection() {
+    destroy();
+}
+
+::rml::job* market::create_one_job() {
+    unsigned index = ++my_num_workers;
+    __TBB_ASSERT( index > 0, NULL );
+    ITT_THREAD_SET_NAME(_T("TBB Worker Thread"));
+    // index serves as a hint decreasing conflicts between workers when they migrate between arenas
+    generic_scheduler* s = generic_scheduler::create_worker( *this, index );
+#if __TBB_TASK_GROUP_CONTEXT
+    __TBB_ASSERT( !my_workers[index - 1], NULL );
+    my_workers[index - 1] = s;
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+    governor::sign_on(s);
+    return s;
+}
+
+#if __TBB_TASK_GROUP_CONTEXT
+/** Propagates cancellation down the tree of dependent contexts by walking each 
+    thread's local list of contexts **/
+void market::propagate_cancellation ( task_group_context& ctx ) {
+    __TBB_ASSERT ( ctx.my_cancellation_requested, "No cancellation request in the context" );
+    // The whole propagation algorithm is under the lock in order to ensure correctness 
+    // in case of parallel cancellations at the different levels of the context tree.
+    // See the note 1 at the bottom of this file.
+    global_market_mutex_type::scoped_lock lock(theMarketMutex);
+    // Advance global cancellation epoch
+    __TBB_FetchAndAddWrelease(&global_cancel_count, 1);
+    // Propagate to all workers and masters and sync up their local epochs with the global one
+    unsigned num_workers = my_num_workers;
+    for ( unsigned i = 0; i < num_workers; ++i ) {
+        generic_scheduler *s = my_workers[i];
+        // If the worker is only about to be registered, skip it.
+        if ( s )
+            s->propagate_cancellation();
+    }
+    arena_list_type::iterator it = my_arenas.begin();
+    for ( ; it != my_arenas.end(); ++it ) {
+        generic_scheduler *s = it->slot[0].my_scheduler;
+        // If the master is under construction, skip it.
+        if ( s )
+            s->propagate_cancellation();
+    }
+}
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+#if __TBB_COUNT_TASK_NODES 
+intptr_t market::workers_task_node_count() {
+    intptr_t result = 0;
+    spin_mutex::scoped_lock lock(my_arenas_list_mutex);
+    for ( arena_list_type::iterator it = my_arenas.begin(); it != my_arenas.end(); ++it )
+        result += it->workers_task_node_count();
+    return result;
+}
+#endif /* __TBB_COUNT_TASK_NODES */
+
+} // namespace internal
+} // namespace tbb
+
+#endif /* __TBB_ARENA_PER_MASTER */
+
+/*
+    Notes:
+
+1.  Consider parallel cancellations at the different levels of the context tree:
+
+        Ctx1 <- Cancelled by Thread1            |- Thread2 started processing
+         |                                      |
+        Ctx2                                    |- Thread1 started processing
+         |                                   T1 |- Thread2 finishes and syncs up local counters
+        Ctx3 <- Cancelled by Thread2            |
+         |                                      |- Ctx5 is bound to Ctx2
+        Ctx4                                    |
+                                             T2 |- Thread1 reaches Ctx2
+                                             
+    Thread-propagator of each cancellation increments global counter. However the thread 
+    propagating the cancellation from the outermost context (Thread1) may be the last 
+    to finish. Which means that the local counters may be synchronized earlier (by Thread2, 
+    at Time1) than it propagated cancellation into Ctx2 (at time Time2). If a new context 
+    (Ctx5) is created and bound to Ctx2 between Time1 and Time2, checking its parent only 
+    (Ctx2) may result in cancellation request being lost.
+
+    This issue is solved by doing the whole propagation under the lock (the_scheduler_list_mutex).
+
+    If we need more concurrency while processing parallel cancellations, we could try 
+    the following modification of the propagation algorithm:
+
+    advance global counter and remember it
+    for each thread:
+        scan thread's list of contexts
+    for each thread:
+        sync up its local counter only if the global counter has not been changed
+
+    However this version of the algorithm requires more analysis and verification.
+*/
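market::update_allotment() above distributes max_workers across the arenas in proportion to each arena's my_num_workers_requested, carrying the division remainder from one arena to the next so that no fractional worker is lost and the allotments sum to at most max_workers. A standalone sketch of that arithmetic with illustrative names (the real code additionally clamps each arena to its own my_max_num_workers):

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

// Proportional split with a running remainder, as in market::update_allotment:
// each arena gets demand[i]*max_workers/total, and the division remainder is
// carried into the next arena's share.
std::vector<int> allot( const std::vector<int>& demand, int max_workers ) {
    int total = 0;
    for( std::size_t i = 0; i < demand.size(); ++i ) total += demand[i];
    std::vector<int> allotted(demand.size(), 0);
    if( total <= 0 ) return allotted;
    max_workers = std::min(max_workers, total);
    int carry = 0;
    for( std::size_t i = 0; i < demand.size(); ++i ) {
        int tmp = demand[i] * max_workers + carry;
        allotted[i] = tmp / total;
        carry = tmp % total;
    }
    return allotted;
}

int main() {
    // Three arenas asking for 5, 3 and 1 workers, 4 workers available.
    std::vector<int> demand;
    demand.push_back(5); demand.push_back(3); demand.push_back(1);
    std::vector<int> a = allot(demand, 4);
    for( std::size_t i = 0; i < a.size(); ++i ) std::printf("%d ", a[i]);
    std::printf("\n");
    return 0;
}

With demands 5, 3 and 1 and four available workers, the carry makes the split come out as 2, 1 and 1; a plain floor division without the carry would yield 2, 1 and 0 and lose a worker.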
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/market.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/market.h
new file mode 100644 (file)
index 0000000..0d70573
--- /dev/null
@@ -0,0 +1,210 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _TBB_market_H
+#define _TBB_market_H
+
+#include "tbb/tbb_stddef.h"
+
+#if __TBB_ARENA_PER_MASTER
+
+#include "tbb/atomic.h"
+#include "tbb/spin_mutex.h"
+#include "../rml/include/rml_tbb.h"
+
+#include "intrusive_list.h"
+
+#if defined(_MSC_VER) && defined(_Wp64)
+    // Workaround for overzealous compiler warnings in /Wp64 mode
+    #pragma warning (push)
+    #pragma warning (disable: 4244)
+#endif
+
+namespace tbb {
+
+class task_group_context;
+
+namespace internal {
+
+class arena;
+class generic_scheduler;
+
+//------------------------------------------------------------------------
+// Class market
+//------------------------------------------------------------------------
+
+class market : no_copy, rml::tbb_client {
+    friend void ITT_DoUnsafeOneTimeInitialization ();
+
+    typedef intrusive_list<arena> arena_list_type;
+
+    //! Currently active global market
+    static market* theMarket;
+
+    typedef spin_mutex global_market_mutex_type;
+
+    //! Mutex guarding creation/destruction of theMarket, insertions/deletions in my_arenas, and cancellation propagation
+    static global_market_mutex_type  theMarketMutex;
+
+    //! Reference count controlling market object lifetime
+    intptr_t my_ref_count;
+
+    //! List of active arenas
+    arena_list_type my_arenas;
+
+    //! The first arena to be checked when idle worker seeks for an arena to enter
+    /** The check happens in round-robin fashion. **/
+    arena_list_type::iterator my_next_arena;
+
+    //! Lightweight mutex guarding accounting operations with arenas list
+    spin_mutex  my_arenas_list_mutex;
+
+    //! Number of workers that were requested by all arenas
+    atomic<int> my_total_demand;
+
+    //! Pointer to the RML server object that services this TBB instance.
+    rml::tbb_server* my_server;
+
+    //! Stack size of worker threads
+    size_t my_stack_size;
+
+    //! Number of workers requested from the underlying resource manager
+    unsigned my_max_num_workers;
+
+#if __TBB_COUNT_TASK_NODES
+    //! Net number of nodes that have been allocated from heap.
+    /** Updated each time a scheduler or arena is destroyed. */
+    atomic<intptr_t> my_task_node_count;
+#endif /* __TBB_COUNT_TASK_NODES */
+
+    //! Number of workers that have been delivered by RML
+    atomic<unsigned> my_num_workers;
+
+    //! Constructor
+    market ( unsigned max_num_workers, size_t stack_size );
+
+    //! Factory method creating new market object
+    static market& global_market ( unsigned max_num_workers, size_t stack_size );
+
+    //! Destroys and deallocates market object created by market::create()
+    void destroy ();
+
+    //! Returns next arena that needs more workers, or NULL.
+    arena* arena_in_need ();
+
+    //! Recalculates the number of workers assigned to each arena.
+    /** The actual number of workers servicing a particular arena may temporarily 
+        deviate from the calculated value. **/
+    void update_allotment ( int max_workers );
+
+    //! Returns number of masters doing computational (CPU-intensive) work
+    int num_active_masters () { return 1; }  // APM TODO: replace with a real mechanism
+
+    // // //
+    // Implementation of rml::tbb_client interface methods
+
+    /*override*/ version_type version () const { return 0; }
+
+    /*override*/ unsigned max_job_count () const { return my_max_num_workers; }
+
+    /*override*/ size_t min_stack_size () const { return worker_stack_size(); }
+
+    /*override*/ policy_type policy () const { return throughput; }
+
+    /*override*/ job* create_one_job ();
+
+    /*override*/ void cleanup( job& j );
+
+    /*override*/ void acknowledge_close_connection ();
+
+    /*override*/ void process( job& j );
+
+public:
+    //! Creates an arena object
+    /** If necessary, also creates global market instance, and boosts its ref count.
+        Each call to create_arena() must be matched by the call to arena::free_arena(). **/
+    static arena& create_arena ( unsigned max_num_workers, size_t stack_size );
+
+    //! Removes the arena from the market's list
+    void detach_arena ( arena& );
+
+    //! Decrements market's refcount and destroys it in the end
+    void release ();
+
+    //! Request that the arena's demand for workers be adjusted.
+    /** Concurrent invocations are possible only on behalf of different arenas. **/
+    void adjust_demand ( arena&, int delta );
+
+    //! Returns the requested stack size of worker threads.
+    size_t worker_stack_size () const { return my_stack_size; }
+
+#if __TBB_COUNT_TASK_NODES
+    //! Returns the number of task objects "living" in worker threads
+    intptr_t workers_task_node_count();
+
+    //! Net number of nodes that have been allocated from heap.
+    /** Updated each time a scheduler or arena is destroyed. */
+    void update_task_node_count( intptr_t delta ) { my_task_node_count += delta; }
+#endif /* __TBB_COUNT_TASK_NODES */
+
+#if __TBB_TASK_GROUP_CONTEXT
+    //! Propagates cancellation request to all descendants of the context.
+    void propagate_cancellation ( task_group_context& ctx );
+
+    //! Array of pointers to the registered workers
+    /** Used by cancellation propagation mechanism.
+        Must be the last data member of the class market. **/
+    generic_scheduler* my_workers[1];
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+#if __TBB_ARENA_PER_MASTER && ( _WIN32||_WIN64 )
+    //! register master with the resource manager
+    void register_master( ::rml::server::execution_resource_t& rsc_handle ) {
+        __TBB_ASSERT( my_server, "RML server not defined?" );
+        // the server may ignore registration and set master_exec_resource to NULL.
+        my_server->register_master( rsc_handle );
+    }
+
+    //! unregister master with the resource manager
+    void unregister_master( ::rml::server::execution_resource_t& rsc_handle ) const {
+        my_server->unregister_master( rsc_handle );
+    }
+#endif /* __TBB_ARENA_PER_MASTER && ( _WIN32||_WIN64 ) */
+
+}; // class market
+
+} // namespace internal
+} // namespace tbb
+
+#if defined(_MSC_VER) && defined(_Wp64)
+    // Workaround for overzealous compiler warnings in /Wp64 mode
+    #pragma warning (pop)
+#endif // warning 4244 is back
+
+#endif /* __TBB_ARENA_PER_MASTER */
+
+#endif /* _TBB_market_H */
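The market declared above is a reference-counted, lazily created process-wide singleton: global_market() creates or reuses theMarket under theMarketMutex, and release() unpublishes it when the last reference is dropped, with the actual destruction deferred to the RML close-connection handshake. A minimal standalone sketch of that acquire/release lifetime, assuming std::mutex in place of the spin mutex and with illustrative names:

#include <cstdio>
#include <mutex>

// Refcounted, lazily created singleton in the style of market::global_market /
// market::release: creation, ref counting and unpublishing all happen under one mutex.
class registry {
    static registry* the_instance;
    static std::mutex the_mutex;
    int ref_count;

    registry() : ref_count(1) { std::puts("registry created"); }
    ~registry() { std::puts("registry destroyed"); }
public:
    // Returns the singleton, creating it on first use and bumping its refcount otherwise.
    static registry& acquire() {
        std::lock_guard<std::mutex> lock(the_mutex);
        if( the_instance )
            ++the_instance->ref_count;
        else
            the_instance = new registry;
        return *the_instance;
    }

    // Drops one reference; the last release unpublishes the instance under the lock
    // and destroys it after the lock is released.
    void release() {
        bool last = false;
        {
            std::lock_guard<std::mutex> lock(the_mutex);
            if( --ref_count == 0 ) {
                the_instance = NULL;
                last = true;
            }
        }
        if( last )
            delete this;
    }
};

registry* registry::the_instance = NULL;
std::mutex registry::the_mutex;

int main() {
    registry& r1 = registry::acquire();
    registry& r2 = registry::acquire();   // same instance, refcount == 2
    r2.release();
    r1.release();                          // last reference: instance destroyed
    (void)r1;
    return 0;
}

As in market::release(), the decision to destroy is made while holding the lock, but the destruction itself happens only after the lock has been dropped.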
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/mutex.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/mutex.cpp
new file mode 100644 (file)
index 0000000..7ade57e
--- /dev/null
@@ -0,0 +1,148 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/mutex.h"
+#include "itt_notify.h"
+
+namespace tbb {
+    void mutex::scoped_lock::internal_acquire( mutex& m ) {
+
+#if _WIN32||_WIN64
+        switch( m.state ) {
+        case INITIALIZED: 
+        case HELD:
+            EnterCriticalSection( &m.impl );
+            // If a thread comes here, and another thread holds the lock, it will block
+            // in EnterCriticalSection.  When it returns from EnterCriticalSection,
+            // m.state must be set to INITIALIZED.  If the same thread tries to acquire a lock it
+            // already holds, the lock is in the HELD state, which will cause the assertion to fail.
+            __TBB_ASSERT(m.state!=HELD, "mutex::scoped_lock: deadlock caused by attempt to reacquire held mutex");
+            m.state = HELD;
+            break;
+        case DESTROYED: 
+            __TBB_ASSERT(false,"mutex::scoped_lock: mutex already destroyed"); 
+            break;
+        default: 
+            __TBB_ASSERT(false,"mutex::scoped_lock: illegal mutex state");
+            break;
+        }
+#else
+        int error_code = pthread_mutex_lock(&m.impl);
+        __TBB_ASSERT_EX(!error_code,"mutex::scoped_lock: pthread_mutex_lock failed");
+#endif /* _WIN32||_WIN64 */
+        my_mutex = &m;
+    }
+
+void mutex::scoped_lock::internal_release() {
+    __TBB_ASSERT( my_mutex, "mutex::scoped_lock: not holding a mutex" );
+#if _WIN32||_WIN64    
+     switch( my_mutex->state ) {
+        case INITIALIZED: 
+            __TBB_ASSERT(false,"mutex::scoped_lock: try to release the lock without acquisition");
+            break;
+        case HELD:
+            my_mutex->state = INITIALIZED;
+            LeaveCriticalSection(&my_mutex->impl);
+            break;
+        case DESTROYED: 
+            __TBB_ASSERT(false,"mutex::scoped_lock: mutex already destroyed"); 
+            break;
+        default: 
+            __TBB_ASSERT(false,"mutex::scoped_lock: illegal mutex state");
+            break;
+    }
+#else
+     int error_code = pthread_mutex_unlock(&my_mutex->impl);
+     __TBB_ASSERT_EX(!error_code, "mutex::scoped_lock: pthread_mutex_unlock failed");
+#endif /* _WIN32||_WIN64 */
+     my_mutex = NULL;
+}
+
+bool mutex::scoped_lock::internal_try_acquire( mutex& m ) {
+#if _WIN32||_WIN64
+    switch( m.state ) {
+        case INITIALIZED: 
+        case HELD:
+            break;
+        case DESTROYED: 
+            __TBB_ASSERT(false,"mutex::scoped_lock: mutex already destroyed"); 
+            break;
+        default: 
+            __TBB_ASSERT(false,"mutex::scoped_lock: illegal mutex state");
+            break;
+    }
+#endif /* _WIN32||_WIN64 */
+
+    bool result;
+#if _WIN32||_WIN64
+    result = TryEnterCriticalSection(&m.impl)!=0;
+    if( result ) {
+        __TBB_ASSERT(m.state!=HELD, "mutex::scoped_lock: deadlock caused by attempt to reacquire held mutex");
+        m.state = HELD;
+    }
+#else
+    result = pthread_mutex_trylock(&m.impl)==0;
+#endif /* _WIN32||_WIN64 */
+    if( result ) 
+        my_mutex = &m;
+    return result;
+}
+
+void mutex::internal_construct() {
+#if _WIN32||_WIN64
+    InitializeCriticalSection(&impl);
+    state = INITIALIZED;  
+#else
+    int error_code = pthread_mutex_init(&impl,NULL);
+    if( error_code )
+        tbb::internal::handle_perror(error_code,"mutex: pthread_mutex_init failed");
+#endif /* _WIN32||_WIN64*/    
+    ITT_SYNC_CREATE(&impl, _T("tbb::mutex"), _T(""));
+}
+
+void mutex::internal_destroy() {
+#if _WIN32||_WIN64
+    switch( state ) {
+      case INITIALIZED:
+        DeleteCriticalSection(&impl);
+        break;
+      case DESTROYED: 
+        __TBB_ASSERT(false,"mutex: already destroyed");
+        break;
+      default: 
+        __TBB_ASSERT(false,"mutex: illegal state for destruction");
+        break;
+    }
+    state = DESTROYED;
+#else
+    int error_code = pthread_mutex_destroy(&impl); 
+    __TBB_ASSERT_EX(!error_code,"mutex: pthread_mutex_destroy failed");
+#endif /* _WIN32||_WIN64 */
+}
+
+} // namespace tbb
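mutex.cpp above implements the tbb::mutex::scoped_lock protocol: internal_acquire() blocks and records the owned mutex, internal_try_acquire() records ownership only on success, and internal_release() clears it, with the Windows build additionally tracking an INITIALIZED/HELD/DESTROYED state for the debug assertions. A minimal standalone sketch of the same RAII shape over std::mutex, without the state machine (illustrative, not part of the patch):

#include <cassert>
#include <cstddef>
#include <mutex>

// RAII lock in the style of tbb::mutex::scoped_lock: acquire() blocks,
// try_acquire() records ownership only on success, release() is explicit,
// and the destructor releases whatever is still held.
class scoped_lock {
    std::mutex* my_mutex;   // NULL when no lock is held
public:
    scoped_lock() : my_mutex(NULL) {}
    explicit scoped_lock( std::mutex& m ) : my_mutex(NULL) { acquire(m); }
    ~scoped_lock() { if( my_mutex ) release(); }

    void acquire( std::mutex& m ) {
        assert( !my_mutex && "scoped_lock: already holding a mutex" );
        m.lock();
        my_mutex = &m;
    }

    bool try_acquire( std::mutex& m ) {
        assert( !my_mutex && "scoped_lock: already holding a mutex" );
        bool result = m.try_lock();
        if( result )
            my_mutex = &m;          // record ownership only on success
        return result;
    }

    void release() {
        assert( my_mutex && "scoped_lock: not holding a mutex" );
        my_mutex->unlock();
        my_mutex = NULL;
    }
};

int main() {
    std::mutex m;
    {
        scoped_lock l(m);           // acquired here, released by l's destructor
    }
    scoped_lock l2;
    if( l2.try_acquire(m) )         // succeeds: the mutex is free again
        l2.release();
    return 0;
}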
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/observer_proxy.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/observer_proxy.cpp
new file mode 100644 (file)
index 0000000..ddb8321
--- /dev/null
@@ -0,0 +1,237 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/tbb_config.h"
+
+#if __TBB_SCHEDULER_OBSERVER
+
+#include "tbb/spin_rw_mutex.h"
+#include "tbb/aligned_space.h"
+
+#include "observer_proxy.h"
+#include "tbb_main.h"
+#include "governor.h"
+#include "scheduler.h"
+
+namespace tbb {
+namespace internal {
+
+typedef spin_rw_mutex::scoped_lock task_scheduler_observer_mutex_scoped_lock;
+
+/** aligned_space used here to shut up warnings when mutex destructor is called while threads are still using it. */
+static aligned_space<spin_rw_mutex,1> the_task_scheduler_observer_mutex;
+static observer_proxy* global_first_observer_proxy;
+observer_proxy* global_last_observer_proxy;
+
+
+#if TBB_USE_ASSERT
+static atomic<int> observer_proxy_count;
+
+struct check_observer_proxy_count {
+    ~check_observer_proxy_count() {
+        if( observer_proxy_count!=0 ) {
+            runtime_warning( "Leaked %ld observer_proxy objects\n", long(observer_proxy_count) );
+        }
+    }
+};
+
+static check_observer_proxy_count the_check_observer_proxy_count;
+#endif /* TBB_USE_ASSERT */
+
+observer_proxy::observer_proxy( task_scheduler_observer_v3& tso ) : next(NULL), observer(&tso) {
+#if TBB_USE_ASSERT
+    ++observer_proxy_count;
+#endif /* TBB_USE_ASSERT */
+    // 1 for observer
+    gc_ref_count = 1;
+    {
+        // Append to the global list
+        task_scheduler_observer_mutex_scoped_lock lock(the_task_scheduler_observer_mutex.begin()[0],/*is_writer=*/true);
+        observer_proxy* p = global_last_observer_proxy;
+        prev = p;
+        if( p ) 
+            p->next=this;
+        else 
+            global_first_observer_proxy = this;
+        global_last_observer_proxy = this;
+    }
+}
+
+void observer_proxy::remove_from_list() {
+    // Take myself off the global list.  
+    if( next ) 
+        next->prev = prev;
+    else 
+        global_last_observer_proxy = prev;
+    if( prev )
+        prev->next = next;
+    else 
+        global_first_observer_proxy = next;
+#if TBB_USE_ASSERT
+    poison_pointer(prev);
+    poison_pointer(next);
+    gc_ref_count = -666;
+#endif /* TBB_USE_ASSERT */
+}
+
+void observer_proxy::remove_ref_slow() {
+    int r = gc_ref_count;
+    while(r>1) {
+        __TBB_ASSERT( r!=0, NULL );
+        int r_old = gc_ref_count.compare_and_swap(r-1,r);
+        if( r_old==r ) {
+            // Successfully decremented count.
+            return;
+        } 
+        r = r_old;
+    } 
+    __TBB_ASSERT( r==1, NULL );
+    // Reference count might go to zero
+    {
+        task_scheduler_observer_mutex_scoped_lock lock(the_task_scheduler_observer_mutex.begin()[0],/*is_writer=*/true);
+        r = --gc_ref_count;
+        if( !r ) {
+            remove_from_list();
+        } 
+    }
+    if( !r ) {
+        __TBB_ASSERT( gc_ref_count == -666, NULL );
+#if TBB_USE_ASSERT
+        --observer_proxy_count;
+#endif /* TBB_USE_ASSERT */
+        delete this;
+    }
+}
+
+observer_proxy* observer_proxy::process_list( observer_proxy* local_last, bool is_worker, bool is_entry ) {
+    // Pointer p marches through the list.
+    // If is_entry, start with our previous list position, otherwise start at beginning of list.
+    observer_proxy* p = is_entry ? local_last : NULL;
+    for(;;) { 
+        task_scheduler_observer* tso=NULL;
+        // Hold lock on list only long enough to advance to next proxy in list.
+        { 
+            task_scheduler_observer_mutex_scoped_lock lock(the_task_scheduler_observer_mutex.begin()[0],/*is_writer=*/false);
+            do {
+                if( local_last && local_last->observer ) {
+                    // 2 = 1 for observer and 1 for local_last
+                    __TBB_ASSERT( local_last->gc_ref_count>=2, NULL );  
+                    // Can decrement count quickly, because it cannot become zero here.
+                    --local_last->gc_ref_count;
+                    local_last = NULL;
+                } else {
+                    // Use slow form of decrementing the reference count, after lock is released.
+                }  
+                if( p ) {
+                    // We were already processing the list.
+                    if( observer_proxy* q = p->next ) {
+                        // Step to next item in list.
+                        p=q;
+                    } else {
+                        // At end of list.
+                        if( is_entry ) {  
+                            // Remember current position in the list, so we can start there on the next call.
+                            ++p->gc_ref_count;
+                        } else {
+                            // Finished running off the end of the list
+                            p=NULL;
+                        }
+                        goto done;
+                    }
+                } else {
+                    // Starting pass through the list
+                    p = global_first_observer_proxy;
+                    if( !p ) 
+                        goto done;
+                } 
+                tso = p->observer;
+            } while( !tso );
+            ++p->gc_ref_count;
+            ++tso->my_busy_count;
+        }
+        __TBB_ASSERT( !local_last || p!=local_last, NULL );
+        if( local_last )
+            local_last->remove_ref_slow();
+        // Do not hold any locks on the list while calling user's code.
+        __TBB_TRY {    
+            if( is_entry )
+                tso->on_scheduler_entry( is_worker );
+            else
+                tso->on_scheduler_exit( is_worker );
+        } __TBB_CATCH(...) {
+            // Suppress exception, because user routines are supposed to be observing, not changing
+            // behavior of a master or worker thread.
+#if TBB_USE_ASSERT
+            runtime_warning( "%s threw exception\n", is_entry ? "on_scheduler_entry" : "on_scheduler_exit"); 
+#endif /* TBB_USE_ASSERT */
+        }
+        intptr_t bc = --tso->my_busy_count;
+        __TBB_ASSERT_EX( bc>=0, "my_busy_count underflowed" );
+        local_last = p;
+    }
+done:
+    // Return new value to be used as local_last next time.
+    if( local_last )
+        local_last->remove_ref_slow();
+    __TBB_ASSERT( !p || is_entry, NULL );
+    return p;
+}
+
+void task_scheduler_observer_v3::observe( bool state ) {
+    if( state ) {
+        if( !my_proxy ) {
+            if( !__TBB_InitOnce::initialization_done() )
+                DoOneTimeInitializations();
+            my_busy_count = 0;
+            my_proxy = new observer_proxy(*this);
+            if( generic_scheduler* s = governor::local_scheduler_if_initialized() ) {
+                // Notify newly created observer of its own thread.
+                // Any other pending observers are notified too.
+                s->notify_entry_observers();
+            }
+        } 
+    } else {
+        if( observer_proxy* proxy = my_proxy ) {
+            my_proxy = NULL;
+            __TBB_ASSERT( proxy->gc_ref_count>=1, "reference for observer missing" );
+            {
+                task_scheduler_observer_mutex_scoped_lock lock(the_task_scheduler_observer_mutex.begin()[0],/*is_writer=*/true);
+                proxy->observer = NULL;
+            }
+            proxy->remove_ref_slow();
+            while( my_busy_count ) {
+                __TBB_Yield();
+            }
+        }
+    }
+}
+
+} // namespace internal
+} // namespace tbb
+
+#endif /* __TBB_SCHEDULER_OBSERVER */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/observer_proxy.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/observer_proxy.h
new file mode 100644 (file)
index 0000000..04e36b5
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _TBB_observer_proxy_H
+#define _TBB_observer_proxy_H
+
+#if __TBB_SCHEDULER_OBSERVER
+
+#include "tbb/task_scheduler_observer.h"
+
+namespace tbb {
+namespace internal {
+
+class observer_proxy {
+    friend class task_scheduler_observer_v3;
+    //! Reference count used for garbage collection.
+    /** 1 for reference from my task_scheduler_observer.
+        1 for each local_last_observer_proxy that points to me. 
+        No accounting for predecessor in the global list. 
+        No accounting for global_last_observer_proxy that points to me. */
+    atomic<int> gc_ref_count;
+    //! Pointer to next task_scheduler_observer 
+    /** Valid even when *this has been removed from the global list. */
+    observer_proxy* next; 
+    //! Pointer to previous task_scheduler_observer in global list.
+    observer_proxy* prev; 
+    //! Associated observer
+    task_scheduler_observer* observer;
+    //! Account for removing reference from p.  No effect if p is NULL.
+    void remove_ref_slow();
+    void remove_from_list(); 
+    observer_proxy( task_scheduler_observer_v3& wo ); 
+public:
+    static observer_proxy* process_list( observer_proxy* local_last, bool is_worker, bool is_entry );
+};
+
+extern observer_proxy* global_last_observer_proxy;
+
+} // namespace internal
+} // namespace tbb
+
+#endif /* __TBB_SCHEDULER_OBSERVER */
+
+#endif /* _TBB_observer_proxy_H */
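observer_proxy is the internal bookkeeping behind the public tbb::task_scheduler_observer interface: task_scheduler_observer_v3::observe(true) above creates a proxy and appends it to the global list, after which threads entering and leaving the scheduler run on_scheduler_entry()/on_scheduler_exit() via process_list(). A small usage sketch of that public interface (illustrative; the callbacks fire concurrently, so any count read from outside is only approximate):

#include <cstdio>
#include "tbb/atomic.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/task_scheduler_observer.h"

// Counts threads joining and leaving the scheduler through the hooks that
// observer_proxy.cpp dispatches.
class thread_counter : public tbb::task_scheduler_observer {
    tbb::atomic<int> my_count;
public:
    thread_counter() { my_count = 0; observe(true); }   // register with the scheduler
    ~thread_counter() { observe(false); }                // unregister before members go away
    /*override*/ void on_scheduler_entry( bool is_worker ) {
        ++my_count;
        std::printf("%s thread entered the scheduler\n", is_worker ? "worker" : "master");
    }
    /*override*/ void on_scheduler_exit( bool is_worker ) {
        --my_count;
        std::printf("%s thread left the scheduler\n", is_worker ? "worker" : "master");
    }
    int count() const { return my_count; }
};

int main() {
    thread_counter counter;
    tbb::task_scheduler_init init;   // initializes the scheduler; entry observers fire
    std::printf("threads seen so far (approximate): %d\n", counter.count());
    return 0;
}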
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/pipeline.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/pipeline.cpp
new file mode 100644 (file)
index 0000000..29ec1fb
--- /dev/null
@@ -0,0 +1,748 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/pipeline.h"
+#include "tbb/spin_mutex.h"
+#include "tbb/cache_aligned_allocator.h"
+#include "itt_notify.h"
+#include "semaphore.h"
+
+
+namespace tbb {
+
+namespace internal {
+
+//! This structure is used to store task information in a input buffer
+struct task_info {
+    void* my_object;
+    //! Invalid unless a task went through an ordered stage.
+    Token my_token;
+    //! False until my_token is set.
+    bool my_token_ready;
+    //! True if my_object is valid.
+    bool is_valid;
+    //! Set to initial state (no object, no token)
+    void reset() {
+        my_object = NULL;
+        my_token = 0;
+        my_token_ready = false;
+        is_valid = false;
+    }
+};
+//! A buffer of input items for a filter.
+/** Each item is a task_info, inserted into a position in the buffer corresponding to a Token. */
+class input_buffer {
+    friend class tbb::internal::pipeline_root_task;
+    friend class tbb::filter;
+    friend class tbb::thread_bound_filter;
+    friend class tbb::internal::stage_task;
+    friend class tbb::pipeline;
+
+    typedef  Token  size_type;
+
+    //! Array of deferred tasks that cannot yet start executing. 
+    task_info* array;
+
+    //! for thread-bound filter, semaphore for waiting, NULL otherwise.
+    semaphore* my_sem;
+
+    //! Size of array
+    /** Always 0 or a power of 2 */
+    size_type array_size;
+
+    //! Lowest token that can start executing.
+    /** All prior Token have already been seen. */
+    Token low_token;
+
+    //! Serializes updates.
+    spin_mutex array_mutex;
+
+    //! Resize "array".
+    /** Caller is responsible for acquiring a lock on "array_mutex". */
+    void grow( size_type minimum_size );
+
+    //! Initial size for "array"
+    /** Must be a power of 2 */
+    static const size_type initial_buffer_size = 4;
+
+    //! Used for out of order buffer, and for assigning my_token if is_ordered and my_token not already assigned
+    Token high_token;
+
+    //! True for ordered filter, false otherwise. 
+    bool is_ordered;
+
+    //! True for thread-bound filter, false otherwise. 
+    bool is_bound;
+
+    void create_sema(size_t initial_tokens) { __TBB_ASSERT(!my_sem,NULL); my_sem = new internal::semaphore(initial_tokens); }
+    void free_sema() { __TBB_ASSERT(my_sem,NULL); delete my_sem; }
+    void sema_P() { __TBB_ASSERT(my_sem,NULL); my_sem->P(); }
+    void sema_V() { __TBB_ASSERT(my_sem,NULL); my_sem->V(); }
+public:
+    //! Construct empty buffer.
+    input_buffer( bool is_ordered_, bool is_bound_ ) : 
+            array(NULL), my_sem(NULL), array_size(0),
+            low_token(0), high_token(0), 
+            is_ordered(is_ordered_), is_bound(is_bound_) {
+        grow(initial_buffer_size);
+        __TBB_ASSERT( array, NULL );
+        if(is_bound) create_sema(0);
+    }
+
+    //! Destroy the buffer.
+    ~input_buffer() {
+        __TBB_ASSERT( array, NULL );
+        cache_aligned_allocator<task_info>().deallocate(array,array_size);
+        poison_pointer( array );
+        if(my_sem) {
+            free_sema();
+            my_sem = NULL;
+        }
+    }
+
+    //! Put a token into the buffer.
+    /** If task information was placed into buffer, returns true;
+        otherwise returns false, informing the caller to create and spawn a task.
+        If input buffer owned by thread-bound filter and the item at
+        low_token was not valid, issue a V()
+        If the input_buffer is owned by a successor to a thread-bound filter,
+        the force_put parameter should be true to ensure the token is inserted
+        in the buffer.
+    */
+    bool put_token( task_info& info_, bool force_put = false ) {
+        {
+            info_.is_valid = true;
+            spin_mutex::scoped_lock lock( array_mutex );
+            Token token;
+            bool was_empty = !array[low_token&(array_size-1)].is_valid;
+            if( is_ordered ) {
+                if( !info_.my_token_ready ) {
+                    info_.my_token = high_token++;
+                    info_.my_token_ready = true;
+                }
+                token = info_.my_token;
+            } else
+                token = high_token++;
+            __TBB_ASSERT( (tokendiff_t)(token-low_token)>=0, NULL );
+            if( token!=low_token || is_bound || force_put ) {
+                // Trying to put token that is beyond low_token.
+                // Need to wait until low_token catches up before dispatching.
+                if( token-low_token>=array_size ) 
+                    grow( token-low_token+1 );
+                ITT_NOTIFY( sync_releasing, this );
+                array[token&(array_size-1)] = info_;
+                if(was_empty && is_bound) {
+                    sema_V();
+                }
+                return true;
+            }
+        }
+        return false;
+    }
+
+    //! Note that processing of a token is finished.
+    /** Fires up processing of the next token, if processing was deferred. */
+    // Using template to avoid explicit dependency on stage_task
+    // this is only called for serial filters, and is the reason for the
+    // advance parameter in return_item (we're incrementing low_token here.)
+    // Non-TBF serial stages don't advance the token at the start because the presence
+    // of the current token in the buffer keeps another stage from being spawned.
+    template<typename StageTask>
+    void note_done( Token token, StageTask& spawner ) {
+        task_info wakee;
+        wakee.reset();
+        {
+            spin_mutex::scoped_lock lock( array_mutex );
+            if( !is_ordered || token==low_token ) {
+                // Wake the next task
+                task_info& item = array[++low_token & (array_size-1)];
+                ITT_NOTIFY( sync_acquired, this );
+                wakee = item;
+                item.is_valid = false;
+            }
+        }
+        if( wakee.is_valid )
+            spawner.spawn_stage_task(wakee);
+    }
+
+#if __TBB_TASK_GROUP_CONTEXT
+    //! The method destroys all data in filters to prevent memory leaks
+    void clear( filter* my_filter ) {
+        long t=low_token;
+        for( size_type i=0; i<array_size; ++i, ++t ){
+            task_info& temp = array[t&(array_size-1)];
+            if (temp.is_valid ) {
+                my_filter->finalize(temp.my_object);
+                temp.is_valid = false;
+            }
+        }
+    }
+#endif
+
+    //! Return an item and invalidate it in the buffer, but only advance low_token if advance is true.
+    //  advance == true for parallel filters.  If the filter is serial, leave the
+    // item in the buffer to keep another stage from being spawned.
+    bool return_item(task_info& info, bool advance) {
+        spin_mutex::scoped_lock lock( array_mutex );
+        task_info& item = array[low_token&(array_size-1)];
+        ITT_NOTIFY( sync_acquired, this );
+        if( item.is_valid ) {
+            info = item;
+            item.is_valid = false;
+            if (advance) low_token++;
+            return true;
+        }
+        return false;
+    }
+
+    //! true if the current low_token is valid.
+    bool has_item() { spin_mutex::scoped_lock lock(array_mutex); return array[low_token&(array_size -1)].is_valid; }
+};
+
+void input_buffer::grow( size_type minimum_size ) {
+    size_type old_size = array_size;
+    size_type new_size = old_size ? 2*old_size : initial_buffer_size;
+    while( new_size<minimum_size ) 
+        new_size*=2;
+    task_info* new_array = cache_aligned_allocator<task_info>().allocate(new_size);
+    task_info* old_array = array;
+    for( size_type i=0; i<new_size; ++i )
+        new_array[i].is_valid = false;
+    long t=low_token;
+    for( size_type i=0; i<old_size; ++i, ++t )
+        new_array[t&(new_size-1)] = old_array[t&(old_size-1)];
+    array = new_array;
+    array_size = new_size;
+    if( old_array )
+        cache_aligned_allocator<task_info>().deallocate(old_array,old_size);
+}
+
+class stage_task: public task, public task_info {
+private:
+    friend class tbb::pipeline;
+    pipeline& my_pipeline;
+    filter* my_filter;  
+    //! True if this task has not yet read the input.
+    bool my_at_start;
+
+public:
+    //! Construct stage_task for first stage in a pipeline.
+    /** Such a stage has not read any input yet. */
+    stage_task( pipeline& pipeline ) :
+        my_pipeline(pipeline), 
+        my_filter(pipeline.filter_list),
+        my_at_start(true)
+    {
+        task_info::reset();
+    }
+    //! Construct stage_task for a subsequent stage in a pipeline.
+    stage_task( pipeline& pipeline, filter* filter_, const task_info& info ) :
+        task_info(info),
+        my_pipeline(pipeline), 
+        my_filter(filter_),
+        my_at_start(false)
+    {}
+    //! Roughly equivalent to the constructor of input stage task
+    void reset() {
+        task_info::reset();
+        my_filter = my_pipeline.filter_list;
+        my_at_start = true;
+    }
+    //! The virtual task execution method
+    /*override*/ task* execute();
+#if __TBB_TASK_GROUP_CONTEXT
+    ~stage_task()    
+    {
+        if (my_filter && my_object && (my_filter->my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(4)) {
+            __TBB_ASSERT(is_cancelled(), "Trying to finalize the task that wasn't cancelled");
+            my_filter->finalize(my_object);
+            my_object = NULL;
+        }
+    }
+#endif // __TBB_TASK_GROUP_CONTEXT
+    //! Creates and spawns stage_task from task_info
+    void spawn_stage_task(const task_info& info)
+    {
+        stage_task* clone = new (allocate_additional_child_of(*parent())) 
+                                stage_task( my_pipeline, my_filter, info );
+        spawn(*clone);
+    }
+};
+
+task* stage_task::execute() {
+    __TBB_ASSERT( !my_at_start || !my_object, NULL );
+    __TBB_ASSERT( !my_filter->is_bound(), NULL );
+    if( my_at_start ) {
+        if( my_filter->is_serial() ) {
+            my_object = (*my_filter)(my_object);
+            if( my_object ) {
+                if( my_filter->is_ordered() ) {
+                    my_token = my_pipeline.token_counter++; // ideally, with relaxed semantics
+                    my_token_ready = true;
+                } else if( (my_filter->my_filter_mode & my_filter->version_mask) >= __TBB_PIPELINE_VERSION(5) ) {
+                    if( my_pipeline.has_thread_bound_filters )
+                        my_pipeline.token_counter++; // ideally, with relaxed semantics
+                }
+                if( !my_filter->next_filter_in_pipeline ) { // we are the only filter in the pipeline
+                    reset();
+                    goto process_another_stage;
+                } else {
+                    ITT_NOTIFY( sync_releasing, &my_pipeline.input_tokens );
+                    if( --my_pipeline.input_tokens>0 )
+                        spawn( *new( allocate_additional_child_of(*parent()) ) stage_task( my_pipeline ) );
+                }
+            } else {
+                my_pipeline.end_of_input = true; 
+                return NULL;
+            }
+        } else /*not is_serial*/ {
+            if( my_pipeline.end_of_input )
+                return NULL;
+            if( (my_filter->my_filter_mode & my_filter->version_mask) >= __TBB_PIPELINE_VERSION(5) ) {
+                if( my_pipeline.has_thread_bound_filters )
+                    my_pipeline.token_counter++;
+            }
+            ITT_NOTIFY( sync_releasing, &my_pipeline.input_tokens );
+            if( --my_pipeline.input_tokens>0 )
+                spawn( *new( allocate_additional_child_of(*parent()) ) stage_task( my_pipeline ) );
+            my_object = (*my_filter)(my_object);
+            if( !my_object ) {
+                my_pipeline.end_of_input = true; 
+                if( (my_filter->my_filter_mode & my_filter->version_mask) >= __TBB_PIPELINE_VERSION(5) ) {
+                    if( my_pipeline.has_thread_bound_filters )
+                        my_pipeline.token_counter--;  // fix token_counter
+                }
+                return NULL;
+            }
+        }
+        my_at_start = false;
+    } else {
+        my_object = (*my_filter)(my_object);
+        if( my_filter->is_serial() )
+            my_filter->my_input_buffer->note_done(my_token, *this);
+    }
+    my_filter = my_filter->next_filter_in_pipeline; 
+    if( my_filter ) {
+        // There is another filter to execute.
+        // Crank up priority a notch.
+        add_to_depth(1);
+        if( my_filter->is_serial() ) {
+            // The next filter must execute tokens in order
+            if( my_filter->my_input_buffer->put_token(*this) ){
+                // Can't proceed with the same item
+                if( my_filter->is_bound() ) {
+                    // Find the next non-thread-bound filter
+                    do {
+                        my_filter = my_filter->next_filter_in_pipeline;
+                    } while( my_filter && my_filter->is_bound() );
+                    // Check if there is an item ready to process
+                    if( my_filter && my_filter->my_input_buffer->return_item(*this, !my_filter->is_serial()))
+                        goto process_another_stage;
+                } 
+                my_filter = NULL; // To prevent deleting my_object twice if exception occurs
+                return NULL;
+            }
+        }
+    } else {
+        // Reached end of the pipe.
+        size_t ntokens_avail = ++my_pipeline.input_tokens;
+        if(my_pipeline.filter_list->is_bound() ) {
+            if(ntokens_avail == 1) {
+                my_pipeline.filter_list->my_input_buffer->sema_V();
+            }
+            return NULL;
+        }
+        if( ntokens_avail>1  // Recycle only if ours is the sole available token
+                || my_pipeline.end_of_input ) {
+            return NULL; // No need to recycle for new input
+        }
+        ITT_NOTIFY( sync_acquired, &my_pipeline.input_tokens );
+        // Recycle as an input stage task.
+        reset();
+    }
+process_another_stage:
+    /* A semi-hackish way to reexecute the same task object immediately without spawning.
+       recycle_as_continuation marks the task for future execution,
+       and then 'this' pointer is returned to bypass spawning. */
+    recycle_as_continuation();
+    return this;
+}
+
+class pipeline_root_task: public task {
+    pipeline& my_pipeline;
+    bool do_segment_scanning;
+
+    /*override*/ task* execute() {
+        if( !my_pipeline.end_of_input )
+            if( !my_pipeline.filter_list->is_bound() )
+                if( my_pipeline.input_tokens > 0 ) {
+                    recycle_as_continuation();
+                    set_ref_count(1);
+                    return new( allocate_child() ) stage_task( my_pipeline );
+                }
+        if( do_segment_scanning ) {
+            filter* current_filter = my_pipeline.filter_list->next_segment;
+            /* the first non-thread-bound filter that follows a thread-bound one
+               and may have valid items to process */
+            filter* first_suitable_filter = current_filter;
+            while( current_filter ) {
+                __TBB_ASSERT( !current_filter->is_bound(), "filter is thread-bound?" );
+                __TBB_ASSERT( current_filter->prev_filter_in_pipeline->is_bound(), "previous filter is not thread-bound?" );
+                if( !my_pipeline.end_of_input || current_filter->has_more_work())
+                {
+                    task_info info;
+                    info.reset();
+                    if( current_filter->my_input_buffer->return_item(info, !current_filter->is_serial()) ) {
+                        set_ref_count(1);
+                        recycle_as_continuation();
+                        return new( allocate_child() ) stage_task( my_pipeline, current_filter, info);
+                    }
+                    current_filter = current_filter->next_segment;
+                    if( !current_filter ) {
+                        if( !my_pipeline.end_of_input ) {
+                            recycle_as_continuation();
+                            return this;
+                        }
+                        current_filter = first_suitable_filter;
+                        __TBB_Yield();
+                    }
+                } else { 
+                    /* The preceding pipeline segment is empty.
+                       Fast-forward to the next segment following a thread-bound filter. */
+                    first_suitable_filter = first_suitable_filter->next_segment;
+                    current_filter = first_suitable_filter; 
+                }
+            } /* while( current_filter ) */
+            return NULL;
+        } else { 
+            if( !my_pipeline.end_of_input ) {
+                recycle_as_continuation();
+                return this;
+            }
+            return NULL;
+        }
+    }
+public:
+    pipeline_root_task( pipeline& pipeline ): my_pipeline(pipeline), do_segment_scanning(false)
+    {
+        __TBB_ASSERT( my_pipeline.filter_list, NULL );
+        filter* first = my_pipeline.filter_list;
+        if( (first->my_filter_mode & first->version_mask) >= __TBB_PIPELINE_VERSION(5) ) {
+            // Scanning the pipeline for segments 
+            filter* head_of_previous_segment = first;
+            for(  filter* subfilter=first->next_filter_in_pipeline;
+                  subfilter!=NULL;
+                  subfilter=subfilter->next_filter_in_pipeline )
+            {
+                if( subfilter->prev_filter_in_pipeline->is_bound() && !subfilter->is_bound() ) {
+                    do_segment_scanning = true;
+                    head_of_previous_segment->next_segment = subfilter;
+                    head_of_previous_segment = subfilter;
+                }
+            }
+        }
+    }
+};
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // Workaround for overzealous compiler warnings
+    // Suppress compiler warning about constant conditional expression
+    #pragma warning (disable: 4127)
+#endif
+
+// This class destroys end_counter and clears all input buffers if the pipeline was cancelled.
+class pipeline_cleaner: internal::no_copy {
+    pipeline& my_pipeline;  
+public:
+    pipeline_cleaner(pipeline& _pipeline) : 
+        my_pipeline(_pipeline)
+    {}
+    ~pipeline_cleaner(){
+#if __TBB_TASK_GROUP_CONTEXT
+        if (my_pipeline.end_counter->is_cancelled()) // Pipeline was cancelled
+            my_pipeline.clear_filters(); 
+#endif
+        my_pipeline.end_counter = NULL;            
+    }
+};
+
+} // namespace internal
+
+void pipeline::inject_token( task& ) {
+    __TBB_ASSERT(0,"illegal call to inject_token");
+}
+
+#if __TBB_TASK_GROUP_CONTEXT
+void pipeline::clear_filters() {
+    for( filter* f = filter_list; f; f = f->next_filter_in_pipeline ) {
+        if ((f->my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(4))
+            if( internal::input_buffer* b = f->my_input_buffer )
+                b->clear(f);
+    }
+}
+#endif
+
+pipeline::pipeline() : 
+    filter_list(NULL),
+    filter_end(NULL),
+    end_counter(NULL),
+    end_of_input(false),
+    has_thread_bound_filters(false)
+{
+    token_counter = 0;
+    input_tokens = 0;
+}
+
+pipeline::~pipeline() {
+    clear();
+}
+
+void pipeline::clear() {
+    filter* next;
+    for( filter* f = filter_list; f; f=next ) {
+        if( internal::input_buffer* b = f->my_input_buffer ) {
+            delete b; 
+            f->my_input_buffer = NULL;
+        }
+        next=f->next_filter_in_pipeline;
+        f->next_filter_in_pipeline = filter::not_in_pipeline();
+        if ( (f->my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(3) ) {
+            f->prev_filter_in_pipeline = filter::not_in_pipeline();
+            f->my_pipeline = NULL;
+        }
+        if ( (f->my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(5) )
+            f->next_segment = NULL;
+    }
+    filter_list = filter_end = NULL;
+}
+
+void pipeline::add_filter( filter& filter_ ) {
+#if TBB_USE_ASSERT
+    if ( (filter_.my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(3) ) 
+        __TBB_ASSERT( filter_.prev_filter_in_pipeline==filter::not_in_pipeline(), "filter already part of pipeline?" );
+    __TBB_ASSERT( filter_.next_filter_in_pipeline==filter::not_in_pipeline(), "filter already part of pipeline?" );
+    __TBB_ASSERT( !end_counter, "invocation of add_filter on running pipeline" );
+#endif    
+    if ( (filter_.my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(3) ) {
+        filter_.my_pipeline = this;
+        filter_.prev_filter_in_pipeline = filter_end;
+        if ( filter_list == NULL)
+            filter_list = &filter_;
+        else
+            filter_end->next_filter_in_pipeline = &filter_;
+        filter_.next_filter_in_pipeline = NULL;
+        filter_end = &filter_;
+    }
+    else
+    {
+        if( !filter_end )
+            filter_end = reinterpret_cast<filter*>(&filter_list);
+        
+        *reinterpret_cast<filter**>(filter_end) = &filter_;
+        filter_end = reinterpret_cast<filter*>(&filter_.next_filter_in_pipeline);
+        *reinterpret_cast<filter**>(filter_end) = NULL;
+    }
+    if( (filter_.my_filter_mode & filter_.version_mask) >= __TBB_PIPELINE_VERSION(5) ) {
+        if( filter_.is_serial() ) {
+            if( filter_.is_bound() )
+                has_thread_bound_filters = true;
+            filter_.my_input_buffer = new internal::input_buffer( filter_.is_ordered(), filter_.is_bound() );
+        }
+        else {
+            if( filter_.prev_filter_in_pipeline && filter_.prev_filter_in_pipeline->is_bound() )
+                filter_.my_input_buffer = new internal::input_buffer( false, false );
+        }
+    } else {
+        if( filter_.is_serial() ) {
+            filter_.my_input_buffer = new internal::input_buffer( filter_.is_ordered(), false );
+        }
+    }
+
+}
+
+void pipeline::remove_filter( filter& filter_ ) {
+    __TBB_ASSERT( filter_.prev_filter_in_pipeline!=filter::not_in_pipeline(), "filter not part of pipeline" );
+    __TBB_ASSERT( filter_.next_filter_in_pipeline!=filter::not_in_pipeline(), "filter not part of pipeline" );
+    __TBB_ASSERT( !end_counter, "invocation of remove_filter on running pipeline" );
+    if (&filter_ == filter_list) 
+        filter_list = filter_.next_filter_in_pipeline;
+    else {
+        __TBB_ASSERT( filter_.prev_filter_in_pipeline, "filter list broken?" ); 
+        filter_.prev_filter_in_pipeline->next_filter_in_pipeline = filter_.next_filter_in_pipeline;
+    }
+    if (&filter_ == filter_end)
+        filter_end = filter_.prev_filter_in_pipeline;
+    else {
+        __TBB_ASSERT( filter_.next_filter_in_pipeline, "filter list broken?" ); 
+        filter_.next_filter_in_pipeline->prev_filter_in_pipeline = filter_.prev_filter_in_pipeline;
+    }
+    if( internal::input_buffer* b = filter_.my_input_buffer ) {
+        delete b; 
+        filter_.my_input_buffer = NULL;
+    }
+    filter_.next_filter_in_pipeline = filter_.prev_filter_in_pipeline = filter::not_in_pipeline();
+    if ( (filter_.my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(5) )
+        filter_.next_segment = NULL;
+    filter_.my_pipeline = NULL;
+}
+
+void pipeline::run( size_t max_number_of_live_tokens
+#if __TBB_TASK_GROUP_CONTEXT
+    , tbb::task_group_context& context
+#endif
+    ) {
+    __TBB_ASSERT( max_number_of_live_tokens>0, "pipeline::run must have at least one token" );
+    __TBB_ASSERT( !end_counter, "pipeline already running?" );
+    if( filter_list ) {
+        internal::pipeline_cleaner my_pipeline_cleaner(*this);
+        end_of_input = false;
+        input_tokens = internal::Token(max_number_of_live_tokens);
+        if(has_thread_bound_filters) {
+            // release input filter if thread-bound
+            if(filter_list->is_bound()) {
+                filter_list->my_input_buffer->sema_V();
+            }
+        }
+#if __TBB_TASK_GROUP_CONTEXT            
+        end_counter = new( task::allocate_root(context) ) internal::pipeline_root_task( *this );
+#else
+        end_counter = new( task::allocate_root() ) internal::pipeline_root_task( *this );
+#endif
+        // Start execution of tasks
+        task::spawn_root_and_wait( *end_counter );
+
+        if(has_thread_bound_filters) {
+            for(filter* f = filter_list->next_filter_in_pipeline; f; f=f->next_filter_in_pipeline) {
+                if(f->is_bound()) {
+                    f->my_input_buffer->sema_V(); // wake to end
+                }
+            }
+        }
+    }
+}
+
+#if __TBB_TASK_GROUP_CONTEXT
+void pipeline::run( size_t max_number_of_live_tokens ) {
+    if( filter_list ) {
+        // Construct task group context with the exception propagation mode expected 
+        // by the pipeline caller.
+        uintptr_t ctx_traits = filter_list->my_filter_mode & filter::exact_exception_propagation ? 
+                task_group_context::default_traits :
+                task_group_context::default_traits & ~task_group_context::exact_exception;
+        task_group_context context(task_group_context::bound, ctx_traits);
+        run(max_number_of_live_tokens, context);
+    }
+}
+#endif // __TBB_TASK_GROUP_CONTEXT
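+
+// --------------------------------------------------------------------------
+// Illustrative sketch only, not part of the library: how client code is
+// expected to drive the machinery implemented above.  Filters derive from
+// tbb::filter, are added in pipeline order, and pipeline::run() bounds the
+// number of items in flight.  The example_* names, the int payload and the
+// 100-item bound are hypothetical.
+// --------------------------------------------------------------------------
+class example_source: public filter {
+    int my_count;
+public:
+    example_source() : filter(serial_in_order), my_count(0) {}
+    // Returning NULL from the first filter signals end of input (see the
+    // end_of_input handling in stage_task::execute above).
+    /*override*/ void* operator()( void* ) {
+        return my_count<100 ? new int(my_count++) : NULL;
+    }
+};
+class example_sink: public filter {
+public:
+    example_sink() : filter(parallel) {}
+    /*override*/ void* operator()( void* item ) {
+        delete static_cast<int*>(item);   // consume the payload
+        return NULL;                      // last stage: nothing to pass on
+    }
+};
+void example_run_pipeline() {
+    pipeline p;
+    example_source source;
+    example_sink sink;
+    p.add_filter(source);
+    p.add_filter(sink);
+    p.run( /*max_number_of_live_tokens=*/8 );  // at most 8 items in flight
+    p.clear();                                 // detach filters before they are destroyed
+}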
+
+bool filter::has_more_work() {
+    __TBB_ASSERT(my_pipeline, NULL);
+    __TBB_ASSERT(my_input_buffer, "has_more_work() called for filter with no input buffer");
+    return (internal::tokendiff_t)(my_pipeline->token_counter - my_input_buffer->low_token) != 0;
+}
+
+filter::~filter() {
+    if ( (my_filter_mode & version_mask) >= __TBB_PIPELINE_VERSION(3) ) {
+        if ( next_filter_in_pipeline != filter::not_in_pipeline() )
+            my_pipeline->remove_filter(*this);
+        else 
+            __TBB_ASSERT( prev_filter_in_pipeline == filter::not_in_pipeline(), "probably filter list is broken" );
+    } else {
+        __TBB_ASSERT( next_filter_in_pipeline==filter::not_in_pipeline(), "cannot destroy filter that is part of pipeline" );
+    }
+}
+
+thread_bound_filter::result_type thread_bound_filter::process_item() {
+    return internal_process_item(true);
+}
+
+thread_bound_filter::result_type thread_bound_filter::try_process_item() {
+    return internal_process_item(false);
+}
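+
+// Illustrative sketch only, not part of the library: the intended division of
+// labour for a thread_bound_filter.  One application thread calls
+// pipeline::run(), while a second, dedicated thread (it must not be the one
+// that called run()) services the thread-bound stage until the pipeline
+// reports end_of_stream.  The function name is hypothetical.
+void example_serve_thread_bound_stage( thread_bound_filter& f ) {
+    // process_item() blocks on the stage's input buffer until an item arrives
+    // or the stream ends; try_process_item() would instead return
+    // item_not_available when there is nothing to do yet.
+    while( f.process_item() != thread_bound_filter::end_of_stream )
+        continue;
+}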
+
+thread_bound_filter::result_type thread_bound_filter::internal_process_item(bool is_blocking) {
+    internal::task_info info;
+    info.reset();
+
+    if(my_pipeline && my_pipeline->end_of_input && !has_more_work())
+        return end_of_stream;
+
+    if( !prev_filter_in_pipeline ) {
+        if( my_pipeline->end_of_input )
+            return end_of_stream;
+        while(my_pipeline->input_tokens == 0) {
+            if( !is_blocking ) 
+                return item_not_available;
+            my_input_buffer->sema_P();
+        }
+        info.my_object = (*this)(info.my_object);
+        if( info.my_object ) {
+            __TBB_ASSERT(my_pipeline->input_tokens > 0, "Token failed in thread-bound filter");
+            my_pipeline->input_tokens--;
+            if( is_ordered() ) {
+                info.my_token = my_pipeline->token_counter;
+                info.my_token_ready = true;
+            }
+            my_pipeline->token_counter++; // ideally, with relaxed semantics
+        } else {
+            my_pipeline->end_of_input = true; 
+            return end_of_stream; 
+        }
+    } else { /* this is not an input filter */
+        while(!my_input_buffer->has_item()) {
+            if(!is_blocking) {
+                return item_not_available;
+            }
+            my_input_buffer->sema_P();
+            if( my_pipeline->end_of_input && !has_more_work()) {
+                return end_of_stream;
+            }
+        }
+        if(!my_input_buffer->return_item(info, /*advance*/true)) {
+            __TBB_ASSERT(0,"return_item failed");
+        }
+        info.my_object = (*this)(info.my_object);
+    }
+    if( next_filter_in_pipeline ) {
+        if (!next_filter_in_pipeline->my_input_buffer->put_token(info,/*force_put=*/true) ) {
+            __TBB_ASSERT(0, "Couldn't put token after thread-bound buffer");
+        }
+    } else {
+        size_t ntokens_avail = ++(my_pipeline->input_tokens);
+        if(my_pipeline->filter_list->is_bound()) {
+            if(ntokens_avail == 1) {
+                my_pipeline->filter_list->my_input_buffer->sema_V();
+            }
+        }
+    }
+
+    return success;
+}
+
+} // tbb
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/private_server.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/private_server.cpp
new file mode 100644 (file)
index 0000000..4c84024
--- /dev/null
@@ -0,0 +1,388 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "rml_tbb.h"
+#include "../server/thread_monitor.h"
+#include "tbb/atomic.h"
+#include "tbb/cache_aligned_allocator.h"
+#include "tbb/spin_mutex.h"
+#include "tbb/tbb_thread.h"
+
+using rml::internal::thread_monitor;
+
+namespace tbb {
+namespace internal {
+namespace rml {
+
+class private_server;
+
+class private_worker: no_copy {
+    //! State in finite-state machine that controls the worker.
+    /** State diagram:
+        init --------------------\ 
+          |                      | 
+          V                      V
+        starting --> normal --> quit
+          |
+          V
+        plugged
+      */ 
+    enum state_t {
+        //! *this is initialized
+        st_init,
+        //! *this has associated thread that is starting up.
+        st_starting,
+        //! Associated thread is doing normal life sequence.
+        st_normal,
+        //! Associated thread has ended normal life sequence and promises to never touch *this again.
+        st_quit,
+        //! Associated thread should skip normal life sequence, because private_server is shutting down.
+        st_plugged
+    };
+    atomic<state_t> my_state;
+    
+    //! Associated server
+    private_server& my_server; 
+
+    //! Associated client
+    tbb_client& my_client; 
+
+    //! index used for avoiding the 64K aliasing problem
+    const size_t my_index;
+
+    //! Monitor for sleeping when there is no work to do.
+    /** The invariant that holds for sleeping workers is:
+        "my_slack<=0 && my_state==st_normal && I am on server's list of asleep threads" */
+    thread_monitor my_thread_monitor;
+
+    //! Link for list of workers that are sleeping or have no associated thread.
+    private_worker* my_next;
+
+    friend class private_server;
+
+    //! Actions executed by the associated thread 
+    void run();
+
+    //! Wake up associated thread (or launch a thread if there is none)
+    void wake_or_launch();
+
+    //! Called by a thread (usually not the associated thread) to commence termination.
+    void start_shutdown();
+
+    static __RML_DECL_THREAD_ROUTINE thread_routine( void* arg );
+
+protected:
+    private_worker( private_server& server, tbb_client& client, const size_t i ) : 
+        my_server(server),
+        my_client(client),
+        my_index(i)
+    {
+        my_state = st_init;
+    }
+
+};
+
+static const size_t cache_line_size = tbb::internal::NFS_MaxLineSize;
+
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // Suppress overzealous compiler warnings about an uninstantiable class
+    #pragma warning(push)
+    #pragma warning(disable:4510 4610)
+#endif
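+//! Pads each worker out to a whole number of cache lines so that adjacent
+//! workers in private_server::my_thread_array do not share a cache line
+//! (avoids false sharing between their hot fields).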
+class padded_private_worker: public private_worker {
+    char pad[cache_line_size - sizeof(private_worker)%cache_line_size];
+public:
+    padded_private_worker( private_server& server, tbb_client& client, const size_t i ) : private_worker(server,client,i) {}
+};
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    #pragma warning(pop)
+#endif
+
+class private_server: public tbb_server, no_copy {
+    tbb_client& my_client;
+    //! Maximum number of threads to be created.
+    /** Threads are created lazily, so maximum might not actually be reached. */
+    const tbb_client::size_type my_n_thread;
+
+    //! Stack size for each thread.
+    const size_t my_stack_size;
+
+    //! Number of jobs that could use their associated thread minus number of active threads.
+    /** If negative, indicates oversubscription.
+        If positive, indicates that more threads should run. 
+        Can be lowered asynchronously, but must be raised only while holding my_asleep_list_mutex,
+        because raising it impacts the invariant for sleeping threads. */
+    atomic<int> my_slack;
+
+    //! Counter used to determine when to delete this.
+    atomic<int> my_ref_count;
+
+    padded_private_worker* my_thread_array;
+
+    //! List of workers that are asleep or committed to sleeping until notified by another thread.
+    tbb::atomic<private_worker*> my_asleep_list_root;
+
+    //! Protects my_asleep_list_root
+    tbb::spin_mutex my_asleep_list_mutex;
+
+#if TBB_USE_ASSERT
+    atomic<int> my_net_slack_requests;
+#endif /* TBB_USE_ASSERT */
+
+    //! Wake up to two sleeping workers, if there are any sleeping.
+    /** The call is used to propagate a chain reaction where each thread wakes up two threads,
+        which in turn each wake up two threads, etc. */
+    void propagate_chain_reaction() {
+        // First test of a double-check idiom.  Second test is inside wake_some(0).
+        if( my_asleep_list_root ) 
+            wake_some(0);
+    }
+
+    //! Try to add t to list of sleeping workers
+    bool try_insert_in_asleep_list( private_worker& t );
+
+    //! Equivalent of adding additional_slack to my_slack and waking up to 2 threads if my_slack permits.
+    void wake_some( int additional_slack );
+
+    virtual ~private_server();
+    
+    void remove_server_ref() {
+        if( --my_ref_count==0 ) {
+            my_client.acknowledge_close_connection();
+            this->~private_server();
+            tbb::cache_aligned_allocator<private_server>().deallocate( this, 1 );
+        } 
+    }
+
+    friend class private_worker;
+public:
+    private_server( tbb_client& client );
+
+    /*override*/ version_type version() const {
+        return 0;
+    } 
+
+    /*override*/ void request_close_connection( bool /*exiting*/ ) {
+        for( size_t i=0; i<my_n_thread; ++i ) 
+            my_thread_array[i].start_shutdown();
+        remove_server_ref();
+    }
+
+    /*override*/ void yield() {__TBB_Yield();}
+
+    /*override*/ void independent_thread_number_changed( int ) {__TBB_ASSERT(false,NULL);}
+
+    /*override*/ unsigned default_concurrency() const {return tbb::tbb_thread::hardware_concurrency()-1;}
+
+    /*override*/ void adjust_job_count_estimate( int delta );
+
+#if _WIN32||_WIN64
+    /*override*/ void register_master ( ::rml::server::execution_resource_t& ) {}
+    /*override*/ void unregister_master ( ::rml::server::execution_resource_t ) {}
+#endif /* _WIN32||_WIN64 */
+};
+
+//------------------------------------------------------------------------
+// Methods of private_worker
+//------------------------------------------------------------------------
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // Suppress overzealous compiler warnings about the initialized variable 'sink_for_alloca' not being referenced
+    #pragma warning(push)
+    #pragma warning(disable:4189)
+#endif
+#if __MINGW32__ && __GNUC__==4 &&__GNUC_MINOR__>=2 && !__MINGW64__
+// ensure that stack is properly aligned for TBB threads
+__attribute__((force_align_arg_pointer))
+#endif
+__RML_DECL_THREAD_ROUTINE private_worker::thread_routine( void* arg ) {
+    private_worker* self = static_cast<private_worker*>(arg);
+    AVOID_64K_ALIASING( self->my_index );
+#if _XBOX
+    int HWThreadIndex = __TBB_XBOX360_GetHardwareThreadIndex(i);
+    XSetThreadProcessor(GetCurrentThread(), HWThreadIndex);
+#endif
+    self->run();
+    return 0;
+}
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    #pragma warning(pop)
+#endif
+
+void private_worker::start_shutdown() {
+    state_t s; 
+    // Transition from st_starting or st_normal to st_plugged or st_quit
+    do {
+        s = my_state;
+        __TBB_ASSERT( s==st_init||s==st_starting||s==st_normal, NULL );
+    } while( my_state.compare_and_swap( s==st_starting? st_plugged : st_quit, s )!=s );
+    if( s==st_normal ) {
+        // May have invalidated invariant for sleeping, so wake up the thread.
+        // Note that the notify() here occurs without maintaining invariants for my_slack.
+        // It does not matter, because my_state==st_quit overrides checking of my_slack.
+        my_thread_monitor.notify();
+    } else if( s==st_init ) {
+        // Perform action that otherwise would be performed by associated thread when it quits.
+        my_server.remove_server_ref();
+    }
+}
+
+void private_worker::run() {
+    my_server.propagate_chain_reaction();
+    state_t s = my_state.compare_and_swap( st_normal, st_starting );
+    if( s==st_starting ) {
+        ::rml::job& j = *my_client.create_one_job();
+        while( my_state==st_normal ) {
+            if( my_server.my_slack>=0 ) {
+                my_client.process(j);
+            } else {
+                thread_monitor::cookie c;
+                // Prepare to wait
+                my_thread_monitor.prepare_wait(c);
+                // Check/set the invariant for sleeping
+                if( my_state==st_normal && my_server.try_insert_in_asleep_list(*this) ) {
+                    my_thread_monitor.commit_wait(c);
+                    my_server.propagate_chain_reaction();
+                } else {
+                    // Invariant broken
+                    my_thread_monitor.cancel_wait();
+                }
+            }
+        }
+        my_client.cleanup(j);
+    } else {
+        // Server is already shutting down.
+        __TBB_ASSERT( s==st_plugged, NULL );
+    }
+    ++my_server.my_slack;
+    my_server.remove_server_ref();
+}
+
+inline void private_worker::wake_or_launch() {
+    if( my_state==st_init && my_state.compare_and_swap( st_starting, st_init )==st_init )
+        thread_monitor::launch( thread_routine, this, my_server.my_stack_size );
+    else
+        my_thread_monitor.notify();
+}
+
+//------------------------------------------------------------------------
+// Methods of private_server
+//------------------------------------------------------------------------
+private_server::private_server( tbb_client& client ) : 
+    my_client(client), 
+    my_n_thread(client.max_job_count()),
+    my_stack_size(client.min_stack_size()),
+    my_thread_array(NULL) 
+{
+    my_ref_count = my_n_thread+1;
+    my_slack = 0;
+#if TBB_USE_ASSERT
+    my_net_slack_requests = 0;
+#endif /* TBB_USE_ASSERT */
+    my_asleep_list_root = NULL;
+    my_thread_array = tbb::cache_aligned_allocator<padded_private_worker>().allocate( my_n_thread );
+    memset( my_thread_array, 0, sizeof(private_worker)*my_n_thread );
+    for( size_t i=0; i<my_n_thread; ++i ) {
+        private_worker* t = new( &my_thread_array[i] ) padded_private_worker( *this, client, i ); 
+        t->my_next = my_asleep_list_root;
+        my_asleep_list_root = t;
+    } 
+}
+
+private_server::~private_server() {
+    __TBB_ASSERT( my_net_slack_requests==0, NULL );
+    for( size_t i=my_n_thread; i--; ) 
+        my_thread_array[i].~padded_private_worker();
+    tbb::cache_aligned_allocator<padded_private_worker>().deallocate( my_thread_array, my_n_thread );
+    tbb::internal::poison_pointer( my_thread_array );
+}
+
+inline bool private_server::try_insert_in_asleep_list( private_worker& t ) {
+    tbb::spin_mutex::scoped_lock lock(my_asleep_list_mutex);
+    // Contribute to slack under lock so that if another takes that unit of slack,
+    // it sees us sleeping on the list and wakes us up.
+    int k = ++my_slack;
+    if( k<=0 ) {
+        t.my_next = my_asleep_list_root;
+        my_asleep_list_root = &t;
+        return true;
+    } else {
+        --my_slack;
+        return false;
+    }
+}
+
+void private_server::wake_some( int additional_slack ) {
+    __TBB_ASSERT( additional_slack>=0, NULL );
+    private_worker* wakee[2];
+    private_worker**w = wakee;
+    {
+        tbb::spin_mutex::scoped_lock lock(my_asleep_list_mutex);
+        while( my_asleep_list_root && w<wakee+2 ) {
+            if( additional_slack>0 ) {
+                --additional_slack;
+            } else {
+                // Try to claim unit of slack
+                int old;
+                do {
+                    old = my_slack;
+                    if( old<=0 ) goto done;
+                } while( my_slack.compare_and_swap(old-1,old)!=old );
+            }
+            // Pop sleeping worker to combine with claimed unit of slack
+            my_asleep_list_root = (*w++ = my_asleep_list_root)->my_next;
+        }
+        if( additional_slack ) {
+            // Contribute our unused slack to my_slack.
+            my_slack += additional_slack;
+        }
+    }
+done:
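+    // Wake (or launch) the claimed workers outside the mutex, so that thread
+    // notification and creation do not serialize on my_asleep_list_mutex.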
+    while( w>wakee ) 
+        (*--w)->wake_or_launch();
+}
+
+void private_server::adjust_job_count_estimate( int delta ) {
+#if TBB_USE_ASSERT
+    my_net_slack_requests+=delta;
+#endif /* TBB_USE_ASSERT */
+    if( delta<0 ) {
+        my_slack+=delta;
+    } else if( delta>0 ) {
+        wake_some( delta );
+    }
+}
+
+//! Factory method called from task.cpp to create a private_server.
+tbb_server* make_private_server( tbb_client& client ) {
+    return new( tbb::cache_aligned_allocator<private_server>().allocate(1) ) private_server(client);
+}
+        
+} // namespace rml
+} // namespace internal
+} // namespace tbb
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/queuing_mutex.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/queuing_mutex.cpp
new file mode 100644 (file)
index 0000000..4edf371
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/tbb_machine.h"
+#include "tbb/tbb_stddef.h"
+#include "tbb_misc.h"
+#include "tbb/queuing_mutex.h"
+#include "itt_notify.h"
+
+
+namespace tbb {
+
+using namespace internal;
+
+//! A method to acquire queuing_mutex lock
+void queuing_mutex::scoped_lock::acquire( queuing_mutex& m )
+{
+    __TBB_ASSERT( !this->mutex, "scoped_lock is already holding a mutex");
+
+    // Must set all fields before the fetch_and_store, because once the
+    // fetch_and_store executes, *this becomes accessible to other threads.
+    mutex = &m;
+    next  = NULL;
+    going = 0;
+
+    // The fetch_and_store must have release semantics, because we are
+    // "sending" the fields initialized above to other processors.
+    scoped_lock* pred = m.q_tail.fetch_and_store<tbb::release>(this);
+    if( pred ) {
+        ITT_NOTIFY(sync_prepare, mutex);
+        __TBB_ASSERT( !pred->next, "the predecessor has another successor!");
+        pred->next = this;
+        spin_wait_while_eq( going, 0ul );
+    }
+    ITT_NOTIFY(sync_acquired, mutex);
+
+    // Force acquire so that user's critical section receives correct values
+    // from processor that was previously in the user's critical section.
+    __TBB_load_with_acquire(going);
+}
+
+//! A method to acquire queuing_mutex if it is free
+bool queuing_mutex::scoped_lock::try_acquire( queuing_mutex& m )
+{
+    __TBB_ASSERT( !this->mutex, "scoped_lock is already holding a mutex");
+
+    // Must set all fields before the fetch_and_store, because once the
+    // fetch_and_store executes, *this becomes accessible to other threads.
+    next  = NULL;
+    going = 0;
+
+    if( m.q_tail ) return false;
+    // The CAS must have release semantics, because we are
+    // "sending" the fields initialized above to other processors.
+    scoped_lock* pred = m.q_tail.compare_and_swap<tbb::release>(this, NULL);
+
+    // Force acquire so that user's critical section receives correct values
+    // from processor that was previously in the user's critical section.
+    // try_acquire should always have acquire semantic, even if failed.
+    __TBB_load_with_acquire(going);
+    if( !pred ) {
+        mutex = &m;
+        ITT_NOTIFY(sync_acquired, mutex);
+        return true;
+    } else return false;
+}
+
+//! A method to release queuing_mutex lock
+void queuing_mutex::scoped_lock::release( )
+{
+    __TBB_ASSERT(this->mutex!=NULL, "no lock acquired");
+
+    ITT_NOTIFY(sync_releasing, mutex);
+    if( !next ) {
+        if( this == mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) {
+            // this was the only item in the queue, and the queue is now empty.
+            goto done;
+        }
+        // Someone in the queue
+        spin_wait_while_eq( next, (scoped_lock*)0 );
+    }
+    __TBB_ASSERT(next,NULL);
+    __TBB_store_with_release(next->going, 1);
+done:
+    initialize();
+}
+
+void queuing_mutex::internal_construct() {
+    ITT_SYNC_CREATE(this, _T("tbb::queuing_mutex"), _T(""));
+}
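+
+// Illustrative sketch only, not part of the library: typical client-side use
+// of the FIFO lock implemented above.  Each waiter spins only on its own
+// 'going' flag, so contention stays local to one cache line per thread.
+// The example_* names are hypothetical.
+void example_update( queuing_mutex& m, int& shared_value ) {
+    queuing_mutex::scoped_lock lock( m );   // the constructor calls acquire()
+    ++shared_value;
+}                                           // the destructor calls release()
+
+bool example_try_update( queuing_mutex& m, int& shared_value ) {
+    queuing_mutex::scoped_lock lock;        // not holding anything yet
+    if( !lock.try_acquire( m ) )
+        return false;                       // mutex busy: give up immediately
+    ++shared_value;
+    return true;                            // released when 'lock' goes out of scope
+}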
+
+} // namespace tbb
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/queuing_rw_mutex.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/queuing_rw_mutex.cpp
new file mode 100644 (file)
index 0000000..d02f358
--- /dev/null
@@ -0,0 +1,505 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+/** Before making any changes in the implementation, please emulate algorithmic changes
+    with the SPIN tool using <TBB directory>/tools/spin_models/ReaderWriterMutex.pml.
+    Some code may look as if it "can be restructured", but its structure does matter! */
+
+#include "tbb/tbb_machine.h"
+#include "tbb/tbb_stddef.h"
+#include "tbb/tbb_machine.h"
+#include "tbb/queuing_rw_mutex.h"
+#include "itt_notify.h"
+
+
+namespace tbb {
+
+using namespace internal;
+
+//! Flag bits in a state_t that specify information about a locking request.
+enum state_t_flags {
+    STATE_NONE = 0,
+    STATE_WRITER = 1,
+    STATE_READER = 1<<1,
+    STATE_READER_UNBLOCKNEXT = 1<<2,
+    STATE_COMBINED_WAITINGREADER = STATE_READER | STATE_READER_UNBLOCKNEXT,
+    STATE_ACTIVEREADER = 1<<3,
+    STATE_COMBINED_READER = STATE_COMBINED_WAITINGREADER | STATE_ACTIVEREADER,
+    STATE_UPGRADE_REQUESTED = 1<<4,
+    STATE_UPGRADE_WAITING = 1<<5,
+    STATE_UPGRADE_LOSER = 1<<6,
+    STATE_COMBINED_UPGRADING = STATE_UPGRADE_WAITING | STATE_UPGRADE_LOSER
+};
+
+const unsigned char RELEASED = 0;
+const unsigned char ACQUIRED = 1;
+template<typename T>
+inline atomic<T>& as_atomic( T& t ) {
+    return *(atomic<T>*)&t;
+}
+
+inline bool queuing_rw_mutex::scoped_lock::try_acquire_internal_lock()
+{
+    return as_atomic(internal_lock).compare_and_swap<tbb::acquire>(ACQUIRED,RELEASED) == RELEASED;
+}
+
+inline void queuing_rw_mutex::scoped_lock::acquire_internal_lock()
+{
+    // Usually, we would use the test-test-and-set idiom here, with exponential backoff.
+    // But so far, experiments indicate there is no value in doing so here.
+    while( !try_acquire_internal_lock() ) {
+        __TBB_Pause(1);
+    }
+}
+
+inline void queuing_rw_mutex::scoped_lock::release_internal_lock()
+{
+    __TBB_store_with_release(internal_lock,RELEASED);
+}
+
+inline void queuing_rw_mutex::scoped_lock::wait_for_release_of_internal_lock()
+{
+    spin_wait_until_eq(internal_lock, RELEASED);
+}
+
+inline void queuing_rw_mutex::scoped_lock::unblock_or_wait_on_internal_lock( uintptr_t flag ) {
+    if( flag )
+        wait_for_release_of_internal_lock();
+    else
+        release_internal_lock();
+}
+
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+    // Workaround for overzealous compiler warnings
+    #pragma warning (push)
+    #pragma warning (disable: 4311 4312)
+#endif
+
+//! A view of a T* with additional functionality for twiddling low-order bits.
+template<typename T>
+class tricky_atomic_pointer: no_copy {
+public:
+    typedef typename atomic_rep<sizeof(T*)>::word word;
+
+    template<memory_semantics M>
+    static T* fetch_and_add( T* volatile * location, word addend ) {
+        return reinterpret_cast<T*>( atomic_traits<sizeof(T*),M>::fetch_and_add(location, addend) );
+    }
+    template<memory_semantics M>
+    static T* fetch_and_store( T* volatile * location, T* value ) {
+        return reinterpret_cast<T*>( atomic_traits<sizeof(T*),M>::fetch_and_store(location, reinterpret_cast<word>(value)) );
+    }
+    template<memory_semantics M>
+    static T* compare_and_swap( T* volatile * location, T* value, T* comparand ) {
+        return reinterpret_cast<T*>(
+                 atomic_traits<sizeof(T*),M>::compare_and_swap(location, reinterpret_cast<word>(value),
+                                                              reinterpret_cast<word>(comparand))
+               );
+    }
+
+    T* & ref;
+    tricky_atomic_pointer( T*& original ) : ref(original) {};
+    tricky_atomic_pointer( T* volatile & original ) : ref(original) {};
+    T* operator&( word operand2 ) const {
+        return reinterpret_cast<T*>( reinterpret_cast<word>(ref) & operand2 );
+    }
+    T* operator|( word operand2 ) const {
+        return reinterpret_cast<T*>( reinterpret_cast<word>(ref) | operand2 );
+    }
+};
+
+typedef tricky_atomic_pointer<queuing_rw_mutex::scoped_lock> tricky_pointer;
+
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+    // Workaround for overzealous compiler warnings
+    #pragma warning (pop)
+#endif
+
+//! Mask for low order bit of a pointer.
+static const tricky_pointer::word FLAG = 0x1;
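+// The low-order bit of a queue-link pointer serves as an "in use"/signal flag;
+// scoped_lock objects are at least pointer-aligned, so bit 0 of a genuine
+// pointer is always zero and is free to carry the flag.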
+
+inline
+uintptr_t get_flag( queuing_rw_mutex::scoped_lock* ptr ) { 
+    return uintptr_t(tricky_pointer(ptr)&FLAG);
+}
+
+//------------------------------------------------------------------------
+// Methods of queuing_rw_mutex::scoped_lock
+//------------------------------------------------------------------------
+
+void queuing_rw_mutex::scoped_lock::acquire( queuing_rw_mutex& m, bool write )
+{
+    __TBB_ASSERT( !this->mutex, "scoped_lock is already holding a mutex");
+
+    // Must set all fields before the fetch_and_store, because once the
+    // fetch_and_store executes, *this becomes accessible to other threads.
+    mutex = &m;
+    prev  = NULL;
+    next  = NULL;
+    going = 0;
+    state = state_t(write ? STATE_WRITER : STATE_READER);
+    internal_lock = RELEASED;
+
+    queuing_rw_mutex::scoped_lock* pred = m.q_tail.fetch_and_store<tbb::release>(this);
+
+    if( write ) {       // Acquiring for write
+
+        if( pred ) {
+            ITT_NOTIFY(sync_prepare, mutex);
+            pred = tricky_pointer(pred) & ~FLAG;
+            __TBB_ASSERT( !( tricky_pointer(pred) & FLAG ), "use of corrupted pointer!" );
+            __TBB_ASSERT( !pred->next, "the predecessor has another successor!");
+            // ensure release semantics on IPF
+           __TBB_store_with_release(pred->next,this);
+            spin_wait_until_eq(going, 1);
+        }
+
+    } else {            // Acquiring for read
+#if DO_ITT_NOTIFY
+        bool sync_prepare_done = false;
+#endif
+        if( pred ) {
+            unsigned short pred_state;
+            __TBB_ASSERT( !this->prev, "the predecessor is already set" );
+            if( tricky_pointer(pred)&FLAG ) {
+                /* this is only possible if pred is an upgrading reader and it signals us to wait */
+                pred_state = STATE_UPGRADE_WAITING;
+                pred = tricky_pointer(pred) & ~FLAG;
+            } else {
+                // Load pred->state now, because once pred->next becomes
+                // non-NULL, we must assume that *pred might be destroyed.
+                pred_state = pred->state.compare_and_swap<tbb::acquire>(STATE_READER_UNBLOCKNEXT, STATE_READER);
+            }
+            this->prev = pred;
+            __TBB_ASSERT( !( tricky_pointer(pred) & FLAG ), "use of corrupted pointer!" );
+            __TBB_ASSERT( !pred->next, "the predecessor has another successor!");
+            // ensure release semantics on IPF
+           __TBB_store_with_release(pred->next,this);
+            if( pred_state != STATE_ACTIVEREADER ) {
+#if DO_ITT_NOTIFY
+                sync_prepare_done = true;
+                ITT_NOTIFY(sync_prepare, mutex);
+#endif
+                spin_wait_until_eq(going, 1);
+            }
+        }
+        unsigned short old_state = state.compare_and_swap<tbb::acquire>(STATE_ACTIVEREADER, STATE_READER);
+        if( old_state!=STATE_READER ) {
+#if DO_ITT_NOTIFY
+            if( !sync_prepare_done )
+                ITT_NOTIFY(sync_prepare, mutex);
+#endif
+            // Failed to become active reader -> need to unblock the next waiting reader first
+            __TBB_ASSERT( state==STATE_READER_UNBLOCKNEXT, "unexpected state" );
+            spin_wait_while_eq(next, (scoped_lock*)NULL);
+            /* The state should be changed before unblocking the next waiter; otherwise it
+               might finish, and another thread could observe our old state and be left blocked. */
+            state = STATE_ACTIVEREADER;
+            // ensure release semantics on IPF
+           __TBB_store_with_release(next->going,1);
+        }
+    }
+
+    ITT_NOTIFY(sync_acquired, mutex);
+
+    // Force acquire so that user's critical section receives correct values
+    // from processor that was previously in the user's critical section.
+    __TBB_load_with_acquire(going);
+}
+
+bool queuing_rw_mutex::scoped_lock::try_acquire( queuing_rw_mutex& m, bool write )
+{
+    __TBB_ASSERT( !this->mutex, "scoped_lock is already holding a mutex");
+
+    // Must set all fields before the fetch_and_store, because once the
+    // fetch_and_store executes, *this becomes accessible to other threads.
+    prev  = NULL;
+    next  = NULL;
+    going = 0;
+    state = state_t(write ? STATE_WRITER : STATE_ACTIVEREADER);
+    internal_lock = RELEASED;
+
+    if( m.q_tail ) return false;
+    // The CAS must have release semantics, because we are
+    // "sending" the fields initialized above to other processors.
+    queuing_rw_mutex::scoped_lock* pred = m.q_tail.compare_and_swap<tbb::release>(this, NULL);
+
+    // Force acquire so that user's critical section receives correct values
+    // from processor that was previously in the user's critical section.
+    // try_acquire should always have acquire semantic, even if failed.
+    __TBB_load_with_acquire(going);
+
+    if( !pred ) {
+        mutex = &m;
+        ITT_NOTIFY(sync_acquired, mutex);
+        return true;
+    } else return false;
+
+}
+
+void queuing_rw_mutex::scoped_lock::release( )
+{
+    __TBB_ASSERT(this->mutex!=NULL, "no lock acquired");
+
+    ITT_NOTIFY(sync_releasing, mutex);
+
+    if( state == STATE_WRITER ) { // Acquired for write
+
+        // The logic below is the same as "writerUnlock", but restructured to remove "return" in the middle of the routine.
+        // In the statement below, acquire semantics of reading 'next' is required
+        // so that following operations with fields of 'next' are safe.
+        scoped_lock* n = __TBB_load_with_acquire(next);
+        if( !n ) {
+            if( this == mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) {
+                // this was the only item in the queue, and the queue is now empty.
+                goto done;
+            }
+            spin_wait_while_eq( next, (scoped_lock*)NULL );
+            n = next;
+        }
+        n->going = 2; // protect next queue node from being destroyed too early
+        if( n->state==STATE_UPGRADE_WAITING ) {
+            // A successor waiting for an upgrade means this writer was itself upgraded earlier.
+            acquire_internal_lock();
+            queuing_rw_mutex::scoped_lock* tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->prev), NULL);
+            n->state = STATE_UPGRADE_LOSER;
+            __TBB_store_with_release(n->going,1);
+            unblock_or_wait_on_internal_lock(get_flag(tmp));
+        } else {
+            __TBB_ASSERT( state & (STATE_COMBINED_WAITINGREADER | STATE_WRITER), "unexpected state" );
+            __TBB_ASSERT( !( tricky_pointer(n->prev) & FLAG ), "use of corrupted pointer!" );
+            n->prev = NULL;
+            // ensure release semantics on IPF
+            __TBB_store_with_release(n->going,1);
+        }
+
+    } else { // Acquired for read
+
+        queuing_rw_mutex::scoped_lock *tmp = NULL;
+retry:
+        // Addition to the original paper: Mark this->prev as in use
+        queuing_rw_mutex::scoped_lock *pred = tricky_pointer::fetch_and_add<tbb::acquire>(&(this->prev), FLAG);
+
+        if( pred ) {
+            if( !(pred->try_acquire_internal_lock()) )
+            {
+                // Failed to acquire the lock on pred. The predecessor either unlinks or upgrades.
+                // In the second case, it could or could not know my "in use" flag - need to check
+                tmp = tricky_pointer::compare_and_swap<tbb::release>(&(this->prev), pred, tricky_pointer(pred)|FLAG );
+                if( !(tricky_pointer(tmp)&FLAG) ) {
+                    // Wait for the predecessor to change this->prev (e.g. during unlink)
+                    spin_wait_while_eq( this->prev, tricky_pointer(pred)|FLAG );
+                    // Now owner of pred is waiting for _us_ to release its lock
+                    pred->release_internal_lock();
+                }
+                else ; // The "in use" flag is back -> the predecessor didn't get it and will release itself; nothing to do
+
+                tmp = NULL;
+                goto retry;
+            }
+            __TBB_ASSERT(pred && pred->internal_lock==ACQUIRED, "predecessor's lock is not acquired");
+            this->prev = pred;
+            acquire_internal_lock();
+
+            __TBB_store_with_release(pred->next,reinterpret_cast<scoped_lock *>(NULL));
+
+            if( !next && this != mutex->q_tail.compare_and_swap<tbb::release>(pred, this) ) {
+                spin_wait_while_eq( next, (void*)NULL );
+            }
+            __TBB_ASSERT( !get_flag(next), "use of corrupted pointer" );
+
+            // ensure acquire semantics of reading 'next'
+            if( __TBB_load_with_acquire(next) ) { // I->next != nil
+                // Equivalent to I->next->prev = I->prev but protected against (prev[n]&FLAG)!=0
+                tmp = tricky_pointer::fetch_and_store<tbb::release>(&(next->prev), pred);
+                // I->prev->next = I->next;
+                __TBB_ASSERT(this->prev==pred, NULL);
+                __TBB_store_with_release(pred->next,next);
+            }
+            // Safe to release in the order opposite to acquisition, which makes the code simpler
+            pred->release_internal_lock();
+
+        } else { // No predecessor when we looked
+            acquire_internal_lock();  // "exclusiveLock(&I->EL)"
+            // ensure acquire semantics of reading 'next'
+            scoped_lock* n = __TBB_load_with_acquire(next);
+            if( !n ) {
+                if( this != mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) {
+                    spin_wait_while_eq( next, (scoped_lock*)NULL );
+                    n = next;
+                } else {
+                    goto unlock_self;
+                }
+            }
+            n->going = 2; // protect next queue node from being destroyed too early
+            tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->prev), NULL);
+            // ensure release semantics on IPF
+            __TBB_store_with_release(n->going,1);
+        }
+unlock_self:
+        unblock_or_wait_on_internal_lock(get_flag(tmp));
+    }
+done:
+    spin_wait_while_eq( going, 2 );
+
+    initialize();
+}
+
+bool queuing_rw_mutex::scoped_lock::downgrade_to_reader()
+{
+    __TBB_ASSERT( state==STATE_WRITER, "no sense to downgrade a reader" );
+
+    ITT_NOTIFY(sync_releasing, mutex);
+
+    // ensure acquire semantics of reading 'next'
+    if( ! __TBB_load_with_acquire(next) ) {
+        state = STATE_READER;
+        if( this==mutex->q_tail ) {
+            unsigned short old_state = state.compare_and_swap<tbb::release>(STATE_ACTIVEREADER, STATE_READER);
+            if( old_state==STATE_READER ) {
+                goto downgrade_done;
+            }
+        }
+        /* wait for the next to register */
+        spin_wait_while_eq( next, (void*)NULL );
+    }
+    __TBB_ASSERT( next, "still no successor at this point!" );
+    if( next->state & STATE_COMBINED_WAITINGREADER )
+        __TBB_store_with_release(next->going,1);
+    else if( next->state==STATE_UPGRADE_WAITING )
+        // A successor waiting for an upgrade means this writer was itself upgraded earlier.
+        next->state = STATE_UPGRADE_LOSER;
+    state = STATE_ACTIVEREADER;
+
+downgrade_done:
+    return true;
+}
+
+bool queuing_rw_mutex::scoped_lock::upgrade_to_writer()
+{
+    __TBB_ASSERT( state==STATE_ACTIVEREADER, "only active reader can be upgraded" );
+
+    queuing_rw_mutex::scoped_lock * tmp;
+    queuing_rw_mutex::scoped_lock * me = this;
+
+    ITT_NOTIFY(sync_releasing, mutex);
+    state = STATE_UPGRADE_REQUESTED;
+requested:
+    __TBB_ASSERT( !( tricky_pointer(next) & FLAG ), "use of corrupted pointer!" );
+    acquire_internal_lock();
+    if( this != mutex->q_tail.compare_and_swap<tbb::release>(tricky_pointer(me)|FLAG, this) ) {
+        spin_wait_while_eq( next, (void*)NULL );
+        queuing_rw_mutex::scoped_lock * n;
+        n = tricky_pointer::fetch_and_add<tbb::acquire>(&(this->next), FLAG);
+        unsigned short n_state = n->state;
+        /* The next reader may be blocked by our state; the best thing to do is to unblock it. */
+        if( n_state & STATE_COMBINED_WAITINGREADER )
+            __TBB_store_with_release(n->going,1);
+        tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->prev), this);
+        unblock_or_wait_on_internal_lock(get_flag(tmp));
+        if( n_state & (STATE_COMBINED_READER | STATE_UPGRADE_REQUESTED) ) {
+            // save n|FLAG for simplicity of following comparisons
+            tmp = tricky_pointer(n)|FLAG;
+            atomic_backoff backoff;
+            while(next==tmp) {
+                if( state & STATE_COMBINED_UPGRADING ) {
+                    if( __TBB_load_with_acquire(next)==tmp )
+                        next = n;
+                    goto waiting;
+                }
+                backoff.pause();
+            }
+            __TBB_ASSERT(next!=(tricky_pointer(n)|FLAG), NULL);
+            goto requested;
+        } else {
+            __TBB_ASSERT( n_state & (STATE_WRITER | STATE_UPGRADE_WAITING), "unexpected state");
+            __TBB_ASSERT( (tricky_pointer(n)|FLAG)==next, NULL);
+            next = n;
+        }
+    } else {
+        /* We are in the tail; whoever comes next is blocked by q_tail&FLAG */
+        release_internal_lock();
+    } // if( this != mutex->q_tail... )
+    state.compare_and_swap<tbb::acquire>(STATE_UPGRADE_WAITING, STATE_UPGRADE_REQUESTED);
+
+waiting:
+    __TBB_ASSERT( !( tricky_pointer(next) & FLAG ), "use of corrupted pointer!" );
+    __TBB_ASSERT( state & STATE_COMBINED_UPGRADING, "wrong state at upgrade waiting_retry" );
+    __TBB_ASSERT( me==this, NULL );
+    ITT_NOTIFY(sync_prepare, mutex);
+    /* if no one was blocked by the "corrupted" q_tail, turn it back */
+    mutex->q_tail.compare_and_swap<tbb::release>( this, tricky_pointer(me)|FLAG );
+    queuing_rw_mutex::scoped_lock * pred;
+    pred = tricky_pointer::fetch_and_add<tbb::acquire>(&(this->prev), FLAG);
+    if( pred ) {
+        bool success = pred->try_acquire_internal_lock();
+        pred->state.compare_and_swap<tbb::release>(STATE_UPGRADE_WAITING, STATE_UPGRADE_REQUESTED);
+        if( !success ) {
+            tmp = tricky_pointer::compare_and_swap<tbb::release>(&(this->prev), pred, tricky_pointer(pred)|FLAG );
+            if( tricky_pointer(tmp)&FLAG ) {
+                spin_wait_while_eq(this->prev, pred);
+                pred = this->prev;
+            } else {
+                spin_wait_while_eq( this->prev, tricky_pointer(pred)|FLAG );
+                pred->release_internal_lock();
+            }
+        } else {
+            this->prev = pred;
+            pred->release_internal_lock();
+            spin_wait_while_eq(this->prev, pred);
+            pred = this->prev;
+        }
+        if( pred )
+            goto waiting;
+    } else {
+        // restore the corrupted prev field for possible further use (e.g. if we later downgrade back to a reader)
+        this->prev = pred;
+    }
+    __TBB_ASSERT( !pred && !this->prev, NULL );
+
+    // additional lifetime issue prevention checks
+    // wait for the successor to finish working with my fields
+    wait_for_release_of_internal_lock();
+    // now wait for the predecessor to finish working with my fields
+    spin_wait_while_eq( going, 2 );
+    // spin_wait_while_eq provides acquire semantics at its end.
+
+    bool result = ( state != STATE_UPGRADE_LOSER );
+    state = STATE_WRITER;
+    going = 1;
+
+    ITT_NOTIFY(sync_acquired, mutex);
+    return result;
+}
+
+void queuing_rw_mutex::internal_construct() {
+    ITT_SYNC_CREATE(this, _T("tbb::queuing_rw_mutex"), _T(""));
+}
+
+} // namespace tbb
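
For orientation, a minimal usage sketch of the upgrade path implemented above; needs_update() and do_update() are hypothetical stand-ins for application code and are not part of TBB.

    #include "tbb/queuing_rw_mutex.h"

    static tbb::queuing_rw_mutex table_mutex;

    static bool needs_update(int) { return true; }   // hypothetical predicate
    static void do_update(int) {}                    // hypothetical mutator

    void update_if_needed(int key) {
        // Enter as a reader; most callers only inspect shared state.
        tbb::queuing_rw_mutex::scoped_lock lock(table_mutex, /*write=*/false);
        if (needs_update(key)) {
            // upgrade_to_writer() returns false when the lock had to be released
            // temporarily (the STATE_UPGRADE_LOSER path above), so the
            // precondition must be re-checked after an unsuccessful upgrade.
            if (lock.upgrade_to_writer() || needs_update(key))
                do_update(key);
        }
    }   // the lock is released by the scoped_lock destructor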
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/reader_writer_lock.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/reader_writer_lock.cpp
new file mode 100644 (file)
index 0000000..2f4e3f6
--- /dev/null
@@ -0,0 +1,356 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/reader_writer_lock.h"
+#include "tbb/tbb_machine.h"
+#include "tbb/tbb_exception.h"
+#include "itt_notify.h"
+
+namespace tbb {
+namespace interface5 {
+
+const unsigned WFLAG1 = 0x1;  // writer interested or active
+const unsigned WFLAG2 = 0x2;  // writers interested, no entering readers
+const unsigned RFLAG = 0x4;   // reader interested but not active
+const unsigned RC_INCR = 0x8; // to adjust reader count
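
For orientation, a few example values of the packed counter that these constants produce (illustrative values only, not taken from the sources):

    // rdr_count_and_flags keeps the reader count in the bits at and above RC_INCR
    // and the interest/activity flags in the three low bits, for example:
    //   0x00 : lock is free
    //   0x01 : a writer is interested or active, no readers      (WFLAG1)
    //   0x18 : three readers hold the lock                       (3*RC_INCR)
    //   0x19 : three readers hold the lock, a writer is waiting  (3*RC_INCR + WFLAG1)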
+
+
+// Perform an atomic bitwise-OR on the operand with the addend, and return
+// the previous value of the operand.
+inline unsigned fetch_and_or(atomic<unsigned>& operand, unsigned addend) {
+    tbb::internal::atomic_backoff backoff;
+    for (;;) {
+        unsigned old = operand;
+        unsigned result = operand.compare_and_swap(old|addend, old);
+        if (result==old) return old;
+        backoff.pause();
+    }
+}
+
+// Perform an atomic bitwise-AND on the operand with the addend, and return
+// the previous value of the operand.
+inline unsigned fetch_and_and(atomic<unsigned>& operand, unsigned addend) {
+    tbb::internal::atomic_backoff backoff;
+    for (;;) {
+        unsigned old = operand;
+        unsigned result = operand.compare_and_swap(old&addend, old);
+        if (result==old) return old;
+        backoff.pause();
+    }
+}
+
+//! Spin WHILE the value at the location is greater than or equal to a given value
+/** T and U should be comparable types. */
+template<typename T, typename U>
+void spin_wait_while_geq( const volatile T& location, U value ) {
+    tbb::internal::atomic_backoff backoff;
+    while( location>=value ) backoff.pause();
+}
+
+//! Spin UNTIL (location & value) is true.
+/** T and U should be comparable types. */
+template<typename T, typename U>
+void spin_wait_until_and( const volatile T& location, U value ) {
+    tbb::internal::atomic_backoff backoff;
+    while( !(location & value) ) backoff.pause();
+}
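
The compare_and_swap loops above are the classic read-modify-write retry pattern; here is a stand-alone sketch of the same idea expressed against std::atomic (for illustration only, the TBB sources use their own atomics):

    #include <atomic>

    // Atomically OR 'bits' into 'operand' and return the previous value,
    // converging just like the fetch_and_or/backoff loop above.
    inline unsigned fetch_and_or_cas(std::atomic<unsigned>& operand, unsigned bits) {
        unsigned old = operand.load(std::memory_order_relaxed);
        // compare_exchange_weak reloads 'old' on failure, so the loop retries
        // with the freshly observed value until the update lands.
        while (!operand.compare_exchange_weak(old, old | bits)) { /* retry */ }
        return old;
    }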
+
+
+void reader_writer_lock::internal_construct() {
+    reader_head = NULL;
+    writer_head = NULL;
+    writer_tail = NULL;
+    rdr_count_and_flags = 0;
+    my_current_writer = tbb_thread::id();
+#if TBB_USE_THREADING_TOOLS
+    ITT_SYNC_CREATE(this, _T("tbb::reader_writer_lock"), _T(""));
+#endif /* TBB_USE_THREADING_TOOLS */
+}
+
+void reader_writer_lock::internal_destroy() {
+    __TBB_ASSERT(rdr_count_and_flags==0, "reader_writer_lock destroyed with pending readers/writers.");
+    __TBB_ASSERT(reader_head==NULL, "reader_writer_lock destroyed with pending readers.");
+    __TBB_ASSERT(writer_tail==NULL, "reader_writer_lock destroyed with pending writers.");
+    __TBB_ASSERT(writer_head==NULL, "reader_writer_lock destroyed with pending/active writers.");
+}
+
+// Acquires the reader_writer_lock for write.    If the lock is currently held in write 
+// mode by another context, the writer will block by spinning on a local variable. 
+// Throws exception improper_lock if the context tries to acquire a
+// reader_writer_lock that it already has write ownership of.
+void reader_writer_lock::lock() {
+    if (is_current_writer()) { // recursive lock attempt
+        // we don't support recursive writer locks; throw exception
+        tbb::internal::throw_exception(tbb::internal::eid_improper_lock);
+    }
+    else {
+        scoped_lock *a_writer_lock = new scoped_lock();
+        (void) start_write(a_writer_lock);
+    }
+}
+
+// Tries to acquire the reader_writer_lock for write.    This function does not block.
+// Return Value: True or false, depending on whether the lock is acquired or not.    
+// If the lock is already held by this acquiring context, try_lock() returns false.
+bool reader_writer_lock::try_lock() {
+    if (is_current_writer()) { // recursive lock attempt
+        return false;
+    }
+    else {
+        scoped_lock *a_writer_lock = new scoped_lock();
+        a_writer_lock->status = waiting_nonblocking;
+        return start_write(a_writer_lock);
+    }
+}
+    
+bool reader_writer_lock::start_write(scoped_lock *I) {
+    tbb_thread::id id = this_tbb_thread::get_id();
+    scoped_lock *pred = NULL;
+    if (I->status == waiting_nonblocking) {
+        if ((pred = writer_tail.compare_and_swap(I, NULL)) != NULL) {
+            delete I;
+            return false;
+        }
+    }
+    else {
+        ITT_NOTIFY(sync_prepare, this);
+        pred = writer_tail.fetch_and_store(I);
+    }
+    if (pred) 
+        pred->next = I;
+    else {
+        set_next_writer(I);
+        if (I->status == waiting_nonblocking) {
+            if (I->next) { // potentially more writers
+                set_next_writer(I->next);
+            }
+            else { // no more writers
+                writer_head.fetch_and_store(NULL);
+                if (I != writer_tail.compare_and_swap(NULL, I)) { // an incoming writer is in the process of being added
+                    spin_wait_while_eq(I->next, (scoped_lock *)NULL);  // wait for new writer to be added
+                    __TBB_ASSERT(I->next, "There should be a node following the last writer.");
+                    set_next_writer(I->next);
+                }
+            }
+            delete I;
+            return false;
+        }
+    }
+    spin_wait_while_eq(I->status, waiting);
+    ITT_NOTIFY(sync_acquired, this);
+    my_current_writer = id;
+    return true;
+}
+    
+void reader_writer_lock::set_next_writer(scoped_lock *W) {
+    writer_head = W;
+    if (W->status == waiting_nonblocking) {
+        if (rdr_count_and_flags.compare_and_swap(WFLAG1+WFLAG2, 0) == 0) {
+            W->status = active;
+        }
+    }
+    else {
+        if (fetch_and_or(rdr_count_and_flags, WFLAG1) & RFLAG) { // reader present
+            spin_wait_until_and(rdr_count_and_flags, WFLAG2); // block until readers set WFLAG2
+        }
+        else { // no reader in timing window
+            __TBB_AtomicOR(&rdr_count_and_flags, WFLAG2);
+        } 
+        spin_wait_while_geq(rdr_count_and_flags, RC_INCR); // block until readers finish
+        W->status = active;
+   }
+}
+    
+// Acquires the reader_writer_lock for read.    If the lock is currently held by a writer,
+// this reader will block and wait until the writers are done. 
+// Throws exception improper_lock when the context tries to acquire a reader_writer_lock 
+// that it already has write ownership of.
+void reader_writer_lock::lock_read() {
+    if (is_current_writer()) { // recursive lock attempt
+        // we don't support writer->reader downgrade; throw exception
+        tbb::internal::throw_exception(tbb::internal::eid_improper_lock);
+    }
+    else {
+        scoped_lock_read a_reader_lock;
+        start_read(&a_reader_lock);
+    }
+}
+    
+// Tries to acquire the reader_writer_lock for read.    This function does not block.
+// Return Value: True or false, depending on whether the lock is acquired or not.    
+bool reader_writer_lock::try_lock_read() {
+    if (is_current_writer()) { // recursive lock attempt
+        return false;
+    }
+    else {
+        if (rdr_count_and_flags.fetch_and_add(RC_INCR) & (WFLAG1+WFLAG2)) { // writers present
+            rdr_count_and_flags -= RC_INCR;
+            return false;
+        }
+        else { // no writers
+            ITT_NOTIFY(sync_acquired, this);
+            return true;
+        }
+    }
+}
+
+void reader_writer_lock::start_read(scoped_lock_read *I) {
+    ITT_NOTIFY(sync_prepare, this);
+    I->next = reader_head.fetch_and_store(I);
+    if (!I->next) { // first arriving reader in my group; set RFLAG, test writer flags
+        // unblock and/or update statuses of non-blocking readers
+        if (!(fetch_and_or(rdr_count_and_flags, RFLAG) & (WFLAG1+WFLAG2))) { // no writers
+            unblock_readers();
+        }
+    }
+    __TBB_ASSERT(I->status == waiting || I->status == active, "Lock requests should be waiting or active before blocking.");
+    spin_wait_while_eq(I->status, waiting); // block
+    if (I->next) {
+        __TBB_ASSERT(I->next->status == waiting, NULL);
+        rdr_count_and_flags += RC_INCR;
+        I->next->status = active; // wake successor
+    }
+    ITT_NOTIFY(sync_acquired, this);
+}
+
+void reader_writer_lock::unblock_readers() {
+    // clear rdr interest flag, increment rdr count
+    __TBB_ASSERT(rdr_count_and_flags&RFLAG, NULL);
+    rdr_count_and_flags += RC_INCR-RFLAG;
+    __TBB_ASSERT(rdr_count_and_flags >= RC_INCR, NULL);
+    // indicate clear of window
+    if (rdr_count_and_flags & WFLAG1 && !(rdr_count_and_flags & WFLAG2)) {
+        __TBB_AtomicOR(&rdr_count_and_flags, WFLAG2);
+    }
+    // unblock waiting readers 
+    scoped_lock_read *head = reader_head.fetch_and_store(NULL);
+    __TBB_ASSERT(head, NULL);
+    __TBB_ASSERT(head->status == waiting, NULL);
+    head->status = active;
+}
+    
+// Releases the reader_writer_lock
+void reader_writer_lock::unlock() {
+    if( my_current_writer!=tbb_thread::id() ) {
+        // A writer owns the lock
+        __TBB_ASSERT(is_current_writer(), "caller of reader_writer_lock::unlock() does not own the lock.");
+        __TBB_ASSERT(writer_head, NULL);
+        __TBB_ASSERT(writer_head->status==active, NULL);
+        scoped_lock *a_writer_lock = writer_head;
+        end_write(a_writer_lock);
+        __TBB_ASSERT(a_writer_lock != writer_head, "Internal error: About to turn writer_head into dangling reference.");
+        delete a_writer_lock;
+    } else {
+        end_read();
+    }
+}
+    
+void reader_writer_lock::end_write(scoped_lock *I) {
+    __TBB_ASSERT(I==writer_head, "Internal error: can't unlock a thread that is not holding the lock.");
+    my_current_writer = tbb_thread::id();
+    ITT_NOTIFY(sync_releasing, this);
+    if (I->next) { // potentially more writers
+        writer_head = I->next;
+        writer_head->status = active;
+    }
+    else { // No more writers; clear writer flag, test reader interest flag
+        __TBB_ASSERT(writer_head, NULL);
+        if (fetch_and_and(rdr_count_and_flags, ~(WFLAG1+WFLAG2)) & RFLAG) {
+            unblock_readers();
+        }
+        writer_head.fetch_and_store(NULL);
+        if (I != writer_tail.compare_and_swap(NULL, I)) { // an incoming writer is in the process of being added
+            spin_wait_while_eq(I->next, (scoped_lock *)NULL);  // wait for new writer to be added
+            __TBB_ASSERT(I->next, "There should be a node following the last writer.");
+            set_next_writer(I->next);
+        }
+    }
+}
+    
+void reader_writer_lock::end_read() {
+    ITT_NOTIFY(sync_releasing, this);
+    __TBB_ASSERT(rdr_count_and_flags >= RC_INCR, "unlock() called but no readers hold the lock.");
+    rdr_count_and_flags -= RC_INCR;
+}
+
+inline bool reader_writer_lock::is_current_writer() {
+    return my_current_writer==this_tbb_thread::get_id();
+}
+
+// Construct with a blocking attempt to acquire a write lock on the passed reader_writer_lock 
+void reader_writer_lock::scoped_lock::internal_construct (reader_writer_lock& lock) {
+    mutex = &lock;
+    next = NULL;
+    status = waiting;
+    if (mutex->is_current_writer()) { // recursive lock attempt
+        // we don't support recursive writer locks; throw exception
+        tbb::internal::throw_exception(tbb::internal::eid_improper_lock);
+    }
+    else { // this thread holds no locks
+        (void) mutex->start_write(this);
+    }
+}
+
+inline reader_writer_lock::scoped_lock::scoped_lock() : mutex(NULL), next(NULL) { 
+    status = waiting;
+}
+
+// Construct with a blocking attempt to acquire a write lock on the passed reader_writer_lock 
+void reader_writer_lock::scoped_lock_read::internal_construct (reader_writer_lock& lock) {
+    mutex = &lock;
+    next = NULL;
+    status = waiting;
+    if (mutex->is_current_writer()) { // recursive lock attempt
+        // we don't support writer->reader downgrade; throw exception
+        tbb::internal::throw_exception(tbb::internal::eid_improper_lock);
+    }
+    else { // this thread holds no locks
+        mutex->start_read(this);
+    }
+}
+  
+inline reader_writer_lock::scoped_lock_read::scoped_lock_read() : mutex(NULL), next(NULL) {
+    status = waiting;
+}
+
+void reader_writer_lock::scoped_lock::internal_destroy() {
+    if (mutex) {
+        __TBB_ASSERT(mutex->is_current_writer(), "~scoped_lock() destroyed by thread different than thread that holds lock.");
+        mutex->end_write(this);
+    }
+    status = invalid;
+}
+
+void reader_writer_lock::scoped_lock_read::internal_destroy() { 
+    if (mutex)  
+        mutex->end_read(); 
+    status = invalid;
+}
+
+} // namespace interface5
+} // namespace tbb
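
A minimal usage sketch of the class defined above, assuming the public header tbb/reader_writer_lock.h; the shared integer is illustrative:

    #include "tbb/reader_writer_lock.h"

    static tbb::reader_writer_lock rw;
    static int shared_value = 0;

    int read_value() {
        // Readers run concurrently with one another and only exclude writers.
        tbb::reader_writer_lock::scoped_lock_read reader(rw);
        return shared_value;
    }   // end_read() is called from the scoped_lock_read destructor

    void write_value(int v) {
        // Writers are queued FIFO via the writer_tail list built in start_write().
        tbb::reader_writer_lock::scoped_lock writer(rw);
        shared_value = v;
    }   // end_write() is called from the scoped_lock destructor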
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/recursive_mutex.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/recursive_mutex.cpp
new file mode 100644 (file)
index 0000000..7083020
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/recursive_mutex.h"
+#include "itt_notify.h"
+
+namespace tbb {
+
+void recursive_mutex::scoped_lock::internal_acquire( recursive_mutex& m ) {
+#if _WIN32||_WIN64
+    switch( m.state ) {
+      case INITIALIZED: 
+        // since we cannot look into the internal of the CriticalSection object
+        // we won't know how many times the lock has been acquired, and thus
+        // we won't know when we may safely set the state back to INITIALIZED
+        // if we change the state to HELD as in mutex.cpp.  thus, we won't change
+        // the state for recursive_mutex
+        EnterCriticalSection( &m.impl );
+        break;
+      case DESTROYED: 
+        __TBB_ASSERT(false,"recursive_mutex::scoped_lock: mutex already destroyed"); 
+        break;
+      default: 
+        __TBB_ASSERT(false,"recursive_mutex::scoped_lock: illegal mutex state");
+        break;
+    }
+#else
+    int error_code = pthread_mutex_lock(&m.impl);
+    __TBB_ASSERT_EX(!error_code,"recursive_mutex::scoped_lock: pthread_mutex_lock failed");
+#endif /* _WIN32||_WIN64 */
+    my_mutex = &m;
+}
+
+void recursive_mutex::scoped_lock::internal_release() {
+    __TBB_ASSERT( my_mutex, "recursive_mutex::scoped_lock: not holding a mutex" );
+#if _WIN32||_WIN64    
+    switch( my_mutex->state ) {
+      case INITIALIZED: 
+        LeaveCriticalSection( &my_mutex->impl );
+        break;
+      case DESTROYED: 
+        __TBB_ASSERT(false,"recursive_mutex::scoped_lock: mutex already destroyed"); 
+        break;
+      default: 
+        __TBB_ASSERT(false,"recursive_mutex::scoped_lock: illegal mutex state");
+        break;
+    }
+#else
+     int error_code = pthread_mutex_unlock(&my_mutex->impl);
+     __TBB_ASSERT_EX(!error_code, "recursive_mutex::scoped_lock: pthread_mutex_unlock failed");
+#endif /* _WIN32||_WIN64 */
+     my_mutex = NULL;
+}
+
+bool recursive_mutex::scoped_lock::internal_try_acquire( recursive_mutex& m ) {
+#if _WIN32||_WIN64
+    switch( m.state ) {
+      case INITIALIZED: 
+        break;
+      case DESTROYED: 
+        __TBB_ASSERT(false,"recursive_mutex::scoped_lock: mutex already destroyed"); 
+        break;
+      default: 
+        __TBB_ASSERT(false,"recursive_mutex::scoped_lock: illegal mutex state");
+        break;
+    }
+#endif /* _WIN32||_WIN64 */
+    bool result;
+#if _WIN32||_WIN64
+    result = TryEnterCriticalSection(&m.impl)!=0;
+#else
+    result = pthread_mutex_trylock(&m.impl)==0;
+#endif /* _WIN32||_WIN64 */
+    if( result )
+        my_mutex = &m;
+    return result;
+}
+
+void recursive_mutex::internal_construct() {
+#if _WIN32||_WIN64
+    InitializeCriticalSection(&impl);
+    state = INITIALIZED;
+#else
+    pthread_mutexattr_t mtx_attr;
+    int error_code = pthread_mutexattr_init( &mtx_attr );
+    if( error_code )
+        tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutexattr_init failed");
+
+    pthread_mutexattr_settype( &mtx_attr, PTHREAD_MUTEX_RECURSIVE );
+    error_code = pthread_mutex_init( &impl, &mtx_attr );
+    if( error_code )
+        tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutex_init failed");
+    pthread_mutexattr_destroy( &mtx_attr );
+#endif /* _WIN32||_WIN64*/    
+    ITT_SYNC_CREATE(&impl, _T("tbb::recursive_mutex"), _T(""));
+}
+
+void recursive_mutex::internal_destroy() {
+#if _WIN32||_WIN64
+    switch( state ) {
+      case INITIALIZED:
+        DeleteCriticalSection(&impl);
+        break;
+      case DESTROYED: 
+        __TBB_ASSERT(false,"recursive_mutex: already destroyed");
+        break;
+      default: 
+         __TBB_ASSERT(false,"recursive_mutex: illegal state for destruction");
+         break;
+    }
+    state = DESTROYED;
+#else
+    int error_code = pthread_mutex_destroy(&impl); 
+    __TBB_ASSERT_EX(!error_code,"recursive_mutex: pthread_mutex_destroy failed");
+#endif /* _WIN32||_WIN64 */
+}
+
+} // namespace tbb
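
A small sketch of the re-entrancy that the PTHREAD_MUTEX_RECURSIVE / CRITICAL_SECTION setup above exists to allow; the recursive traversal is a hypothetical example:

    #include "tbb/recursive_mutex.h"

    static tbb::recursive_mutex m;

    void visit(int depth) {
        // The owning thread may re-acquire the mutex it already holds;
        // each scoped_lock releases one nesting level on destruction.
        tbb::recursive_mutex::scoped_lock lock(m);
        if (depth > 0)
            visit(depth - 1);
    }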
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/scheduler.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/scheduler.cpp
new file mode 100644 (file)
index 0000000..c0b4f18
--- /dev/null
@@ -0,0 +1,1176 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/tbb_machine.h"
+
+#include "custom_scheduler.h"
+#include "scheduler_utility.h"
+#include "governor.h"
+#include "market.h"
+#include "arena.h"
+#include "mailbox.h"
+#include "observer_proxy.h"
+#include "itt_notify.h"
+
+namespace tbb {
+namespace internal {
+
+/** Defined in tbb_main.cpp **/
+extern generic_scheduler* (*AllocateSchedulerPtr)( arena*, size_t index );
+
+inline generic_scheduler* allocate_scheduler ( arena* a, size_t index ) { 
+    return AllocateSchedulerPtr(a, index);
+}
+
+#if __TBB_TASK_GROUP_CONTEXT
+#if !__TBB_ARENA_PER_MASTER
+//! Head of the list of master thread schedulers.
+static scheduler_list_node_t the_scheduler_list_head;
+
+//! Mutex protecting access to the list of schedulers.
+static mutex the_scheduler_list_mutex;
+#endif /* !__TBB_ARENA_PER_MASTER */
+
+//! Counter that is incremented whenever new cancellation signal is sent to a task group.
+/** Together with generic_scheduler::local_cancel_count this forms a cross-thread signaling
+    mechanism that makes it possible to avoid locking on the hot path of normal execution flow.
+
+    When a descendant task group context is being registered or unregistered,
+    the global and local counters are compared. If they differ, it means that
+    a cancellation signal is being propagated, and the registration/deregistration
+    routines take a slower branch that may block (at most one thread of the pool
+    can be blocked at any moment). Otherwise the control path is lock-free and fast. **/
+uintptr_t global_cancel_count = 0;
+
+//! Context to be associated with dummy tasks of worker threads schedulers.
+/** It is never used for its direct purpose, and is introduced solely for the sake
+    of avoiding one extra conditional branch at the end of the wait_for_all method. **/
+static task_group_context dummy_context(task_group_context::isolated);
+#endif /* __TBB_TASK_GROUP_CONTEXT */
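
The global/local counter pair documented above is a simple epoch scheme; a stripped-down sketch of the same idea follows (illustrative only, these names do not appear in the TBB sources):

    #include <atomic>

    static std::atomic<unsigned long> global_epoch(0);   // bumped per cancellation signal

    struct per_thread_state {
        unsigned long local_epoch = 0;   // last epoch this thread synchronized with

        bool must_take_slow_path() const {
            // Cheap hot-path check: only when the epochs differ does the thread
            // take the locking branch that walks its context list.
            return local_epoch != global_epoch.load(std::memory_order_acquire);
        }
        void sync() { local_epoch = global_epoch.load(std::memory_order_acquire); }
    };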
+
+void Scheduler_OneTimeInitialization ( bool itt_present ) {
+    AllocateSchedulerPtr = itt_present ? &custom_scheduler<DefaultSchedulerTraits>::allocate_scheduler :
+                                      &custom_scheduler<IntelSchedulerTraits>::allocate_scheduler;
+#if __TBB_TASK_GROUP_CONTEXT && !__TBB_ARENA_PER_MASTER
+    ITT_SYNC_CREATE(&the_scheduler_list_mutex, SyncType_GlobalLock, SyncObj_SchedulersList);
+    the_scheduler_list_head.my_next = &the_scheduler_list_head;
+    the_scheduler_list_head.my_prev = &the_scheduler_list_head;
+#endif /* __TBB_TASK_GROUP_CONTEXT && !__TBB_ARENA_PER_MASTER */
+}
+
+//------------------------------------------------------------------------
+// scheduler interface
+//------------------------------------------------------------------------
+
+//  A pure virtual destructor should still have a body
+//  so the one for tbb::internal::scheduler::~scheduler() is provided here
+scheduler::~scheduler( ) {}
+
+//------------------------------------------------------------------------
+// generic_scheduler
+//------------------------------------------------------------------------
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // Suppress overzealous compiler warning about using 'this' in base initializer list. 
+    #pragma warning(push)
+    #pragma warning(disable:4355)
+#endif
+
+generic_scheduler::generic_scheduler( arena* a, size_t index ) :
+    my_stealing_threshold(0),
+    arena_index(index),
+    task_pool_size(0),
+    my_arena_slot(&dummy_slot),
+#if __TBB_ARENA_PER_MASTER
+    my_market(NULL),
+#endif /* __TBB_ARENA_PER_MASTER */
+    my_arena(a),
+    random( unsigned(this-(generic_scheduler*)NULL) ),
+    free_list(NULL),
+    innermost_running_task(NULL),
+    dummy_task(NULL),
+    ref_count(1),
+    my_affinity_id(0),
+    is_registered(false),
+    is_auto_initialized(false),
+#if __TBB_SCHEDULER_OBSERVER
+    local_last_observer_proxy(NULL),
+#endif /* __TBB_SCHEDULER_OBSERVER */
+#if __TBB_COUNT_TASK_NODES
+    task_node_count(0),
+#endif /* __TBB_COUNT_TASK_NODES */
+    small_task_count(1),   // Extra 1 is a guard reference
+    return_list(NULL),
+#if __TBB_TASK_GROUP_CONTEXT
+    local_ctx_list_update(0),
+    nonlocal_ctx_list_update(0)
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+#if __TBB_SURVIVE_THREAD_SWITCH && TBB_USE_ASSERT
+   ,my_cilk_state(cs_none)
+#endif /* __TBB_SURVIVE_THREAD_SWITCH && TBB_USE_ASSERT */
+{
+    dummy_slot.task_pool = allocate_task_pool( min_task_pool_size );
+    dummy_slot.head = dummy_slot.tail = 0;
+    dummy_task = &allocate_task( sizeof(task), __TBB_CONTEXT_ARG(NULL, NULL) );
+#if __TBB_TASK_GROUP_CONTEXT
+    context_list_head.my_prev = &context_list_head;
+    context_list_head.my_next = &context_list_head;
+    ITT_SYNC_CREATE(&context_list_mutex, SyncType_Scheduler, SyncObj_ContextsList);
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+    dummy_task->prefix().ref_count = 2;
+    ITT_SYNC_CREATE(&dummy_task->prefix().ref_count, SyncType_Scheduler, SyncObj_WorkerLifeCycleMgmt);
+    ITT_SYNC_CREATE(&return_list, SyncType_Scheduler, SyncObj_TaskReturnList);
+    assert_task_pool_valid();
+#if __TBB_SURVIVE_THREAD_SWITCH
+    my_cilk_unwatch_thunk.routine = NULL;
+#endif /* __TBB_SURVIVE_THREAD_SWITCH */
+}
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    #pragma warning(pop)
+#endif // warning 4355 is back
+
+#if TBB_USE_ASSERT > 1
+bool generic_scheduler::assert_task_pool_valid() const {
+    acquire_task_pool();
+    task** tp = dummy_slot.task_pool;
+    __TBB_ASSERT( task_pool_size >= min_task_pool_size, NULL );
+    __TBB_ASSERT( my_arena_slot->head <= my_arena_slot->tail, NULL );
+    for ( size_t i = 0; i < my_arena_slot->head; ++i )
+        __TBB_ASSERT( tp[i] == poisoned_ptr, "Task pool corrupted" );
+    for ( size_t i = my_arena_slot->head; i < my_arena_slot->tail; ++i ) {
+        __TBB_ASSERT( (uintptr_t)tp[i] + 1 > 1u, "nil or invalid task pointer in the deque" );
+        __TBB_ASSERT( tp[i]->prefix().state == task::ready ||
+                      tp[i]->prefix().extra_state == es_task_proxy, "task in the deque has invalid state" );
+    }
+    for ( size_t i = my_arena_slot->tail; i < task_pool_size; ++i )
+        __TBB_ASSERT( tp[i] == poisoned_ptr, "Task pool corrupted" );
+    release_task_pool();
+}
+#endif /* TBB_USE_ASSERT > 1 */
+
+#if __TBB_TASK_GROUP_CONTEXT
+void generic_scheduler::propagate_cancellation () {
+    spin_mutex::scoped_lock lock(context_list_mutex);
+    // An acquire fence is necessary to ensure that the subsequent node->my_next load
+    // returns the correct value in case the node was just inserted by another thread.
+    // The fence also ensures visibility of the correct my_parent value.
+    context_list_node_t *node = __TBB_load_with_acquire(context_list_head.my_next);
+    while ( node != &context_list_head ) {
+        task_group_context &ctx = __TBB_get_object_ref(task_group_context, my_node, node);
+            // The absence of an acquire fence while reading my_cancellation_requested may result
+            // in repeated traversals of the same parent chain if another group (ancestor or
+            // descendant) belonging to the tree being canceled sends a cancellation request of
+            // its own around the same time.
+        if ( !ctx.my_cancellation_requested )
+            ctx.propagate_cancellation_from_ancestors();
+        node = node->my_next;
+        __TBB_ASSERT( is_alive(ctx.my_version_and_traits), "Walked into a destroyed context while propagating cancellation" );
+    }
+    // Sync up the local cancellation epoch with the global one. The release fence prevents
+    // reordering of a possible store to my_cancellation_requested past the sync point.
+    __TBB_store_with_release(local_cancel_count, global_cancel_count);
+}
+
+#if !__TBB_ARENA_PER_MASTER
+/** Propagates cancellation down the tree of dependent contexts by walking each 
+    thread's local list of contexts **/
+void generic_scheduler::propagate_cancellation ( task_group_context& ctx ) {
+    __TBB_ASSERT ( ctx.my_cancellation_requested, "No cancellation request in the context" );
+    // The whole propagation algorithm is under the lock in order to ensure correctness
+    // in case of parallel cancellations at different levels of the context tree.
+    // See note 2 at the bottom of the file.
+    mutex::scoped_lock lock(the_scheduler_list_mutex);
+    // Advance global cancellation state
+    __TBB_FetchAndAddWrelease(&global_cancel_count, 1);
+    // First propagate to workers using arena to access their context lists
+    size_t num_workers = my_arena->prefix().number_of_workers;
+    for ( size_t i = 0; i < num_workers; ++i ) {
+        // No fence is necessary here since the context list of a worker's scheduler
+        // can contain anything of interest only after that worker has done its first
+        // steal, and doing so applies the necessary fence.
+        generic_scheduler *s = my_arena->prefix().worker_list[i].scheduler;
+        // If the worker is in the middle of its startup sequence, skip it.
+        if ( s )
+            s->propagate_cancellation();
+    }
+    // Then propagate to masters using the global list of master's schedulers
+    scheduler_list_node_t *node = the_scheduler_list_head.my_next;
+    while ( node != &the_scheduler_list_head ) {
+        __TBB_get_object_ref(generic_scheduler, my_node, node).propagate_cancellation();
+        node = node->my_next;
+    }
+}
+#endif /* !__TBB_ARENA_PER_MASTER */
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+
+void generic_scheduler::init_stack_info () {
+    // Stacks grow top-down. The highest address is called the "stack base",
+    // and the lowest the "stack limit".
+#if __TBB_ARENA_PER_MASTER
+    __TBB_ASSERT( !my_stealing_threshold, "Stealing threshold has already been calculated" );
+    size_t  stack_size = my_market->worker_stack_size();
+#else /* !__TBB_ARENA_PER_MASTER */
+    size_t  stack_size = my_arena->prefix().stack_size;
+#endif /* !__TBB_ARENA_PER_MASTER */
+#if USE_WINTHREAD
+#if defined(_MSC_VER)&&_MSC_VER<1400 && !_WIN64
+    NT_TIB  *pteb = (NT_TIB*)__TBB_machine_get_current_teb();
+#else
+    NT_TIB  *pteb = (NT_TIB*)NtCurrentTeb();
+#endif
+    __TBB_ASSERT( &pteb < pteb->StackBase && &pteb > pteb->StackLimit, "invalid stack info in TEB" );
+    __TBB_ASSERT( stack_size >0, "stack_size not initialized?" );
+    // When a thread is created with the attribute STACK_SIZE_PARAM_IS_A_RESERVATION, stack limit 
+    // in the TIB points to the committed part of the stack only. This renders the expression
+    // "(uintptr_t)pteb->StackBase / 2 + (uintptr_t)pteb->StackLimit / 2" virtually useless.
+    // Thus for worker threads we use the explicit stack size we used while creating them.
+    // And for master threads we rely on the following fact and assumption:
+    // - the default stack size of a master thread on Windows is 1M;
+    // - if it was explicitly set by the application it is at least as large as the size of a worker stack.
+    if ( is_worker() || stack_size < MByte )
+        my_stealing_threshold = (uintptr_t)pteb->StackBase - stack_size / 2;
+    else
+        my_stealing_threshold = (uintptr_t)pteb->StackBase - MByte / 2;
+#else /* USE_PTHREAD */
+    // There is no portable way to get the stack base address in POSIX, so we use a
+    // non-portable method (available on all modern Linux systems) or a simplified
+    // approach based on common-sense assumptions. The most important assumption
+    // is that the main thread's stack size is not less than that of other threads.
+    void    *stack_base = &stack_size;
+#if __TBB_ipf
+    void    *rsb_base = __TBB_get_bsp();
+#endif
+#if __linux__
+    size_t  np_stack_size = 0;
+    void    *stack_limit = NULL;
+    pthread_attr_t  attr_stack, np_attr_stack;
+    if( 0 == pthread_getattr_np(pthread_self(), &np_attr_stack) ) {
+        if ( 0 == pthread_attr_getstack(&np_attr_stack, &stack_limit, &np_stack_size) ) {
+            if ( 0 == pthread_attr_init(&attr_stack) ) {
+                if ( 0 == pthread_attr_getstacksize(&attr_stack, &stack_size) )
+                {
+                    stack_base = (char*)stack_limit + np_stack_size;
+                    if ( np_stack_size < stack_size ) {
+                        // We are in a secondary thread. Use reliable data.
+#if __TBB_ipf
+                        // IA64 stack is split into RSE backup and memory parts
+                        rsb_base = stack_limit;
+                        stack_size = np_stack_size/2;
+#else
+                        stack_size = np_stack_size;
+#endif /* !__TBB_ipf */
+                    }
+                    // We are either in the main thread, or this thread's stack
+                    // is bigger than that of the main one. As we cannot distinguish
+                    // these cases, we fall back to the default (heuristic) values.
+                }
+                pthread_attr_destroy(&attr_stack);
+            }
+        }
+        pthread_attr_destroy(&np_attr_stack);
+    }
+#endif /* __linux__ */
+    __TBB_ASSERT( stack_size>0, "stack size must be positive" );
+    my_stealing_threshold = (uintptr_t)((char*)stack_base - stack_size/2);
+#if __TBB_ipf
+    my_rsb_stealing_threshold = (uintptr_t)((char*)rsb_base + stack_size/2);
+#endif
+#endif /* USE_PTHREAD */
+}
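
A sketch of how a threshold computed this way is typically consumed elsewhere in the scheduler (illustrative only; the address of a local variable approximates the current stack position):

    #include <cstdint>

    // Stacks grow downwards, so recursion-deepening work such as stealing is
    // allowed only while the current stack position is still above the
    // threshold placed roughly halfway down the stack.
    inline bool stack_has_room(std::uintptr_t stealing_threshold) {
        int probe;   // a local variable marks the current stack depth
        return reinterpret_cast<std::uintptr_t>(&probe) > stealing_threshold;
    }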
+
+/** The function uses a synchronization scheme similar to the one in the destructor
+    of task_group_context, augmented with an interlocked state change of each context
+    object. The purpose of this algorithm is to prevent threads doing nonlocal context
+    destruction from accessing a destroyed owner-scheduler instance still pointed to
+    by the context object. **/
+void generic_scheduler::cleanup_local_context_list () {
+    // Detach contexts remaining in the local list
+    bool wait_for_concurrent_destroyers_to_leave = false;
+    uintptr_t local_count_snapshot = local_cancel_count;
+    local_ctx_list_update = 1;
+    {
+        // This is just a definition. The actual lock is acquired only in case of a conflict.
+        spin_mutex::scoped_lock lock;
+        // Full fence prevents reordering of store to local_ctx_list_update with 
+        // load from nonlocal_ctx_list_update.
+        __TBB_full_memory_fence();
+        // Check for a conflict with a concurrent destroyer or cancellation propagator
+        if ( nonlocal_ctx_list_update || local_count_snapshot != global_cancel_count )
+            lock.acquire(context_list_mutex);
+        // No acquire fence is necessary for loading context_list_head.my_next,
+        // as the list can be updated by this thread only.
+        context_list_node_t *node = context_list_head.my_next;
+        while ( node != &context_list_head ) {
+            task_group_context &ctx = __TBB_get_object_ref(task_group_context, my_node, node);
+            __TBB_ASSERT( ctx.my_kind != task_group_context::binding_required, "Only a context bound to a root task can be detached" );
+            node = node->my_next;
+            __TBB_ASSERT( is_alive(ctx.my_version_and_traits), "Walked into a destroyed context while detaching contexts from the local list" );
+            // On 64-bit systems my_kind can be a 32-bit value padded with 32 uninitialized bits.
+            // So the cast below is necessary to discard the higher bytes containing garbage.
+            if ( (task_group_context::kind_type)(uintptr_t)__TBB_FetchAndStoreW(&ctx.my_kind, task_group_context::detached) == task_group_context::dying )
+                wait_for_concurrent_destroyers_to_leave = true;
+        }
+    }
+    __TBB_store_with_release( local_ctx_list_update, 0 );
+    // Wait until other threads referencing this scheduler object finish with it
+    if ( wait_for_concurrent_destroyers_to_leave )
+        spin_wait_until_eq( nonlocal_ctx_list_update, 0u );
+}
+
+void generic_scheduler::free_scheduler() {
+    if( in_arena() ) {
+        acquire_task_pool();
+        leave_arena();
+    }
+#if __TBB_TASK_GROUP_CONTEXT
+    cleanup_local_context_list();
+#if !__TBB_ARENA_PER_MASTER
+    task_group_context* default_context = dummy_task->prefix().context;
+    if ( default_context != &dummy_context) {
+        // Only the master thread's dummy task has a dynamically allocated context
+        default_context->task_group_context::~task_group_context();
+        NFS_Free(default_context);
+        {
+            mutex::scoped_lock lock(the_scheduler_list_mutex);
+            my_node.my_next->my_prev = my_node.my_prev;
+            my_node.my_prev->my_next = my_node.my_next;
+        }
+    }
+#endif /* !__TBB_ARENA_PER_MASTER */
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+    free_task<small_local_task>( *dummy_task );
+
+    // k accounts for a guard reference and each task that we deallocate.
+    intptr_t k = 1;
+    for(;;) {
+        while( task* t = free_list ) {
+            free_list = t->prefix().next;
+            deallocate_task(*t);
+            ++k;
+        }
+        if( return_list==plugged_return_list() ) 
+            break;
+        free_list = (task*)__TBB_FetchAndStoreW( &return_list, (intptr_t)plugged_return_list() );
+    }
+#if __TBB_COUNT_TASK_NODES
+#if __TBB_ARENA_PER_MASTER
+    my_market->update_task_node_count( task_node_count );
+#else /* !__TBB_ARENA_PER_MASTER */
+    my_arena->prefix().task_node_count += task_node_count;
+#endif /* !__TBB_ARENA_PER_MASTER */
+#endif /* __TBB_COUNT_TASK_NODES */
+#if !__TBB_ARENA_PER_MASTER && __TBB_STATISTICS
+    dump_statistics(my_counters, arena_index < my_arena->prefix().number_of_workers ? arena_index + 1 : 0 );
+#endif /* !__TBB_ARENA_PER_MASTER && __TBB_STATISTICS */
+    free_task_pool( dummy_slot.task_pool );
+    dummy_slot.task_pool = NULL;
+    // Update small_task_count last.  Doing so sooner might cause another thread to free *this.
+    __TBB_ASSERT( small_task_count>=k, "small_task_count corrupted" );
+    governor::sign_off(this);
+    if( __TBB_FetchAndAddW( &small_task_count, -k )==k )
+        NFS_Free( this );
+}
+
+task& generic_scheduler::allocate_task( size_t number_of_bytes, 
+                                            __TBB_CONTEXT_ARG(task* parent, task_group_context* context) ) {
+    GATHER_STATISTIC(++my_counters.active_tasks);
+    task* t = free_list;
+    if( number_of_bytes<=quick_task_size ) {
+        if( t ) {
+            GATHER_STATISTIC(--my_counters.free_list_length);
+            __TBB_ASSERT( t->state()==task::freed, "free list of tasks is corrupted" );
+            free_list = t->prefix().next;
+        } else if( return_list ) {
+            // No fence required for read of return_list above, because __TBB_FetchAndStoreW has a fence.
+            t = (task*)__TBB_FetchAndStoreW( &return_list, 0 );
+            __TBB_ASSERT( t, "another thread emptied the return_list" );
+            __TBB_ASSERT( t->prefix().origin==this, "task returned to wrong return_list" );
+            ITT_NOTIFY( sync_acquired, &return_list );
+            free_list = t->prefix().next;
+        } else {
+            t = (task*)((char*)NFS_Allocate( task_prefix_reservation_size+quick_task_size, 1, NULL ) + task_prefix_reservation_size );
+#if __TBB_COUNT_TASK_NODES
+            ++task_node_count;
+#endif /* __TBB_COUNT_TASK_NODES */
+            t->prefix().origin = this;
+            ++small_task_count;
+        }
+    } else {
+        GATHER_STATISTIC(++my_counters.big_tasks);
+        t = (task*)((char*)NFS_Allocate( task_prefix_reservation_size+number_of_bytes, 1, NULL ) + task_prefix_reservation_size );
+#if __TBB_COUNT_TASK_NODES
+        ++task_node_count;
+#endif /* __TBB_COUNT_TASK_NODES */
+        t->prefix().origin = NULL;
+    }
+    task_prefix& p = t->prefix();
+#if __TBB_TASK_GROUP_CONTEXT
+    p.context = context;
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+    p.owner = this;
+    p.ref_count = 0;
+    // Assign a temporary value that is not outrageously out of place
+    p.depth = 0;
+    p.parent = parent;
+    // In TBB 2.1 and later, the constructor for task sets extra_state to indicate the version of the tbb/task.h header.
+    // In TBB 2.0 and earlier, the constructor leaves extra_state as zero.
+    p.extra_state = 0;
+    p.affinity = 0;
+    p.state = task::allocated;
+    return *t;
+}
+
+void generic_scheduler::free_nonlocal_small_task( task& t ) {
+    __TBB_ASSERT( t.state()==task::freed, NULL );
+    generic_scheduler& s = *static_cast<generic_scheduler*>(t.prefix().origin);
+    __TBB_ASSERT( &s!=this, NULL );
+    for(;;) {
+        task* old = s.return_list;
+        if( old==plugged_return_list() ) 
+            break;
+        // Atomically insert t at head of s.return_list
+        t.prefix().next = old; 
+        ITT_NOTIFY( sync_releasing, &s.return_list );
+        if( __TBB_CompareAndSwapW( &s.return_list, (intptr_t)&t, (intptr_t)old )==(intptr_t)old ) {
+            GATHER_STATISTIC(++my_counters.free_list_length);
+            return;
+        }
+    }
+    deallocate_task(t);
+    if( __TBB_FetchAndDecrementWrelease( &s.small_task_count )==1 ) {
+        // We freed the last task allocated by scheduler s, so it's our responsibility
+        // to free the scheduler.
+        NFS_Free( &s );
+    }
+}
+
+task** generic_scheduler::allocate_task_pool( size_t n ) {
+    __TBB_ASSERT( n > task_pool_size, "Cannot shrink the task pool" );
+    size_t byte_size = ((n * sizeof(task*) + NFS_MaxLineSize - 1) / NFS_MaxLineSize) * NFS_MaxLineSize;
+    task_pool_size = byte_size / sizeof(task*);
+    task** new_pool = (task**)NFS_Allocate( byte_size, 1, NULL );
+    // No need to clear the fresh deque since valid items are designated by the head and tail members.
+#if TBB_USE_ASSERT>=2
+    // But clear it in the high vigilance debug mode
+    memset( new_pool, reinterpret_cast<int>(poisoned_ptr), n );
+#endif /* TBB_USE_ASSERT>=2 */
+    return new_pool;
+}
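
The byte_size computation above is plain round-up-to-a-multiple arithmetic; for example, assuming a 128-byte NFS_MaxLineSize (the actual value is platform-defined):

    #include <cstddef>

    // Round a byte count up to a whole number of cache lines.
    inline std::size_t round_up(std::size_t bytes, std::size_t line) {
        return ((bytes + line - 1) / line) * line;
    }
    // e.g. 100 task pointers on a 64-bit platform: round_up(800, 128) == 896.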
+
+void generic_scheduler::grow_task_pool( size_t new_size ) {
+    assert_task_pool_valid();
+    if ( new_size < 2 * task_pool_size )
+        new_size = 2 * task_pool_size;
+    task** new_pool = allocate_task_pool( new_size ); // updates task_pool_size
+    task** old_pool = dummy_slot.task_pool;
+    acquire_task_pool();    // requires the old dummy_slot.task_pool value
+    my_arena_slot->tail -= my_arena_slot->head;
+    __TBB_ASSERT( my_arena_slot->tail <= task_pool_size, "new task pool is too short" );
+    memcpy( new_pool, old_pool + my_arena_slot->head, my_arena_slot->tail * sizeof(task*) );
+    my_arena_slot->head = 0;
+    dummy_slot.task_pool = new_pool;
+    release_task_pool();    // updates the task pool pointer in our arena slot
+    free_task_pool( old_pool );
+    assert_task_pool_valid();
+}
+
+/** ATTENTION:
+    This method is mostly the same as generic_scheduler::lock_task_pool(), with
+    slightly different logic for the slot state checks (the slot is either locked or
+    points to our task pool).
+    Thus if either of them is changed, consider changing the counterpart as well. **/
+inline void generic_scheduler::acquire_task_pool() const {
+    if ( !in_arena() )
+        return; // we are not in arena - nothing to lock
+    atomic_backoff backoff;
+    bool sync_prepare_done = false;
+    for(;;) {
+#if TBB_USE_ASSERT
+        __TBB_ASSERT( my_arena_slot == my_arena->slot + arena_index, "invalid arena slot index" );
+        // A local copy of the arena slot's task pool pointer is necessary for the next
+        // assertion to work correctly, excluding the effect of asynchronous state transitions.
+        task** tp = my_arena_slot->task_pool;
+        __TBB_ASSERT( tp == LockedTaskPool || tp == dummy_slot.task_pool, "slot ownership corrupt?" );
+#endif
+        if( my_arena_slot->task_pool != LockedTaskPool && 
+            __TBB_CompareAndSwapW( &my_arena_slot->task_pool, (intptr_t)LockedTaskPool, 
+                                   (intptr_t)dummy_slot.task_pool ) == (intptr_t)dummy_slot.task_pool )
+        {
+            // We acquired our own slot
+            ITT_NOTIFY(sync_acquired, my_arena_slot);
+            break;
+        } 
+        else if( !sync_prepare_done ) {
+            // Start waiting
+            ITT_NOTIFY(sync_prepare, my_arena_slot);
+            sync_prepare_done = true;
+        }
+        // Someone else acquired a lock, so pause and do exponential backoff.
+        backoff.pause();
+    }
+    __TBB_ASSERT( my_arena_slot->task_pool == LockedTaskPool, "not really acquired task pool" );
+} // generic_scheduler::acquire_task_pool
+
+inline void generic_scheduler::release_task_pool() const {
+    if ( !in_arena() )
+        return; // we are not in arena - nothing to unlock
+    __TBB_ASSERT( my_arena_slot, "we are not in arena" );
+    __TBB_ASSERT( my_arena_slot->task_pool == LockedTaskPool, "arena slot is not locked" );
+    ITT_NOTIFY(sync_releasing, my_arena_slot);
+    __TBB_store_with_release( my_arena_slot->task_pool, dummy_slot.task_pool );
+}
+
+/** ATTENTION:
+    This method is mostly the same as generic_scheduler::acquire_task_pool(),
+    with slightly different logic for the slot state checks (the slot can be empty, locked,
+    or point to any task pool other than ours, and asynchronous transitions between
+    all these states are possible).
+    Thus if any of them is changed, consider changing the counterpart as well. **/
+inline task** generic_scheduler::lock_task_pool( arena_slot* victim_arena_slot ) const {
+    task** victim_task_pool;
+    atomic_backoff backoff;
+    bool sync_prepare_done = false;
+    for(;;) {
+        victim_task_pool = victim_arena_slot->task_pool;
+        // NOTE: Do not use comparison of head and tail indices to check for
+        // the presence of work in the victim's task pool, as they may give
+        // an incorrect indication because of task pool relocations and resizes.
+        if ( victim_task_pool == EmptyTaskPool ) {
+            // The victim thread emptied its task pool - nothing to lock
+            if( sync_prepare_done )
+                ITT_NOTIFY(sync_cancel, victim_arena_slot);
+            break;
+        }
+        if( victim_task_pool != LockedTaskPool && 
+            __TBB_CompareAndSwapW( &victim_arena_slot->task_pool, 
+                (intptr_t)LockedTaskPool, (intptr_t)victim_task_pool ) == (intptr_t)victim_task_pool )
+        {
+            // We've locked victim's task pool
+            ITT_NOTIFY(sync_acquired, victim_arena_slot);
+            break;
+        }
+        else if( !sync_prepare_done ) {
+            // Start waiting
+            ITT_NOTIFY(sync_prepare, victim_arena_slot);
+            sync_prepare_done = true;
+        }
+        GATHER_STATISTIC( ++my_counters.thieves_conflicts );
+        // Someone else acquired a lock, so pause and do exponential backoff.
+        backoff.pause();
+    }
+    __TBB_ASSERT( victim_task_pool == EmptyTaskPool || 
+                  (victim_arena_slot->task_pool == LockedTaskPool && victim_task_pool != LockedTaskPool), 
+                  "not really locked victim's task pool?" );
+    return victim_task_pool;
+} // generic_scheduler::lock_task_pool
+
+inline void generic_scheduler::unlock_task_pool( arena_slot* victim_arena_slot, 
+                                                task** victim_task_pool ) const {
+    __TBB_ASSERT( victim_arena_slot, "empty victim arena slot pointer" );
+    __TBB_ASSERT( victim_arena_slot->task_pool == LockedTaskPool, "victim arena slot is not locked" );
+    ITT_NOTIFY(sync_releasing, victim_arena_slot);
+    __TBB_store_with_release( victim_arena_slot->task_pool, victim_task_pool );
+}
+
+
+inline task* generic_scheduler::prepare_for_spawning( task* t ) {
+    __TBB_ASSERT( t->state()==task::allocated, "attempt to spawn task that is not in 'allocated' state" );
+    t->prefix().owner = this;
+    t->prefix().state = task::ready;
+#if TBB_USE_ASSERT
+    if( task* parent = t->parent() ) {
+        internal::reference_count ref_count = parent->prefix().ref_count;
+        __TBB_ASSERT( ref_count>=0, "attempt to spawn task whose parent has a ref_count<0" );
+        __TBB_ASSERT( ref_count!=0, "attempt to spawn task whose parent has a ref_count==0 (forgot to set_ref_count?)" );
+        parent->prefix().extra_state |= es_ref_count_active;
+    }
+#endif /* TBB_USE_ASSERT */
+    affinity_id dst_thread = t->prefix().affinity;
+    __TBB_ASSERT( dst_thread == 0 || is_version_3_task(*t), "backwards compatibility to TBB 2.0 tasks is broken" );
+    if( dst_thread != 0 && dst_thread != my_affinity_id ) {
+        task_proxy& proxy = (task_proxy&)allocate_task( sizeof(task_proxy), 
+                                                      __TBB_CONTEXT_ARG(NULL, NULL) );
+        // Mark as a proxy
+        proxy.prefix().extra_state = es_task_proxy;
+        proxy.outbox = &my_arena->mailbox(dst_thread);
+        proxy.task_and_tag = intptr_t(t)|3;
+        ITT_NOTIFY( sync_releasing, proxy.outbox );
+        // Mail the proxy - after this point t may be destroyed by another thread at any moment.
+        proxy.outbox->push(proxy);
+        return &proxy;
+    }
+    return t;
+}
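
The expression intptr_t(t)|3 above relies on task objects being at least 4-byte aligned, so the two low pointer bits are free to carry ownership tags; a generic sketch of the technique follows (the names here are illustrative, not the TBB ones):

    #include <cstdint>

    const std::intptr_t kPoolBit    = 1;   // proxy still reachable from a task pool
    const std::intptr_t kMailboxBit = 2;   // proxy still reachable from a mailbox

    inline std::intptr_t tag_both(void* p) {
        return reinterpret_cast<std::intptr_t>(p) | (kPoolBit | kMailboxBit);
    }
    inline void* untag(std::intptr_t t)      { return reinterpret_cast<void*>(t & ~std::intptr_t(3)); }
    inline bool  in_pool(std::intptr_t t)    { return (t & kPoolBit) != 0; }
    inline bool  in_mailbox(std::intptr_t t) { return (t & kMailboxBit) != 0; }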
+
+/** Conceptually, this method should be a member of class scheduler.
+    But doing so would force us to publish class scheduler in the headers. */
+void generic_scheduler::local_spawn( task& first, task*& next ) {
+    __TBB_ASSERT( governor::is_set(this), NULL );
+    assert_task_pool_valid();
+    if ( &first.prefix().next == &next ) {
+        // Single task is being spawned
+        if ( my_arena_slot->tail == task_pool_size ) {
+            // If the free space at the beginning of the task pool is too small,
+            // we are likely facing a pathological single-producer-multiple-consumers
+            // scenario, and thus it is better to expand the task pool.
+            if ( my_arena_slot->head > min_task_pool_size/4 ) {
+                // Move the busy part of the deque to the beginning of the allocated space
+                acquire_task_pool();
+                my_arena_slot->tail -= my_arena_slot->head;
+                memmove( dummy_slot.task_pool, dummy_slot.task_pool + my_arena_slot->head, my_arena_slot->tail * sizeof(task*) );
+                my_arena_slot->head = 0;
+                release_task_pool();
+            }
+            else {
+                grow_task_pool( task_pool_size + 1 );
+            }
+        }
+        dummy_slot.task_pool[my_arena_slot->tail] = prepare_for_spawning( &first );
+        ITT_NOTIFY(sync_releasing, my_arena_slot);
+        // The following store with release is required on ia64 only
+        size_t new_tail = my_arena_slot->tail + 1;
+        __TBB_store_with_release( my_arena_slot->tail, new_tail );
+        __TBB_ASSERT ( my_arena_slot->tail <= task_pool_size, "task deque end was overwritten" );
+    }
+    else {
+        // Task list is being spawned
+        const size_t initial_capacity = 64;
+        task *arr[initial_capacity];
+        fast_reverse_vector<task*> tasks(arr, initial_capacity);
+        task *t_next = NULL;
+        for( task* t = &first; ; t = t_next ) {
+            // After prepare_for_spawning returns, t may already have been destroyed,
+            // so extract everything we need from it while it is still alive.
+            bool end = &t->prefix().next == &next;
+            t_next = t->prefix().next;
+            tasks.push_back( prepare_for_spawning(t) );
+            if( end )
+                break;
+        }
+        size_t num_tasks = tasks.size();
+        __TBB_ASSERT ( arena_index != null_arena_index, "invalid arena slot index" );
+        if ( my_arena_slot->tail + num_tasks > task_pool_size ) {
+            // 1 compensates for head possibly temporarily incremented by a thief
+            size_t new_size = my_arena_slot->tail - my_arena_slot->head + num_tasks + 1;
+            if ( new_size <= task_pool_size ) {
+                // Move the busy part of the deque to the beginning of the allocated space
+                acquire_task_pool();
+                my_arena_slot->tail -= my_arena_slot->head;
+                memmove( dummy_slot.task_pool, dummy_slot.task_pool + my_arena_slot->head, my_arena_slot->tail * sizeof(task*) );
+                my_arena_slot->head = 0;
+                release_task_pool();
+            }
+            else {
+                grow_task_pool( new_size );
+            }
+        }
+#if DO_ITT_NOTIFY
+        else {
+            // The preceding if-branch issues the same ittnotify inside release_task_pool() or grow_task_pool() methods
+            ITT_NOTIFY(sync_releasing, my_arena_slot);
+        }
+#endif /* DO_ITT_NOTIFY */
+        tasks.copy_memory( dummy_slot.task_pool + my_arena_slot->tail );
+        // The following store with release is required on ia64 only
+        size_t new_tail = my_arena_slot->tail + num_tasks;
+        __TBB_store_with_release( my_arena_slot->tail, new_tail );
+        __TBB_ASSERT ( my_arena_slot->tail <= task_pool_size, "task deque end was overwritten" );
+    }
+#if __TBB_ARENA_PER_MASTER
+    if ( !in_arena() )
+        enter_arena();
+    my_arena->advertise_new_work</*Spawned=*/true>();
+#else /* !__TBB_ARENA_PER_MASTER */
+    if ( !in_arena() ) {
+        if ( is_worker() )
+            enter_arena();
+        else
+            try_enter_arena();
+    }
+    my_arena->mark_pool_full();
+#endif /* !__TBB_ARENA_PER_MASTER */
+    assert_task_pool_valid();
+}
+
+void generic_scheduler::local_spawn_root_and_wait( task& first, task*& next ) {
+    __TBB_ASSERT( governor::is_set(this), NULL );
+    __TBB_ASSERT( &first, NULL );
+    auto_empty_task dummy( __TBB_CONTEXT_ARG(this, first.prefix().context) );
+    internal::reference_count n = 0;
+    for( task* t=&first; ; t=t->prefix().next ) {
+        ++n;
+        __TBB_ASSERT( !t->prefix().parent, "not a root task, or already running" );
+        t->prefix().parent = &dummy;
+        if( &t->prefix().next==&next ) break;
+#if __TBB_TASK_GROUP_CONTEXT
+        __TBB_ASSERT( t->prefix().context == t->prefix().next->prefix().context, 
+                    "all the root tasks in list must share the same context");
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+    }
+    dummy.prefix().ref_count = n+1;
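+    // Each of the n root tasks decrements the dummy's ref_count on completion, so
+    // the count returns to 1 exactly when all of them have finished, which is what
+    // local_wait_for_all waits for.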
+    if( n>1 )
+        local_spawn( *first.prefix().next, next );
+    local_wait_for_all( dummy, &first );
+}
+
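+// A task_proxy stands in for a task that was both left in the spawner's pool and
+// mailed to the thread it is affinitized to. The two low-order bits of its
+// task_and_tag field record which containers still reference it (pool_bit and
+// mailbox_bit). A claimant atomically replaces the whole word with the other
+// container's bit, telling that container to simply free the now-empty proxy.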
+inline task* generic_scheduler::get_mailbox_task() {
+    __TBB_ASSERT( my_affinity_id>0, "not in arena" );
+    task* result = NULL;
+    while( task_proxy* t = inbox.pop() ) {
+        intptr_t tat = __TBB_load_with_acquire(t->task_and_tag);
+        __TBB_ASSERT( tat==task_proxy::mailbox_bit || (tat==(tat|3)&&tat!=3), NULL );
+        if( tat!=task_proxy::mailbox_bit && __TBB_CompareAndSwapW( &t->task_and_tag, task_proxy::pool_bit, tat )==tat ) {
+            // Successfully grabbed the task, and left the pool seeker with the job of freeing the proxy.
+            ITT_NOTIFY( sync_acquired, inbox.outbox() );
+            result = (task*)(tat & ~3);
+            result->prefix().owner = this;
+            break;
+        }
+        free_task_proxy( *t );
+    }
+    return result;
+}
+
+inline task* generic_scheduler::strip_proxy( task_proxy* tp ) {
+    __TBB_ASSERT( tp->prefix().extra_state==es_task_proxy, NULL );
+    intptr_t tat = __TBB_load_with_acquire(tp->task_and_tag);
+    if( (tat&3)==3 ) {
+        // proxy is shared by a pool and a mailbox.
+        // Attempt to transition it to "empty proxy in mailbox" state.
+        if( __TBB_CompareAndSwapW( &tp->task_and_tag, task_proxy::mailbox_bit, tat )==tat ) {
+            // Successfully grabbed the task, and left the mailbox with the job of freeing the proxy.
+            return (task*)(tat&~3);
+        }
+        __TBB_ASSERT( tp->task_and_tag==task_proxy::pool_bit, NULL );
+    } else {
+        // We have exclusive access to the proxy
+        __TBB_ASSERT( (tat&3)==task_proxy::pool_bit, "task did not come from pool?" );
+        __TBB_ASSERT ( !(tat&~3), "Empty proxy in the pool contains non-zero task pointer" );
+    }
+#if TBB_USE_ASSERT
+    tp->prefix().state = task::allocated;
+#endif
+    free_task_proxy( *tp );
+    // Another thread grabbed the underlying task via their mailbox
+    return NULL;
+}
+
+#if __TBB_ARENA_PER_MASTER
+void generic_scheduler::local_enqueue( task& t ) {
+    __TBB_ASSERT( governor::is_set(this), NULL );
+    __TBB_ASSERT( t.state()==task::allocated, "attempt to enqueue task that is not in 'allocated' state" );
+    t.prefix().owner = this;
+    t.prefix().state = task::ready;
+
+#if TBB_USE_ASSERT
+    if( task* parent = t.parent() ) {
+        internal::reference_count ref_count = parent->prefix().ref_count;
+        __TBB_ASSERT( ref_count>=0, "attempt to enqueue task whose parent has a ref_count<0" );
+        __TBB_ASSERT( ref_count!=0, "attempt to enqueue task whose parent has a ref_count==0 (forgot to set_ref_count?)" );
+        parent->prefix().extra_state |= es_ref_count_active;
+    }
+    __TBB_ASSERT(t.prefix().affinity==affinity_id(0), "affinity is ignored for enqueued tasks");
+#endif /* TBB_USE_ASSERT */
+
+    __TBB_ASSERT( my_arena, "thread is not in any arena" );
+    ITT_NOTIFY(sync_releasing, &my_arena->my_task_stream);
+    my_arena->my_task_stream.push( &t, my_arena_slot->hint_for_push );
+    my_arena->advertise_new_work< /*Spawned=*/ false >();
+    assert_task_pool_valid();
+}
+
+inline task* generic_scheduler::dequeue_task() {
+    task* result = NULL;
+    my_arena->my_task_stream.pop(result, my_arena_slot->hint_for_pop);
+    if (result) ITT_NOTIFY(sync_acquired, &my_arena->my_task_stream);
+    return result;
+}
+#endif /* __TBB_ARENA_PER_MASTER */
+
+inline task* generic_scheduler::get_task() {
+    task* result = NULL;
+retry:
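+    // Owner/thief arbitration: the owner reserves the last element by decrementing
+    // tail, while a thief reserves the first one by incrementing head (see steal_task).
+    // If the reservations cross, the conflict is resolved under the task pool lock.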
+    --my_arena_slot->tail;
+    __TBB_full_memory_fence();
+    if ( (intptr_t)my_arena_slot->head > (intptr_t)my_arena_slot->tail ) {
+        acquire_task_pool();
+        if ( (intptr_t)my_arena_slot->head <= (intptr_t)my_arena_slot->tail ) {
+            // The thief backed off - grab the task
+            result = dummy_slot.task_pool[my_arena_slot->tail];
+            __TBB_ASSERT( !is_poisoned(result), NULL );
+            poison_pointer( dummy_slot.task_pool[my_arena_slot->tail] );
+        }
+        else {
+            __TBB_ASSERT ( my_arena_slot->head == my_arena_slot->tail + 1, "victim/thief arbitration algorithm failure" );
+        }
+        if ( (intptr_t)my_arena_slot->head < (intptr_t)my_arena_slot->tail ) {
+            release_task_pool();
+        }
+        else {
+            // In any case the deque is empty now, so compact it
+            my_arena_slot->head = my_arena_slot->tail = 0;
+            if ( in_arena() )
+                leave_arena();
+        }
+    }
+    else {
+        result = dummy_slot.task_pool[my_arena_slot->tail];
+        __TBB_ASSERT( !is_poisoned(result), NULL );
+        poison_pointer( dummy_slot.task_pool[my_arena_slot->tail] );
+    }
+    if( result && is_proxy(*result) ) {
+        result = strip_proxy((task_proxy*)result);
+        if( !result ) {
+            goto retry;
+        }
+        GATHER_STATISTIC( ++my_counters.proxies_executed );
+        // Following assertion should be true because TBB 2.0 tasks never specify affinity, and hence are not proxied.
+        __TBB_ASSERT( is_version_3_task(*result), "backwards compatibility with TBB 2.0 broken" );
+        // Task affinity has changed.
+        innermost_running_task = result;
+        result->note_affinity(my_affinity_id);
+    }
+    return result;
+} // generic_scheduler::get_task
+
+task* generic_scheduler::steal_task( arena_slot& victim_slot ) {
+    task** victim_pool = lock_task_pool( &victim_slot );
+    if ( !victim_pool )
+        return NULL;
+    const size_t none = ~size_t(0);
+    size_t first_skipped_proxy = none;
+    task* result = NULL;
+retry:
+    ++victim_slot.head;
+    __TBB_full_memory_fence();
+    if ( (intptr_t)victim_slot.head > (intptr_t)victim_slot.tail ) {
+        --victim_slot.head;
+    }
+    else {
+        result = victim_pool[victim_slot.head - 1];
+        __TBB_ASSERT( !is_poisoned(result), NULL );
+        if( is_proxy(*result) ) {
+            task_proxy& tp = *static_cast<task_proxy*>(result);
+            // If the task is likely to be grabbed by the thread it was mailed to, skip it.
+            if( (tp.task_and_tag & 3) == 3 && tp.outbox->recipient_is_idle() ) {
+                GATHER_STATISTIC( ++my_counters.proxies_bypassed );
+                if ( first_skipped_proxy == none )
+                    first_skipped_proxy = victim_slot.head - 1;
+                result = NULL;
+                goto retry;
+            }
+        }
+        poison_pointer(victim_pool[victim_slot.head - 1]);
+    }
+    if ( first_skipped_proxy != none ) {
+        if ( result ) {
+            victim_pool[victim_slot.head - 1] = victim_pool[first_skipped_proxy];
+            poison_pointer( victim_pool[first_skipped_proxy] );
+            __TBB_store_with_release( victim_slot.head, first_skipped_proxy + 1 );
+        }
+        else
+            __TBB_store_with_release( victim_slot.head, first_skipped_proxy );
+    }
+    unlock_task_pool( &victim_slot, victim_pool );
+    return result;
+}
+
+inline void generic_scheduler::do_enter_arena() {
+    my_arena_slot = &my_arena->slot[arena_index];
+    __TBB_ASSERT ( my_arena_slot->head == my_arena_slot->tail, "task deque of a free slot must be empty" );
+    __TBB_ASSERT ( dummy_slot.head < dummy_slot.tail, "entering arena without tasks to share" );
+    my_arena_slot->head = dummy_slot.head;
+    my_arena_slot->tail = dummy_slot.tail;
+    // Release signal on behalf of previously spawned tasks (when this thread was not in arena yet)
+    ITT_NOTIFY(sync_releasing, my_arena_slot);
+    __TBB_store_with_release( my_arena_slot->task_pool, dummy_slot.task_pool );
+    // We'll leave arena only when it's empty, so clean up local instances of indices.
+    dummy_slot.head = dummy_slot.tail = 0;
+}
+
+void generic_scheduler::enter_arena() {
+    __TBB_ASSERT ( my_arena, "no arena: initialization not completed?" );
+#if __TBB_ARENA_PER_MASTER
+    __TBB_ASSERT ( !in_arena(), "thread is already in arena?" );
+    __TBB_ASSERT ( arena_index < my_arena->my_num_slots, "arena slot index is out-of-bound" );
+#else /* !__TBB_ARENA_PER_MASTER */
+    __TBB_ASSERT ( is_worker(), "only workers should use enter_arena()" );
+    __TBB_ASSERT ( !in_arena(), "worker already in arena?" );
+    __TBB_ASSERT ( arena_index < my_arena->prefix().number_of_workers, "invalid worker arena slot index" );
+#endif /* !__TBB_ARENA_PER_MASTER */
+    __TBB_ASSERT ( my_arena->slot[arena_index].task_pool == EmptyTaskPool, "someone else grabbed my arena slot?" );
+    do_enter_arena();
+}
+
+#if !__TBB_ARENA_PER_MASTER
+void generic_scheduler::try_enter_arena() {
+    __TBB_ASSERT ( !is_worker(), "only masters should use try_enter_arena()" );
+    __TBB_ASSERT ( my_arena, "no arena: initialization not completed?" );
+    __TBB_ASSERT ( !in_arena(), "master already in arena?" );
+    __TBB_ASSERT ( arena_index >= my_arena->prefix().number_of_workers && 
+                   arena_index < my_arena->prefix().number_of_slots, "invalid arena slot hint value" );
+
+    size_t h = arena_index;
+    // We do not lock task pool upon successful entering arena
+    if( my_arena->slot[h].task_pool != EmptyTaskPool || 
+        __TBB_CompareAndSwapW( &my_arena->slot[h].task_pool, (intptr_t)LockedTaskPool, 
+                                                          (intptr_t)EmptyTaskPool ) != (intptr_t)EmptyTaskPool )
+    {
+        // Hinted arena slot is already busy, try some of the others at random
+        unsigned first = my_arena->prefix().number_of_workers,
+                 last = my_arena->prefix().number_of_slots;
+        unsigned n = last - first - 1;
+        /// \todo Is this limit reasonable?
+        size_t max_attempts = last - first;
+        for (;;) {
+            size_t k = first + random.get() % n;
+            if( k >= h )
+                ++k;    // Adjusts random distribution to exclude previously tried slot
+            h = k;
+            if( my_arena->slot[h].task_pool == EmptyTaskPool && 
+                __TBB_CompareAndSwapW( &my_arena->slot[h].task_pool, (intptr_t)LockedTaskPool, 
+                                                                  (intptr_t)EmptyTaskPool ) == (intptr_t)EmptyTaskPool )
+            {
+                break;
+            }
+            if ( --max_attempts == 0 ) {
+                // After so many attempts we are still unable to find a vacant arena slot.
+                // Cease the vain effort and work outside of the arena for a while.
+                return;
+            }
+        }
+    }
+    // Successfully claimed a slot in the arena.
+    ITT_NOTIFY(sync_acquired, &my_arena->slot[h]);
+    __TBB_ASSERT ( my_arena->slot[h].task_pool == LockedTaskPool, "arena slot is not actually acquired" );
+    arena_index = h;
+    do_enter_arena();
+    attach_mailbox( affinity_id(h+1) );
+}
+#endif /* !__TBB_ARENA_PER_MASTER */
+
+void generic_scheduler::leave_arena() {
+    __TBB_ASSERT( in_arena(), "Not in arena" );
+    // Do not reset arena_index. It will be used to (attempt to) re-acquire the slot next time
+    __TBB_ASSERT( &my_arena->slot[arena_index] == my_arena_slot, "arena slot and slot index mismatch" );
+    __TBB_ASSERT ( my_arena_slot->task_pool == LockedTaskPool, "Task pool must be locked when leaving arena" );
+    __TBB_ASSERT ( my_arena_slot->head == my_arena_slot->tail, "Cannot leave arena when the task pool is not empty" );
+#if !__TBB_ARENA_PER_MASTER
+    if ( !is_worker() ) {
+        my_affinity_id = 0;
+        inbox.detach();
+    }
+#endif /* !__TBB_ARENA_PER_MASTER */
+    ITT_NOTIFY(sync_releasing, &my_arena->slot[arena_index]);
+    __TBB_store_with_release( my_arena_slot->task_pool, EmptyTaskPool );
+    my_arena_slot = &dummy_slot;
+}
+
+#if __TBB_ARENA_PER_MASTER
+generic_scheduler* generic_scheduler::create_worker( market& m, size_t index ) {
+    generic_scheduler* s = allocate_scheduler( NULL, index );
+#if __TBB_TASK_GROUP_CONTEXT
+    s->dummy_task->prefix().context = &dummy_context;
+    // Sync up the local cancellation state with the global one. No need for fence here.
+    s->local_cancel_count = global_cancel_count;
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+    s->my_market = &m;
+    s->init_stack_info();
+    return s;
+}
+
+#else /* !__TBB_ARENA_PER_MASTER */
+
+generic_scheduler* generic_scheduler::create_worker( arena& a, size_t index ) {
+    generic_scheduler* s = allocate_scheduler( &a, index );
+
+    // Put myself into the arena
+#if __TBB_TASK_GROUP_CONTEXT
+    s->dummy_task->prefix().context = &dummy_context;
+    // Sync up the local cancellation state with the global one. No need for fence here.
+    s->local_cancel_count = global_cancel_count;
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+    s->attach_mailbox( index+1 );
+    s->init_stack_info();
+
+    __TBB_store_with_release( a.prefix().worker_list[index].scheduler, s );
+    return s;
+}
+#endif /* !__TBB_ARENA_PER_MASTER */
+
+generic_scheduler* generic_scheduler::create_master( arena& a ) {
+    generic_scheduler* s = allocate_scheduler( &a,
+#if __TBB_ARENA_PER_MASTER
+        0                   // Master thread always occupies the first slot
+#else /* !__TBB_ARENA_PER_MASTER */
+        null_arena_index    // Master thread will have to search for a vacant slot
+#endif /* !__TBB_ARENA_PER_MASTER */
+        );
+    task& t = *s->dummy_task;
+    s->innermost_running_task = &t;
+    t.prefix().ref_count = 1;
+    governor::sign_on(s);
+    __TBB_ASSERT( &task::self()==&t, "governor::sign_on failed?" );
+#if __TBB_ARENA_PER_MASTER
+#if __TBB_TASK_GROUP_CONTEXT
+    // Context to be used by root tasks by default (if the user has not specified one).
+    // Allocation is done by NFS allocator because we cannot reuse memory allocated 
+    // for task objects since the free list is empty at the moment.
+    t.prefix().context = a.my_master_default_ctx = 
+        new ( NFS_Allocate(sizeof(task_group_context), 1, NULL) ) task_group_context(task_group_context::isolated);
+#endif
+    s->my_market = a.my_market;
+    __TBB_ASSERT( s->arena_index == 0, "Master thread must occupy the first slot in its arena" );
+    s->attach_mailbox(1);
+    a.slot[0].my_scheduler = s;
+#if _WIN32|_WIN64
+    __TBB_ASSERT( s->my_market, NULL );
+    s->my_market->register_master( s->master_exec_resource );
+#endif /* _WIN32|_WIN64 */
+#else /* !__TBB_ARENA_PER_MASTER */
+#if _WIN32|_WIN64
+    s->register_master();
+#endif 
+#if __TBB_TASK_GROUP_CONTEXT
+    // Context to be used by root tasks by default (if the user has not specified one).
+    // Allocation is done by NFS allocator because we cannot reuse memory allocated 
+    // for task objects since the free list is empty at the moment.
+    t.prefix().context = new ( NFS_Allocate(sizeof(task_group_context), 1, NULL) ) task_group_context(task_group_context::isolated);
+    scheduler_list_node_t &node = s->my_node;
+    {
+        mutex::scoped_lock lock(the_scheduler_list_mutex);
+        node.my_next = the_scheduler_list_head.my_next;
+        node.my_prev = &the_scheduler_list_head;
+        the_scheduler_list_head.my_next->my_prev = &node;
+        the_scheduler_list_head.my_next = &node;
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+        unsigned last = a.prefix().number_of_slots,
+                 cur_limit = a.prefix().limit;
+        // This slot index assignment is just a hint to ...
+        if ( cur_limit < last ) {
+            // ... to prevent competition between the first few masters.
+            s->arena_index = cur_limit++;
+            // In the absence of exception handling this code is subject to a data
+            // race when multiple masters concurrently enter an empty arena. This
+            // does not affect correctness, and can only result in a few masters
+            // competing for the same arena slot during the first acquisition.
+            // The cost of competition is low in comparison to that of oversubscription.
+            a.prefix().limit = cur_limit;
+        }
+        else {
+            // ... to minimize the probability of competition between multiple masters.
+            unsigned first = a.prefix().number_of_workers;
+            s->arena_index = first + s->random.get() % (last - first);
+        }
+#if __TBB_TASK_GROUP_CONTEXT
+    }
+#endif
+#endif /* !__TBB_ARENA_PER_MASTER */
+    s->init_stack_info();
+#if __TBB_TASK_GROUP_CONTEXT
+    // Sync up the local cancellation state with the global one. No need for fence here.
+    s->local_cancel_count = global_cancel_count;
+#endif
+#if __TBB_SCHEDULER_OBSERVER
+    // Process any existing observers.
+    s->notify_entry_observers();
+#endif /* __TBB_SCHEDULER_OBSERVER */
+    return s;
+}
+
+void generic_scheduler::cleanup_worker( void* arg, bool is_worker ) {
+    generic_scheduler& s = *(generic_scheduler*)arg;
+    __TBB_ASSERT( s.dummy_slot.task_pool, "cleaning up worker with missing task pool" );
+// APM TODO: Decide how observers should react to each entry into and exit from the arena
+#if __TBB_SCHEDULER_OBSERVER
+    s.notify_exit_observers( is_worker );
+#endif /* __TBB_SCHEDULER_OBSERVER */
+    // When comparing "head" and "tail" indices ">=" is used because this worker's
+    // task pool may still be published in the arena, and thieves can optimistically
+    // bump "head" (and then roll back).
+    __TBB_ASSERT( s.my_arena_slot->task_pool == EmptyTaskPool || s.my_arena_slot->head >= s.my_arena_slot->tail, 
+                  "worker has unfinished work at run down" );
+    s.free_scheduler();
+}
+
+void generic_scheduler::cleanup_master() {
+    generic_scheduler& s = *this; // for similarity with cleanup_worker
+    __TBB_ASSERT( s.dummy_slot.task_pool, "cleaning up master with missing task pool" );
+#if __TBB_SCHEDULER_OBSERVER
+    s.notify_exit_observers(/*is_worker=*/false);
+#endif /* __TBB_SCHEDULER_OBSERVER */
+    if ( !local_task_pool_empty() ) {
+        __TBB_ASSERT ( governor::is_set(this), "TLS slot is cleared before the task pool cleanup" );
+        s.local_wait_for_all( *s.dummy_task, NULL );
+        __TBB_ASSERT ( governor::is_set(this), "Other thread reused our TLS key during the task pool cleanup" );
+    }
+#if __TBB_ARENA_PER_MASTER
+#if _WIN32|_WIN64
+    __TBB_ASSERT( s.my_market, NULL );
+    s.my_market->unregister_master( s.master_exec_resource );
+#endif /* _WIN32|_WIN64 */
+    arena* a = s.my_arena;
+#if __TBB_STATISTICS
+    *a->slot[0].my_counters += s.my_counters;
+#endif /* __TBB_STATISTICS */
+#else /* !__TBB_ARENA_PER_MASTER */
+#if _WIN32|_WIN64
+    s.unregister_master();
+#endif /* _WIN32|_WIN64 */
+#endif /* __TBB_ARENA_PER_MASTER */
+    s.free_scheduler();
+#if __TBB_ARENA_PER_MASTER
+    a->slot[0].my_scheduler = NULL;
+    // Do not close the arena if some fire-and-forget tasks remain; the workers will take care of it.
+    if( a->my_task_stream.empty() && a->pool_state.fetch_and_store(arena::SNAPSHOT_EMPTY)!=arena::SNAPSHOT_EMPTY )
+        a->my_market->adjust_demand( *a, -(int)a->my_max_num_workers );
+#if __TBB_STATISTICS_EARLY_DUMP
+    GATHER_STATISTIC( a->dump_arena_statistics() );
+#endif
+    if ( --a->my_num_threads_active==0 && a->pool_state==arena::SNAPSHOT_EMPTY )
+        a->close_arena();
+#else /* !__TBB_ARENA_PER_MASTER */
+    governor::finish_with_arena();
+#endif /* !__TBB_ARENA_PER_MASTER */
+}
+
+#if __TBB_SCHEDULER_OBSERVER
+    void generic_scheduler::notify_entry_observers() {
+        local_last_observer_proxy = observer_proxy::process_list(local_last_observer_proxy,is_worker(),/*is_entry=*/true);
+    }
+
+    void generic_scheduler::notify_exit_observers( bool is_worker ) {
+        observer_proxy::process_list(local_last_observer_proxy,is_worker,/*is_entry=*/false);
+    }
+#endif /* __TBB_SCHEDULER_OBSERVER */
+
+} // namespace internal
+} // namespace tbb
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/scheduler.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/scheduler.h
new file mode 100644 (file)
index 0000000..7adbb3c
--- /dev/null
@@ -0,0 +1,556 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _TBB_scheduler_H
+#define _TBB_scheduler_H
+
+#include "scheduler_common.h"
+#include "arena.h"
+#include "mailbox.h"
+#include "tbb_misc.h" // for FastRandom
+
+#if __TBB_TASK_GROUP_CONTEXT
+#include "tbb/spin_mutex.h"
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+#if __TBB_SURVIVE_THREAD_SWITCH
+#include "cilk-tbb-interop.h"
+#endif /* __TBB_SURVIVE_THREAD_SWITCH */
+
+namespace tbb {
+namespace internal {
+
+template<typename SchedulerTraits> class custom_scheduler;
+
+//------------------------------------------------------------------------
+// generic_scheduler
+//------------------------------------------------------------------------
+
+#if __TBB_TASK_GROUP_CONTEXT
+struct scheduler_list_node_t {
+    scheduler_list_node_t *my_prev,
+                          *my_next;
+};
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+#define EmptyTaskPool ((task**)0)
+#define LockedTaskPool ((task**)~(intptr_t)0)
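+// Sentinel values for arena_slot::task_pool: a zero pointer marks a slot with no
+// published task pool, an all-ones pointer marks a pool that is currently locked.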
+
+class governor;
+
+#if __TBB_SCHEDULER_OBSERVER
+class task_scheduler_observer_v3;
+class observer_proxy;
+#endif /* __TBB_SCHEDULER_OBSERVER */
+
+#if __TBB_ARENA_PER_MASTER
+class market;
+#endif
+
+//! Cilk-style task scheduler.
+/** None of the fields here are ever read or written by threads other than
+    the thread that creates the instance.
+
+    Class generic_scheduler is an abstract base class that contains most of the scheduler,
+    except for tweaks specific to processors and tools (e.g. VTune).
+    The derived template class custom_scheduler<SchedulerTraits> fills in the tweaks. */
+class generic_scheduler: public scheduler, public ::rml::job {
+    friend class tbb::task;
+#if __TBB_ARENA_PER_MASTER
+    friend class market;
+#else
+    friend class UnpaddedArenaPrefix;
+#endif /* !__TBB_ARENA_PER_MASTER */
+    friend class arena;
+    friend class allocate_root_proxy;
+    friend class governor;
+#if __TBB_TASK_GROUP_CONTEXT
+    friend class allocate_root_with_context_proxy;
+    friend class tbb::task_group_context;
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+#if __TBB_SCHEDULER_OBSERVER
+    friend class task_scheduler_observer_v3;
+#endif /* __TBB_SCHEDULER_OBSERVER */
+    friend class scheduler;
+    template<typename SchedulerTraits> friend class custom_scheduler;
+
+    //! If sizeof(task) is <=quick_task_size, it is handled on a free list instead of malloc'd.
+    static const size_t quick_task_size = 256-task_prefix_reservation_size;
+
+    static bool is_version_3_task( task& t ) {
+        return (t.prefix().extra_state & 0x0F)>=0x1;
+    }
+
+    //! Position in the call stack specifying the maximal stack depth at which stealing is still allowed
+    uintptr_t my_stealing_threshold;
+#if __TBB_ipf
+    //! Position in the RSE backup area specifying the maximal filling at which stealing is still allowed
+    uintptr_t my_rsb_stealing_threshold;
+#endif
+
+    static const size_t null_arena_index = ~size_t(0);
+
+    //! Index of the arena slot the scheduler occupies now, or occupied last time.
+    size_t arena_index;
+
+    //! Capacity of ready tasks deque (number of elements - pointers to task).
+    size_t task_pool_size;
+
+    //! Pointer to the slot in the arena we own at the moment.
+    /** When out of arena it points to this scheduler's dummy_slot. **/
+    mutable arena_slot* my_arena_slot;
+
+    bool in_arena () const { return my_arena_slot != &dummy_slot; }
+
+    bool local_task_pool_empty () {
+        return my_arena_slot->task_pool == EmptyTaskPool || my_arena_slot->head >= my_arena_slot->tail;
+    }
+
+#if __TBB_ARENA_PER_MASTER
+    //! The market I am in
+    market* my_market;
+
+    //! The arena that I own (if master) or am servicing at the moment (if worker)
+    arena* my_arena;
+#else /* !__TBB_ARENA_PER_MASTER */
+    //! The arena that I own (if master) or am servicing at the moment (if worker)
+    arena* const my_arena;
+#endif /* !__TBB_ARENA_PER_MASTER */
+
+    //! Random number generator used for picking a random victim from which to steal.
+    FastRandom random;
+
+    //! Free list of small tasks that can be reused.
+    task* free_list;
+
+    //! Innermost task whose task::execute() is running.
+    task* innermost_running_task;
+
+    //! Fake root task created by slave threads.
+    /** The task is used as the "parent" argument to method wait_for_all. */
+    task* dummy_task;
+
+    //! Reference count for scheduler
+    /** Number of task_scheduler_init objects that point to this scheduler */
+    long ref_count;
+
+    mail_inbox inbox;
+
+    void attach_mailbox( affinity_id id ) {
+        __TBB_ASSERT(id>0,NULL);
+        inbox.attach( my_arena->mailbox(id) );
+        my_affinity_id = id;
+    }
+
+    //! The mailbox id assigned to this scheduler.
+    /** The id is assigned upon first entry into the arena.
+        TODO: how are id's being garbage collected? 
+        TODO: master thread may enter arena and leave and then reenter.
+                We want to give it the same affinity_id upon reentry, if practical.
+      */
+    affinity_id my_affinity_id;
+
+    /* A couple of bools can be located here because space is otherwise just padding after my_affinity_id. */
+
+    //! True if this is assigned to thread local storage by registering with governor.
+    bool is_registered;
+
+    //! True if *this was created by automatic TBB initialization
+    bool is_auto_initialized;
+
+#if __TBB_SCHEDULER_OBSERVER
+    //! Last observer_proxy processed by this scheduler
+    observer_proxy* local_last_observer_proxy;
+
+    //! Notify any entry observers that have been created since the last call by this thread.
+    void notify_entry_observers();
+    //! Notify all exit observers that this thread is no longer participating in task scheduling.
+    void notify_exit_observers( bool is_worker );
+#endif /* __TBB_SCHEDULER_OBSERVER */
+
+#if __TBB_COUNT_TASK_NODES
+    //! Net number of big task objects that have been allocated but not yet freed.
+    intptr_t task_node_count;
+#endif /* __TBB_COUNT_TASK_NODES */
+
+    //! Sets up the data necessary for the stealing limiting heuristics
+    void init_stack_info ();
+
+    //! Returns true if stealing is allowed
+    bool can_steal () {
+        int anchor;
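+        // The address of this local approximates the current stack top; because the
+        // stack grows downwards, stealing is allowed only while that address is
+        // still above the precomputed threshold(s).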
+#if __TBB_ipf
+        return my_stealing_threshold < (uintptr_t)&anchor && (uintptr_t)__TBB_get_bsp() < my_rsb_stealing_threshold;
+#else
+        return my_stealing_threshold < (uintptr_t)&anchor;
+#endif
+    }
+
+    //! Actions common to enter_arena and try_enter_arena
+    void do_enter_arena();
+
+    //! Used by workers to enter the arena 
+    /** Does not lock the task pool if the arena slot has been successfully grabbed. **/
+    void enter_arena();
+
+#if !__TBB_ARENA_PER_MASTER
+    //! Used by masters to try to enter the arena
+    /** Does not lock the task pool if the arena slot has been successfully grabbed. **/
+    void try_enter_arena();
+#endif /* !__TBB_ARENA_PER_MASTER */
+
+    //! Leave the arena
+    void leave_arena();
+
+    //! Locks victim's task pool, and returns pointer to it. The pointer can be NULL.
+    task** lock_task_pool( arena_slot* victim_arena_slot ) const;
+
+    //! Unlocks victim's task pool
+    void unlock_task_pool( arena_slot* victim_arena_slot, task** victim_task_pool ) const;
+
+
+    //! Locks the local task pool
+    void acquire_task_pool() const;
+
+    //! Unlocks the local task pool
+    void release_task_pool() const;
+
+    //! Checks if t is affinitized to another thread, and if so, bundles it as a proxy.
+    /** Returns either t or proxy containing t. **/
+    task* prepare_for_spawning( task* t );
+
+    //! Get a task from the local pool.
+    /** Called only by the pool owner.
+        Returns the pointer to the task or NULL if the pool is empty. 
+        In the latter case compacts the pool. **/
+    task* get_task();
+
+    //! Attempt to get a task from the mailbox.
+    /** Called only by the thread that owns *this.
+        Gets a task only if there is one not yet executed by another thread.
+        If successful, unlinks the task and returns a pointer to it.
+        Otherwise returns NULL. */
+    task* get_mailbox_task();
+
+    //! True if t is a task_proxy
+    static bool is_proxy( const task& t ) {
+        return t.prefix().extra_state==es_task_proxy;
+    }
+
+    //! Extracts task pointer from task_proxy, and frees the proxy.
+    /** Return NULL if underlying task was claimed by mailbox. */
+    task* strip_proxy( task_proxy* result );
+
+#if __TBB_ARENA_PER_MASTER
+    //! Get a task from the starvation-resistant task stream of the current arena.
+    /** Returns the pointer to the task, or NULL if the attempt was unsuccessful. 
+        The latter case does not mean that the stream is drained, however. **/
+    task* dequeue_task();
+
+#endif /* __TBB_ARENA_PER_MASTER */
+    //! Steal task from another scheduler's ready pool.
+    task* steal_task( arena_slot& victim_arena_slot );
+
+    /** Initial size of the task deque sufficient to serve without reallocation
+        4 nested parallel_for calls with iteration space of 65535 grains each. **/
+    static const size_t min_task_pool_size = 64;
+
+    //! Allocate task pool containing at least n elements.
+    task** allocate_task_pool( size_t n );
+
+    //! Deallocate task pool that was allocated by means of allocate_task_pool.
+    static void free_task_pool( task** pool ) {
+        __TBB_ASSERT( pool, "attempt to free NULL TaskPool" );
+        NFS_Free( pool );
+    }
+
+    //! Grow ready task deque to at least n elements.
+    void grow_task_pool( size_t n );
+
+    //! Initialize a scheduler for a master thread.
+    static generic_scheduler* create_master( arena& a );
+
+    //! Perform necessary cleanup when a master thread stops using TBB.
+    void cleanup_master();
+
+    //! Initialize a scheduler for a worker thread.
+#if __TBB_ARENA_PER_MASTER
+    static generic_scheduler* create_worker( market& m, size_t index );
+#else /* !__TBB_ARENA_PER_MASTER */
+    static generic_scheduler* create_worker( arena& a, size_t index );
+#endif /* !__TBB_ARENA_PER_MASTER */
+
+    //! Perform necessary cleanup when a worker thread finishes.
+    static void cleanup_worker( void* arg, bool is_worker );
+
+protected:
+    generic_scheduler( arena*, size_t index );
+
+#if TBB_USE_ASSERT > 1
+    //! Check that internal data structures are in consistent state.
+    /** Raises __TBB_ASSERT failure if inconsistency is found. */
+    void assert_task_pool_valid() const;
+#else
+    void assert_task_pool_valid() const {}
+#endif /* TBB_USE_ASSERT <= 1 */
+
+public:
+    /*override*/ 
+    void spawn( task& first, task*& next );
+
+    /*override*/ 
+    void spawn_root_and_wait( task& first, task*& next );
+
+#if __TBB_ARENA_PER_MASTER
+    /*override*/ 
+    void enqueue( task& task_, void* reserved );
+
+    void local_enqueue( task& task_ );
+#endif /* __TBB_ARENA_PER_MASTER */
+
+    void local_spawn( task& first, task*& next );
+    void local_spawn_root_and_wait( task& first, task*& next );
+    virtual void local_wait_for_all( task& parent, task* child ) = 0;
+
+    //! Destroy and deallocate this scheduler object
+    void free_scheduler();
+
+    //! Allocate task object, either from the heap or a free list.
+    /** Returns uninitialized task object with initialized prefix. */
+    task& allocate_task( size_t number_of_bytes, 
+                       __TBB_CONTEXT_ARG(task* parent, task_group_context* context) );
+
+    //! Put task on free list.
+    /** Does not call destructor. */
+    template<free_task_hint h>
+    void free_task( task& t );
+
+    void free_task_proxy( task_proxy& tp ) {
+#if TBB_USE_ASSERT
+        poison_pointer( tp.outbox );
+        poison_pointer( tp.next_in_mailbox );
+        tp.task_and_tag = 0xDEADBEEF;
+#endif /* TBB_USE_ASSERT */
+        free_task<small_task>(tp);
+    }
+
+    //! Return task object to the memory allocator.
+    void deallocate_task( task& t ) {
+#if TBB_USE_ASSERT
+        task_prefix& p = t.prefix();
+        p.state = 0xFF;
+        p.extra_state = 0xFF; 
+        poison_pointer(p.next);
+#endif /* TBB_USE_ASSERT */
+        NFS_Free((char*)&t-task_prefix_reservation_size);
+#if __TBB_COUNT_TASK_NODES
+        --task_node_count;
+#endif /* __TBB_COUNT_TASK_NODES */
+    }
+
+    //! True if running on a worker thread, false otherwise.
+    bool is_worker() {
+#if __TBB_ARENA_PER_MASTER
+        return arena_index != 0;
+#else /* !__TBB_ARENA_PER_MASTER */
+        return arena_index < my_arena->prefix().number_of_workers;
+#endif /* !__TBB_ARENA_PER_MASTER */
+    }
+
+#if __TBB_ARENA_PER_MASTER
+    //! Returns number of worker threads in the arena this thread belongs to.
+    unsigned number_of_workers_in_my_arena() {
+        return my_arena->my_max_num_workers;
+    }
+#endif /* __TBB_ARENA_PER_MASTER */
+
+#if __TBB_COUNT_TASK_NODES
+    intptr_t get_task_node_count( bool count_arena_workers = false ) {
+        return task_node_count + (count_arena_workers? my_arena->workers_task_node_count(): 0);
+    }
+#endif /* __TBB_COUNT_TASK_NODES */
+
+    //! Special value used to mark return_list as not taking any more entries.
+    static task* plugged_return_list() {return (task*)(intptr_t)(-1);}
+
+    //! Number of small tasks that have been allocated by this scheduler. 
+    intptr_t small_task_count;
+
+    //! List of small tasks that have been returned to this scheduler by other schedulers.
+    task* return_list;
+
+    //! Try getting a task from the mailbox or stealing from another scheduler.
+    /** Redirects to a customization. */
+    virtual task* receive_or_steal_task( reference_count&, bool ) = 0; 
+
+    //! Free a small task t that was allocated by a different scheduler
+    void free_nonlocal_small_task( task& t ); 
+
+#if __TBB_TASK_GROUP_CONTEXT
+    //! Padding isolating thread local members from members that can be written to by other threads.
+    char _padding1[NFS_MaxLineSize - sizeof(context_list_node_t)];
+
+    //! Head of the thread specific list of task group contexts.
+    context_list_node_t context_list_head;
+
+    //! Mutex protecting access to the list of task group contexts.
+    spin_mutex context_list_mutex;
+
+#if !__TBB_ARENA_PER_MASTER
+    //! Used to form the list of master thread schedulers.
+    scheduler_list_node_t my_node;
+#endif /* !__TBB_ARENA_PER_MASTER */
+
+    //! Thread local cancellation epoch.
+    /** When local epoch equals the global one, the cancellation state known
+        to this thread is synchronized with the global cancellation state. **/
+    uintptr_t local_cancel_count;
+
+    //! Flag indicating that a context is being destructed by its owner thread 
+    /** Together with nonlocal_ctx_list_update it constitutes a synchronization protocol
+        that keeps the hot path of context destruction (by the owner thread) mostly 
+        lock-free. **/
+    uintptr_t local_ctx_list_update;
+
+    //! Detaches abandoned contexts
+    /** These contexts must be destroyed by other threads. **/
+    void cleanup_local_context_list ();
+
+#if !__TBB_ARENA_PER_MASTER
+    //! Propagates cancellation request to all descendants of the context.
+    void propagate_cancellation ( task_group_context& ctx );
+#endif /* !__TBB_ARENA_PER_MASTER */
+
+    //! Propagates cancellation request to contexts registered by this scheduler.
+    void propagate_cancellation ();
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+#if _WIN32||_WIN64
+private:
+    //! Handle returned by RML when registering a master with RML
+    ::rml::server::execution_resource_t master_exec_resource;
+
+#if !__TBB_ARENA_PER_MASTER
+    //! register master with the resource manager
+    void register_master() {
+        __TBB_ASSERT( my_arena->prefix().server, "RML server not defined?" );
+        // the server may ignore registration and set master_exec_resource to NULL.
+        my_arena->prefix().server->register_master( master_exec_resource );
+    }
+
+    //! unregister master with the resource manager
+    void unregister_master() const {
+        my_arena->prefix().server->unregister_master( master_exec_resource );
+    }
+#endif /* !__TBB_ARENA_PER_MASTER && ( _WIN32||_WIN64 ) */
+#endif /* _WIN32||_WIN64 */
+
+    //! Dummy slot used when scheduler is not in arena
+    /** The data structure is heavily padded, therefore it should be placed after 
+        other data fields used by the owner thread only, to allow the compiler to use 
+        instructions with short offsets when accessing the majority of data members. **/
+    arena_slot dummy_slot;
+
+#if __TBB_TASK_GROUP_CONTEXT
+    //! Flag indicating that a context is being destructed by non-owner thread.
+    /** See also local_ctx_list_update. **/
+    uintptr_t nonlocal_ctx_list_update;
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+#if __TBB_SURVIVE_THREAD_SWITCH
+    __cilk_tbb_unwatch_thunk my_cilk_unwatch_thunk;
+#if TBB_USE_ASSERT
+    //! State values used to check interface contract with Cilk runtime.
+    /** Names of cs_running...cs_freed derived from state machine diagram in cilk-tbb-interop.h */
+    enum cilk_state_t {
+        cs_none=0xF000, // Start at nonzero value so that we can detect use of zeroed memory.
+        cs_running,
+        cs_limbo,
+        cs_freed
+    };
+    cilk_state_t my_cilk_state;
+#endif /* TBB_USE_ASSERT */
+#endif /* __TBB_SURVIVE_THREAD_SWITCH */
+
+#if __TBB_STATISTICS
+    //! Set of counters to track internal statistics on a per-thread basis
+    /** Placed at the end of the class definition to minimize the disturbance of
+        the core logic's memory operations. **/
+    mutable statistics_counters my_counters;
+#endif /* __TBB_STATISTICS */
+
+}; // class generic_scheduler
+
+
+template<free_task_hint h>
+void generic_scheduler::free_task( task& t ) {
+    GATHER_STATISTIC(--my_counters.active_tasks);
+    task_prefix& p = t.prefix();
+    // Verify that optimization hints are correct.
+    __TBB_ASSERT( h!=small_local_task || p.origin==this, NULL );
+    __TBB_ASSERT( !(h&small_task) || p.origin, NULL );
+#if TBB_USE_ASSERT
+    p.depth = 0xDEADBEEF;
+    p.ref_count = 0xDEADBEEF;
+    poison_pointer(p.owner);
+#endif /* TBB_USE_ASSERT */
+    __TBB_ASSERT( 1L<<t.state() & (1L<<task::executing|1L<<task::allocated), NULL );
+    p.state = task::freed;
+    if( h==small_local_task || p.origin==this ) {
+        GATHER_STATISTIC(++my_counters.free_list_length);
+        p.next = free_list;
+        free_list = &t;
+    } else if( !(h&local_task) && p.origin ) {
+        free_nonlocal_small_task(t);
+    } else {
+        GATHER_STATISTIC(--my_counters.big_tasks);
+        deallocate_task(t);
+    }
+}
+
+} // namespace internal
+} // namespace tbb
+
+#include "governor.h"
+
+inline void tbb::internal::generic_scheduler::spawn( task& first, task*& next ) {
+    governor::local_scheduler()->local_spawn( first, next );
+}
+
+inline void tbb::internal::generic_scheduler::spawn_root_and_wait( task& first, task*& next ) {
+    governor::local_scheduler()->local_spawn_root_and_wait( first, next );
+}
+
+#if __TBB_ARENA_PER_MASTER
+inline void tbb::internal::generic_scheduler::enqueue( task& task_, void* /*reserved*/ ) {
+    governor::local_scheduler()->local_enqueue( task_ );
+}
+
+#endif /* __TBB_ARENA_PER_MASTER */
+#endif /* _TBB_scheduler_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/scheduler_common.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/scheduler_common.h
new file mode 100644 (file)
index 0000000..37ae26e
--- /dev/null
@@ -0,0 +1,192 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _TBB_scheduler_common_H
+#define _TBB_scheduler_common_H
+
+#include "tbb/tbb_stddef.h"
+
+#include <string.h>  // for memset, memcpy, memmove
+
+#include "tbb_statistics.h"
+
+/* Temporarily change "private" to "public" while including "tbb/task.h".
+   This hack allows us to avoid publishing internal types and methods
+   in the public header files just for sake of friend declarations. */
+#define private public
+#include "tbb/task.h"
+#include "tbb/tbb_exception.h"
+#undef private
+
+// This macro is an attempt to get rid of ugly ifdefs in the shared parts of the code. 
+// It drops the second argument depending on whether the controlling macro is defined. 
+// The first argument is just a convenience that allows keeping the comma before the macro usage.
+#if __TBB_TASK_GROUP_CONTEXT
+    #define __TBB_CONTEXT_ARG(arg1, context) arg1, context
+#else /* !__TBB_TASK_GROUP_CONTEXT */
+    #define __TBB_CONTEXT_ARG(arg1, context) arg1
+#endif /* !__TBB_TASK_GROUP_CONTEXT */
+
+#if DO_TBB_TRACE
+#include <cstdio>
+#define TBB_TRACE(x) ((void)std::printf x)
+#else
+#define TBB_TRACE(x) ((void)(0))
+#endif /* DO_TBB_TRACE */
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // Workaround for overzealous compiler warnings
+    // These particular warnings are so ubiquitous that no attempt is made to narrow 
+    // the scope of the warnings.
+    #pragma warning (disable: 4100 4127 4312 4244 4267 4706)
+#endif
+
+namespace tbb {
+namespace internal {
+
+/** Defined in scheduler.cpp **/
+extern uintptr_t global_cancel_count;
+
+//! Alignment for a task object
+const size_t task_alignment = 16;
+
+//! Number of bytes reserved for a task prefix
+/** If not exactly sizeof(task_prefix), the extra bytes *precede* the task_prefix. */
+const size_t task_prefix_reservation_size = ((sizeof(internal::task_prefix)-1)/task_alignment+1)*task_alignment;
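+// The expression above rounds sizeof(internal::task_prefix) up to the nearest multiple of task_alignment.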
+
+//! Definitions for bits in task_prefix::extra_state
+enum task_extra_state {
+    //! Tag for v1 tasks (i.e. tasks in TBB 1.0 and 2.0)
+    es_version_1_task = 0,
+    //! Tag for v3 tasks (i.e. tasks in TBB 2.1-2.2)
+    es_version_3_task = 1,
+    //! Tag for v3 task_proxy.
+    es_task_proxy = 0x20,
+    //! Set if ref_count might be changed by another thread.  Used for debugging.
+    es_ref_count_active = 0x40,
+    //! Set if the task has been stolen
+    es_task_is_stolen = 0x80
+};
+
+//! Optimization hint to free_task that enables it to omit unnecessary tests and code.
+enum free_task_hint {
+    //! No hint 
+    no_hint=0,
+    //! Task is known to have been allocated by this scheduler
+    local_task=1,
+    //! Task is known to be a small task.
+    /** Task should be returned to the free list of *some* scheduler, possibly not this scheduler. */
+    small_task=2,
+    //! Bitwise-OR of local_task and small_task.  
+    /** Task should be returned to free list of this scheduler. */
+    small_local_task=3
+};
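+// Example: a caller that already knows a task came from this scheduler's own free
+// list can call free_task<small_local_task>(t), which lets generic_scheduler::free_task
+// skip the origin checks and push the task straight back onto its free list.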
+
+//------------------------------------------------------------------------
+// Debugging support
+//------------------------------------------------------------------------
+
+#if TBB_USE_ASSERT
+
+static const uintptr_t venom = 
+#if __TBB_WORDSIZE == 8
+        0xDDEEAADDDEADBEEF;
+#else
+        0xDEADBEEF;
+#endif
+
+
+/** In contrast to poison_pointer() and assert_task_valid(), poison_value() is a macro 
+    because the variable used as its argument may be undefined in release builds. **/
+#define poison_value(g) (g = venom)
+
+/** Expected to be used in assertions only, thus no empty form is defined. **/
+inline bool is_alive( uintptr_t v ) { return v != venom; }
+
+/** Logically, this method should be a member of class task.
+    But we do not want to publish it, so it is here instead. */
+inline void assert_task_valid( const task& task ) {
+    __TBB_ASSERT( &task!=NULL, NULL );
+    __TBB_ASSERT( !is_poisoned(&task), NULL );
+    __TBB_ASSERT( (uintptr_t)&task % task_alignment == 0, "misaligned task" );
+    __TBB_ASSERT( (unsigned)task.state()<=(unsigned)task::recycle, "corrupt task (invalid state)" );
+}
+
+#else /* !TBB_USE_ASSERT */
+
+#define poison_value(g) ((void)0)
+
+inline void assert_task_valid( const task& ) {}
+
+#endif /* !TBB_USE_ASSERT */
+
+//------------------------------------------------------------------------
+// Helpers
+//------------------------------------------------------------------------
+
+inline bool ConcurrentWaitsEnabled ( task& t ) {
+    return (t.prefix().context->my_version_and_traits & task_group_context::concurrent_wait) != 0;
+}
+
+inline bool CancellationInfoPresent ( task& t ) {
+    return t.prefix().context->my_cancellation_requested != 0;
+}
+
+#if __TBB_TASK_GROUP_CONTEXT
+#if TBB_USE_CAPTURED_EXCEPTION
+    inline tbb_exception* TbbCurrentException( task_group_context*, tbb_exception* src) { return src->move(); }
+    inline tbb_exception* TbbCurrentException( task_group_context*, captured_exception* src) { return src; }
+#else
+    // Using a macro instead of an inline function here avoids evaluating the 
+    // TbbCapturedException expression when exact propagation is enabled for the context.
+    #define TbbCurrentException(context, TbbCapturedException) \
+        context->my_version_and_traits & task_group_context::exact_exception    \
+            ? tbb_exception_ptr::allocate()    \
+            : tbb_exception_ptr::allocate( *(TbbCapturedException) );
+#endif /* !TBB_USE_CAPTURED_EXCEPTION */
+
+#define TbbRegisterCurrentException(context, TbbCapturedException) \
+    if ( context->cancel_group_execution() ) {  \
+        /* We are the first to signal cancellation, so store the exception that caused it. */  \
+        context->my_exception = TbbCurrentException( context, TbbCapturedException ); \
+    }
+
+#define TbbCatchAll(context)  \
+    catch ( tbb_exception& exc ) {  \
+        TbbRegisterCurrentException( context, &exc );   \
+    } catch ( std::exception& exc ) {   \
+        TbbRegisterCurrentException( context, captured_exception::allocate(typeid(exc).name(), exc.what()) ); \
+    } catch ( ... ) {   \
+        TbbRegisterCurrentException( context, captured_exception::allocate("...", "Unidentified exception") );\
+    }
+#endif /* __TBB_TASK_GROUP_CONTEXT */
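+// Illustrative usage sketch (not part of the library sources): TbbCatchAll supplies
+// the catch clauses for a preceding try-block, e.g.
+//
+//     try {
+//         // run code that may throw
+//     } TbbCatchAll( context );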
+
+} // namespace internal
+} // namespace tbb
+
+#endif /* _TBB_scheduler_common_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/scheduler_utility.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/scheduler_utility.h
new file mode 100644 (file)
index 0000000..a85aac1
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _TBB_scheduler_utility_H
+#define _TBB_scheduler_utility_H
+
+#include "scheduler.h"
+
+namespace tbb {
+namespace internal {
+
+//------------------------------------------------------------------------
+// auto_empty_task
+//------------------------------------------------------------------------
+
+//! Smart holder for the empty task class with automatic destruction
+class auto_empty_task {
+    task* my_task;
+    generic_scheduler* my_scheduler;
+public:
+    auto_empty_task ( __TBB_CONTEXT_ARG(generic_scheduler *s, task_group_context* context) ) 
+        : my_task( new(&s->allocate_task(sizeof(empty_task), __TBB_CONTEXT_ARG(NULL, context))) empty_task )
+        , my_scheduler(s)
+    {}
+    // empty_task has trivial destructor, so there's no need to call it.
+    ~auto_empty_task () { my_scheduler->free_task<small_local_task>(*my_task); }
+
+    operator task& () { return *my_task; }
+    task* operator & () { return my_task; }
+    task_prefix& prefix () { return my_task->prefix(); }
+}; // class auto_empty_task
+
+//------------------------------------------------------------------------
+// fast_reverse_vector
+//------------------------------------------------------------------------
+
+//! Vector that grows without reallocations, and stores items in the reverse order.
+/** Its first segment must be initialized with a preallocated memory chunk
+    (usually a static array or an array allocated on the stack).
+    The second template parameter specifies the maximal number of segments. Each 
+    subsequent segment is twice as large as the previous one. **/
+template<typename T, size_t max_segments = 16>
+class fast_reverse_vector
+{
+public:
+    fast_reverse_vector ( T* initial_segment, size_t segment_size )
+        : m_cur_segment(initial_segment)
+        , m_cur_segment_size(segment_size)
+        , m_pos(segment_size)
+        , m_num_segments(0)
+        , m_size(0)
+    {
+        __TBB_ASSERT ( initial_segment && segment_size, "Nonempty initial segment must be supplied");
+    }
+
+    ~fast_reverse_vector ()
+    {
+        for ( size_t i = 1; i < m_num_segments; ++i )
+            NFS_Free( m_segments[i] );
+    }
+
+    size_t size () const { return m_size + m_cur_segment_size - m_pos; }
+
+    void push_back ( const T& val )
+    {
+        if ( !m_pos ) {
+            m_segments[m_num_segments++] = m_cur_segment;
+            __TBB_ASSERT ( m_num_segments < max_segments, "Maximal capacity exceeded" );
+            m_size += m_cur_segment_size;
+            m_cur_segment_size *= 2;
+            m_pos = m_cur_segment_size;
+            m_cur_segment = (T*)NFS_Allocate( m_cur_segment_size * sizeof(T), 1, NULL );
+        }
+        m_cur_segment[--m_pos] = val;
+    }
+
+    //! Copies the contents of the vector into the dst array. 
+    /** Can only be used when T is a POD type, as copying does not invoke copy constructors. **/
+    void copy_memory ( T* dst ) const
+    {
+        size_t size = m_cur_segment_size - m_pos;
+        memcpy( dst, m_cur_segment + m_pos, size * sizeof(T) );
+        dst += size;
+        size = m_cur_segment_size / 2;
+        for ( long i = (long)m_num_segments - 1; i >= 0; --i ) {
+            memcpy( dst, m_segments[i], size * sizeof(T) );
+            dst += size;
+            size /= 2;
+        }
+    }
+
+protected:
+    //! The current (not completely filled) segment
+    T       *m_cur_segment;
+
+    //! Capacity of m_cur_segment
+    size_t  m_cur_segment_size;
+
+    //! Insertion position in m_cur_segment
+    size_t  m_pos;
+
+    //! Array of filled segments (has fixed size specified by the second template parameter)
+    T       *m_segments[max_segments];
+    
+    //! Number of filled segments (the size of m_segments)
+    size_t  m_num_segments;
+
+    //! Number of items in the segments in m_segments
+    size_t  m_size;
+
+}; // class fast_reverse_vector
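+// Illustrative usage sketch (not part of the library sources), assuming a
+// stack-allocated first segment as local_spawn uses:
+//
+//     task* initial[64];
+//     fast_reverse_vector<task*> v( initial, 64 );
+//     v.push_back( a );            // grows by doubling, never copies old segments
+//     v.push_back( b );
+//     v.copy_memory( dst );        // dst[0]==b, dst[1]==a, i.e. reverse push order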
+
+} // namespace internal
+} // namespace tbb
+
+#endif /* _TBB_scheduler_utility_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/semaphore.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/semaphore.h
new file mode 100644 (file)
index 0000000..6436745
--- /dev/null
@@ -0,0 +1,132 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_tbb_semaphore_H
+#define __TBB_tbb_semaphore_H
+
+#include "tbb/tbb_stddef.h"
+
+#if _WIN32||_WIN64
+#include "tbb/machine/windows_api.h"
+
+#elif __APPLE__
+#include <mach/semaphore.h>
+#include <mach/task.h>
+#include <mach/mach_init.h>
+#include <mach/error.h>
+
+#else
+#include <semaphore.h>
+#ifdef TBB_USE_DEBUG
+#include <errno.h>
+#endif
+#endif /*_WIN32||_WIN64*/
+
+namespace tbb {
+namespace internal {
+
+
+#if _WIN32||_WIN64
+typedef LONG sem_count_t;
+//! Edsger Dijkstra's counting semaphore
+class semaphore : no_copy {
+    static const int max_semaphore_cnt = MAXLONG;
+public:
+    //! ctor
+    semaphore(size_t start_cnt_ = 0) {init_semaphore(start_cnt_);}
+    //! dtor
+    ~semaphore() {CloseHandle( sem );}
+    //! wait/acquire
+    void P() {WaitForSingleObject( sem, INFINITE );}
+    //! post/release 
+    void V() {ReleaseSemaphore( sem, 1, NULL );}
+private:
+    HANDLE sem;
+    void init_semaphore(size_t start_cnt_) {sem = CreateSemaphore( NULL, LONG(start_cnt_), max_semaphore_cnt, NULL );}
+};
+#elif __APPLE__
+//! Edsger Dijkstra's counting semaphore
+class semaphore : no_copy {
+public:
+    //! ctor
+    semaphore(int start_cnt_ = 0) : sem(start_cnt_) { init_semaphore(start_cnt_); }
+    //! dtor
+    ~semaphore() {
+        kern_return_t ret = semaphore_destroy( mach_task_self(), sem );
+        __TBB_ASSERT_EX( ret==err_none, NULL );
+    }
+    //! wait/acquire
+    void P() { 
+        int ret;
+        do {
+            ret = semaphore_wait( sem );
+        } while( ret==KERN_ABORTED );
+        __TBB_ASSERT( ret==KERN_SUCCESS, "semaphore_wait() failed" );
+    }
+    //! post/release 
+    void V() { semaphore_signal( sem ); }
+private:
+    semaphore_t sem;
+    void init_semaphore(int start_cnt_) {
+        kern_return_t ret = semaphore_create( mach_task_self(), &sem, SYNC_POLICY_FIFO, start_cnt_ );
+        __TBB_ASSERT_EX( ret==err_none, "failed to create a semaphore" );
+    }
+};
+#else /* Linux/Unix */
+typedef uint32_t sem_count_t;
+//! Edsger Dijkstra's counting semaphore
+class semaphore : no_copy {
+public:
+    //! ctor
+    semaphore(int start_cnt_ = 0 ) { init_semaphore( start_cnt_ ); }
+
+    //! dtor
+    ~semaphore() {
+        int ret = sem_destroy( &sem );
+        __TBB_ASSERT_EX( !ret, NULL );
+    }
+    //! wait/acquire
+    void P() {
+        while( sem_wait( &sem )!=0 )
+            __TBB_ASSERT( errno==EINTR, NULL );
+    }
+    //! post/release 
+    void V() { sem_post( &sem ); }
+private:
+    sem_t sem;
+    void init_semaphore(int start_cnt_) {
+        int ret = sem_init( &sem, /*shared among threads*/ 0, start_cnt_ );
+        __TBB_ASSERT_EX( !ret, NULL );
+    }
+};
+#endif /* _WIN32||_WIN64 */
+
+} // namespace internal
+} // namespace tbb
+
+#endif /* __TBB_tbb_semaphore_H */
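
For illustration (not part of the patch): the P()/V() semantics wrapped above, shown directly with the POSIX calls used by the Linux branch, since tbb::internal::semaphore is an internal header rather than public API. A minimal producer/consumer sketch; note that the TBB wrapper additionally retries sem_wait() on EINTR.

    #include <cstdio>
    #include <pthread.h>
    #include <semaphore.h>

    static sem_t items;

    static void* producer( void* ) {
        for ( int i = 0; i < 3; ++i )
            sem_post( &items );               // V(): release one unit
        return NULL;
    }

    int main() {
        sem_init( &items, /*shared among threads*/ 0, /*initial count*/ 0 );
        pthread_t t;
        pthread_create( &t, NULL, producer, NULL );
        for ( int i = 0; i < 3; ++i ) {
            sem_wait( &items );               // P(): block until a unit is available
            std::printf( "consumed item %d\n", i );
        }
        pthread_join( t, NULL );
        sem_destroy( &items );
        return 0;
    }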
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/spin_mutex.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/spin_mutex.cpp
new file mode 100644 (file)
index 0000000..d5b11af
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/tbb_machine.h"
+#include "tbb/spin_mutex.h"
+#include "itt_notify.h"
+#include "tbb_misc.h"
+
+namespace tbb {
+
+void spin_mutex::scoped_lock::internal_acquire( spin_mutex& m ) {
+    __TBB_ASSERT( !my_mutex, "already holding a lock on a spin_mutex" );
+    ITT_NOTIFY(sync_prepare, &m);
+    my_unlock_value = __TBB_LockByte(m.flag);
+    my_mutex = &m;
+    ITT_NOTIFY(sync_acquired, &m);
+}
+
+void spin_mutex::scoped_lock::internal_release() {
+    __TBB_ASSERT( my_mutex, "release on spin_mutex::scoped_lock that is not holding a lock" );
+    __TBB_ASSERT( !(my_unlock_value&1), "corrupted scoped_lock?" );
+
+    ITT_NOTIFY(sync_releasing, my_mutex);
+    __TBB_store_with_release(my_mutex->flag, static_cast<unsigned char>(my_unlock_value));
+    my_mutex = NULL;
+}
+
+bool spin_mutex::scoped_lock::internal_try_acquire( spin_mutex& m ) {
+    __TBB_ASSERT( !my_mutex, "already holding a lock on a spin_mutex" );
+    bool result = bool( __TBB_TryLockByte(m.flag) );
+    if( result ) {
+        my_unlock_value = 0;
+        my_mutex = &m;
+        ITT_NOTIFY(sync_acquired, &m);
+    }
+    return result;
+}
+
+void spin_mutex::internal_construct() {
+    ITT_SYNC_CREATE(this, _T("tbb::spin_mutex"), _T(""));
+}
+
+} // namespace tbb
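
For illustration (not part of the patch): a minimal sketch of the public tbb::spin_mutex API whose slow paths are defined above. Constructing a scoped_lock maps to internal_acquire, scope exit to internal_release, and try_acquire to internal_try_acquire.

    #include "tbb/spin_mutex.h"

    struct Counter {
        tbb::spin_mutex mutex;
        long value;

        Counter() : value(0) {}

        void increment() {
            tbb::spin_mutex::scoped_lock lock( mutex );   // blocks until acquired
            ++value;
        }                                                 // released on scope exit

        bool try_increment() {
            tbb::spin_mutex::scoped_lock lock;
            if ( !lock.try_acquire( mutex ) )             // non-blocking attempt
                return false;
            ++value;
            return true;
        }
    };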
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/spin_rw_mutex.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/spin_rw_mutex.cpp
new file mode 100644 (file)
index 0000000..f4f09da
--- /dev/null
@@ -0,0 +1,174 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/spin_rw_mutex.h"
+#include "tbb/tbb_machine.h"
+#include "itt_notify.h"
+
+#if defined(_MSC_VER) && defined(_Wp64)
+    // Workaround for overzealous compiler warnings in /Wp64 mode
+    #pragma warning (disable: 4244)
+#endif
+
+namespace tbb {
+
+template<typename T> // a template can work with private spin_rw_mutex::state_t
+static inline T CAS(volatile T &addr, T newv, T oldv) {
+    // ICC (9.1 and 10.1 were tried) is unable to do the implicit conversion
+    // from "volatile T*" to "volatile void*", so an explicit cast is added.
+    return T(__TBB_CompareAndSwapW((volatile void *)&addr, (intptr_t)newv, (intptr_t)oldv));
+}
+
+//! Acquire write lock on the given mutex.
+bool spin_rw_mutex_v3::internal_acquire_writer()
+{
+    ITT_NOTIFY(sync_prepare, this);
+    internal::atomic_backoff backoff;
+    for(;;) {
+        state_t s = const_cast<volatile state_t&>(state); // ensure reloading
+        if( !(s & BUSY) ) { // no readers, no writers
+            if( CAS(state, WRITER, s)==s )
+                break; // successfully stored writer flag
+            backoff.reset(); // we could be very close to completing the operation
+        } else if( !(s & WRITER_PENDING) ) { // no pending writers
+            __TBB_AtomicOR(&state, WRITER_PENDING);
+        }
+        backoff.pause();
+    }
+    ITT_NOTIFY(sync_acquired, this);
+    return false;
+}
+
+//! Release writer lock on the given mutex
+void spin_rw_mutex_v3::internal_release_writer() 
+{
+    ITT_NOTIFY(sync_releasing, this);
+    __TBB_AtomicAND( &state, READERS );
+}
+
+//! Acquire read lock on given mutex.
+void spin_rw_mutex_v3::internal_acquire_reader() 
+{
+    ITT_NOTIFY(sync_prepare, this);
+    internal::atomic_backoff backoff;
+    for(;;) {
+        state_t s = const_cast<volatile state_t&>(state); // ensure reloading
+        if( !(s & (WRITER|WRITER_PENDING)) ) { // no writer or write requests
+            state_t t = (state_t)__TBB_FetchAndAddW( &state, (intptr_t) ONE_READER );
+            if( !( t&WRITER )) 
+                break; // successfully stored increased number of readers
+            // writer got there first, undo the increment
+            __TBB_FetchAndAddW( &state, -(intptr_t)ONE_READER );
+        }
+        backoff.pause();
+    }
+
+    ITT_NOTIFY(sync_acquired, this);
+    __TBB_ASSERT( state & READERS, "invalid state of a read lock: no readers" );
+}
+
+//! Upgrade reader to become a writer.
+/** Returns true if the upgrade happened without re-acquiring the lock, and false otherwise. */
+bool spin_rw_mutex_v3::internal_upgrade() 
+{
+    state_t s = state;
+    __TBB_ASSERT( s & READERS, "invalid state before upgrade: no readers " );
+    // check and set writer-pending flag
+    // required conditions: either no pending writers, or we are the only reader
+    // (with multiple readers and pending writer, another upgrade could have been requested)
+    while( (s & READERS)==ONE_READER || !(s & WRITER_PENDING) ) {
+        state_t old_s = s;
+        if( (s=CAS(state, s | WRITER | WRITER_PENDING, s))==old_s ) {
+            internal::atomic_backoff backoff;
+            ITT_NOTIFY(sync_prepare, this);
+            // the state should be 0...0111, i.e. 1 reader and waiting writer;
+            // both new readers and writers are blocked
+            while( (state & READERS) != ONE_READER ) // more than 1 reader
+                backoff.pause(); 
+            __TBB_ASSERT((state&(WRITER_PENDING|WRITER))==(WRITER_PENDING|WRITER),"invalid state when upgrading to writer");
+
+            __TBB_FetchAndAddW( &state,  - (intptr_t)(ONE_READER+WRITER_PENDING));
+            ITT_NOTIFY(sync_acquired, this);
+            return true; // successfully upgraded
+        }
+    }
+    // slow reacquire
+    internal_release_reader();
+    return internal_acquire_writer(); // always returns false
+}
+
+//! Downgrade writer to a reader
+void spin_rw_mutex_v3::internal_downgrade() {
+    ITT_NOTIFY(sync_releasing, this);
+    __TBB_FetchAndAddW( &state, (intptr_t)(ONE_READER-WRITER));
+    __TBB_ASSERT( state & READERS, "invalid state after downgrade: no readers" );
+}
+
+//! Release read lock on the given mutex
+void spin_rw_mutex_v3::internal_release_reader()
+{
+    __TBB_ASSERT( state & READERS, "invalid state of a read lock: no readers" );
+    ITT_NOTIFY(sync_releasing, this); // release reader
+    __TBB_FetchAndAddWrelease( &state,-(intptr_t)ONE_READER);
+}
+
+//! Try to acquire write lock on the given mutex
+bool spin_rw_mutex_v3::internal_try_acquire_writer()
+{
+    // for a writer: only possible to acquire if no active readers or writers
+    state_t s = state;
+    if( !(s & BUSY) ) // no readers, no writers; mask is 1..1101
+        if( CAS(state, WRITER, s)==s ) {
+            ITT_NOTIFY(sync_acquired, this);
+            return true; // successfully stored writer flag
+        }
+    return false;
+}
+
+//! Try to acquire read lock on the given mutex
+bool spin_rw_mutex_v3::internal_try_acquire_reader()
+{
+    // for a reader: acquire if no active or waiting writers
+    state_t s = state;
+    if( !(s & (WRITER|WRITER_PENDING)) ) { // no writers
+        state_t t = (state_t)__TBB_FetchAndAddW( &state, (intptr_t) ONE_READER );
+        if( !( t&WRITER )) {  // got the lock
+            ITT_NOTIFY(sync_acquired, this);
+            return true; // successfully stored increased number of readers
+        }
+        // writer got there first, undo the increment
+        __TBB_FetchAndAddW( &state, -(intptr_t)ONE_READER );
+    }
+    return false;
+}
+
+
+void spin_rw_mutex_v3::internal_construct() {
+    ITT_SYNC_CREATE(this, _T("tbb::spin_rw_mutex"), _T(""));
+}
+} // namespace tbb
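
For illustration (not part of the patch): a sketch of the public reader/writer API backed by the routines above. A reader that decides to modify calls upgrade_to_writer(); if the upgrade had to release and re-acquire the lock (internal_upgrade returned false), the data must be re-checked.

    #include "tbb/spin_rw_mutex.h"
    #include <map>
    #include <string>

    typedef std::map<std::string, int> Table;

    int lookup_or_insert( Table& table, tbb::spin_rw_mutex& mutex, const std::string& key ) {
        tbb::spin_rw_mutex::scoped_lock lock( mutex, /*write=*/ false );  // reader
        Table::iterator it = table.find( key );
        if ( it != table.end() )
            return it->second;
        // Not found: become a writer. If false is returned, the lock was released
        // and re-acquired, so another thread may have inserted the key meanwhile.
        if ( !lock.upgrade_to_writer() ) {
            it = table.find( key );
            if ( it != table.end() )
                return it->second;
        }
        return table[key] = 0;                                            // insert a default
    }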
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/task.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/task.cpp
new file mode 100644 (file)
index 0000000..72bce80
--- /dev/null
@@ -0,0 +1,278 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include <new>
+
+// Do not include task.h directly. Use scheduler_common.h instead
+#include "scheduler_common.h"
+#include "governor.h"
+#include "scheduler.h"
+#include "itt_notify.h"
+
+#include "tbb/cache_aligned_allocator.h"
+#include "tbb/partitioner.h"
+
+namespace tbb {
+
+using namespace std;
+
+namespace internal {
+
+//------------------------------------------------------------------------
+// Methods of allocate_root_proxy
+//------------------------------------------------------------------------
+task& allocate_root_proxy::allocate( size_t size ) {
+    internal::generic_scheduler* v = governor::local_scheduler();
+    __TBB_ASSERT( v, "thread did not activate a task_scheduler_init object?" );
+#if __TBB_TASK_GROUP_CONTEXT
+    task_prefix& p = v->innermost_running_task->prefix();
+
+    ITT_STACK_CREATE(p.context->itt_caller);
+#endif
+    // New root task becomes part of the currently running task's cancellation context
+    return v->allocate_task( size, __TBB_CONTEXT_ARG(NULL, p.context) );
+}
+
+void allocate_root_proxy::free( task& task ) {
+    internal::generic_scheduler* v = governor::local_scheduler();
+    __TBB_ASSERT( v, "thread does not have initialized task_scheduler_init object?" );
+#if __TBB_TASK_GROUP_CONTEXT
+    // No need to do anything here as long as there is no context -> task connection
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+    v->free_task<local_task>( task );
+}
+
+#if __TBB_TASK_GROUP_CONTEXT
+//------------------------------------------------------------------------
+// Methods of allocate_root_with_context_proxy
+//------------------------------------------------------------------------
+task& allocate_root_with_context_proxy::allocate( size_t size ) const {
+    internal::generic_scheduler* v = governor::local_scheduler();
+    __TBB_ASSERT( v, "thread did not activate a task_scheduler_init object?" );
+    task_prefix& p = v->innermost_running_task->prefix();
+    task& t = v->allocate_task( size, __TBB_CONTEXT_ARG(NULL, &my_context) );
+    // The supported usage model prohibits concurrent initial binding. Thus we do not
+    // need interlocked operations or fences to manipulate my_context.my_kind.
+    if ( my_context.my_kind == task_group_context::binding_required ) {
+        __TBB_ASSERT ( my_context.my_owner, "Context without owner" );
+        __TBB_ASSERT ( !my_context.my_parent, "Parent context set before initial binding" );
+        // If we are in the outermost task dispatch loop of a master thread, then
+        // there is nothing to bind this context to, and we skip the binding part.
+        if ( v->innermost_running_task != v->dummy_task ) {
+            // Though the following assignment makes my_context accessible for
+            // cancellation propagation, we cannot rely on the cancellation being
+            // propagated into it without taking a global lock. Instead we always
+            // check the state of my_context's ancestors, and use cancellation
+            // epoch counters to minimize the depth of inspection.
+            my_context.my_parent = p.context;
+            uintptr_t local_count_snapshot = v->local_cancel_count;
+            // Prevent load of global_cancel_count from being hoisted above store
+            // to my_context.my_parent and load of local_cancel_count.
+            __TBB_full_memory_fence();
+            // The full fence guarantees that if no cancellation propagation was
+            // detected by the following condition, then either my_context's parent
+            // has the correct cancellation state, or my_context will receive the
+            // cancellation signal if a new cancellation starts after this point.
+            if ( local_count_snapshot != global_cancel_count ) {
+                // Another thread is propagating cancellation right now. Make sure 
+                // that my_context's parent gets the cancellation request (if one 
+                // of its ancestors is canceled) before we read it later on.
+                p.context->propagate_cancellation_from_ancestors();
+            }
+            if ( p.context->my_cancellation_requested ) {
+                // Propagate cancellation state from the parent context
+                my_context.my_cancellation_requested = 1;
+            }
+        }
+        my_context.my_kind = task_group_context::binding_completed;
+    }
+    // else the context either has already been associated with its parent or is isolated
+    ITT_STACK_CREATE(my_context.itt_caller);
+    return t;
+}
+
+void allocate_root_with_context_proxy::free( task& task ) const {
+    internal::generic_scheduler* v = governor::local_scheduler();
+    __TBB_ASSERT( v, "thread does not have initialized task_scheduler_init object?" );
+    // No need to do anything here as long as unbinding is performed by context destructor only.
+    v->free_task<local_task>( task );
+}
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+//------------------------------------------------------------------------
+// Methods of allocate_continuation_proxy
+//------------------------------------------------------------------------
+task& allocate_continuation_proxy::allocate( size_t size ) const {
+    task& t = *((task*)this);
+    assert_task_valid(t);
+    generic_scheduler* s = governor::local_scheduler();
+    task* parent = t.parent();
+    t.prefix().parent = NULL;
+    return s->allocate_task( size, __TBB_CONTEXT_ARG(parent, t.prefix().context) );
+}
+
+void allocate_continuation_proxy::free( task& mytask ) const {
+    // Restore the parent as it was before the corresponding allocate was called.
+    ((task*)this)->prefix().parent = mytask.parent();
+    governor::local_scheduler()->free_task<local_task>(mytask);
+}
+
+//------------------------------------------------------------------------
+// Methods of allocate_child_proxy
+//------------------------------------------------------------------------
+task& allocate_child_proxy::allocate( size_t size ) const {
+    task& t = *((task*)this);
+    assert_task_valid(t);
+    generic_scheduler* s = governor::local_scheduler();
+    return s->allocate_task( size, __TBB_CONTEXT_ARG(&t, t.prefix().context) );
+}
+
+void allocate_child_proxy::free( task& mytask ) const {
+    governor::local_scheduler()->free_task<local_task>(mytask);
+}
+
+//------------------------------------------------------------------------
+// Methods of allocate_additional_child_of_proxy
+//------------------------------------------------------------------------
+task& allocate_additional_child_of_proxy::allocate( size_t size ) const {
+    parent.increment_ref_count();
+    generic_scheduler* s = governor::local_scheduler();
+    return s->allocate_task( size, __TBB_CONTEXT_ARG(&parent, parent.prefix().context) );
+}
+
+void allocate_additional_child_of_proxy::free( task& task ) const {
+    // Undo the increment.  We do not check the result of the fetch-and-decrement.
+    // We could consider spawning the task if the fetch-and-decrement returns 1.
+    // But we do not know that this was the programmer's intention.
+    // Furthermore, if it was the programmer's intention, the program has a fundamental
+    // race condition (which we warn about in the Reference manual), because the
+    // reference count might have become zero before the corresponding call to
+    // allocate_additional_child_of_proxy::allocate.
+    parent.internal_decrement_ref_count();
+    governor::local_scheduler()->free_task<local_task>(task);
+}
+
+//------------------------------------------------------------------------
+// Support for auto_partitioner
+//------------------------------------------------------------------------
+size_t get_initial_auto_partitioner_divisor() {
+    const size_t X_FACTOR = 4;
+    return X_FACTOR * (governor::max_number_of_workers()+1);
+}
+
+//------------------------------------------------------------------------
+// Methods of affinity_partitioner_base_v3
+//------------------------------------------------------------------------
+void affinity_partitioner_base_v3::resize( unsigned factor ) {
+    // Check factor to avoid asking for number of workers while there might be no arena.
+    size_t new_size = factor ? factor*(governor::max_number_of_workers()+1) : 0;
+    if( new_size!=my_size ) {
+        if( my_array ) {
+            NFS_Free( my_array );
+            // Following two assignments must be done here for sake of exception safety.
+            my_array = NULL;
+            my_size = 0;
+        }
+        if( new_size ) {
+            my_array = static_cast<affinity_id*>(NFS_Allocate(new_size,sizeof(affinity_id), NULL ));
+            memset( my_array, 0, sizeof(affinity_id)*new_size );
+            my_size = new_size;
+        }
+    } 
+}
+
+} // namespace internal
+
+using namespace tbb::internal;
+
+//------------------------------------------------------------------------
+// task
+//------------------------------------------------------------------------
+
+void task::internal_set_ref_count( int count ) {
+    __TBB_ASSERT( count>=0, "count must not be negative" );
+    __TBB_ASSERT( !(prefix().extra_state & es_ref_count_active), "ref_count race detected" );
+    ITT_NOTIFY(sync_releasing, &prefix().ref_count);
+    prefix().ref_count = count;
+}
+
+internal::reference_count task::internal_decrement_ref_count() {
+    ITT_NOTIFY( sync_releasing, &prefix().ref_count );
+    internal::reference_count k = __TBB_FetchAndDecrementWrelease( &prefix().ref_count );
+    __TBB_ASSERT( k>=1, "task's reference count underflowed" );
+    if( k==1 )
+        ITT_NOTIFY( sync_acquired, &prefix().ref_count );
+    return k-1;
+}
+
+task& task::self() {
+    generic_scheduler *v = governor::local_scheduler();
+    v->assert_task_pool_valid();
+    __TBB_ASSERT( v->innermost_running_task, NULL );
+    return *v->innermost_running_task;
+}
+
+bool task::is_owned_by_current_thread() const {
+    return true;
+}
+
+void interface5::internal::task_base::destroy( task& victim ) {
+    // A reference count of 1 may be a guard reference for wait_for_all that was not
+    // reset, either because of concurrent_wait mode or because a prepared root task
+    // was not actually used for spawning tasks (as in structured_task_group).
+    __TBB_ASSERT( (intptr_t)victim.prefix().ref_count <= 1, "Task being destroyed must not have children" );
+    __TBB_ASSERT( victim.state()==task::allocated, "illegal state for victim task" );
+    task* parent = victim.parent();
+    victim.~task();
+    if( parent ) {
+        __TBB_ASSERT( parent->state()==task::allocated, "attempt to destroy child of running or corrupted parent?" );
+        parent->internal_decrement_ref_count();
+        // Despite last reference to *parent removed, it should not be destroyed (documented behavior).
+    }
+    governor::local_scheduler()->free_task<no_hint>( victim );
+}
+
+void task::spawn_and_wait_for_all( task_list& list ) {
+    generic_scheduler* s = governor::local_scheduler();
+    task* t = list.first;
+    if( t ) {
+        if( &t->prefix().next!=list.next_ptr )
+            s->local_spawn( *t->prefix().next, *list.next_ptr );
+        list.clear();
+    }
+    s->local_wait_for_all( *this, t );
+}
+
+/** Defined out of line so that the compiler does not replicate task's vtable.
+    Defining it inline would be pointless anyway, because all calls to it are virtual calls
+    that the compiler is unlikely to optimize. */
+void task::note_affinity( affinity_id ) {
+}
+
+} // namespace tbb
+
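
For illustration (not part of the patch): a minimal sketch of the raw task API served by the allocator proxies above. A root task allocates children with allocate_child(), sets its reference count to the number of children plus one, and blocks in spawn_and_wait_for_all().

    #include "tbb/task.h"
    #include "tbb/task_scheduler_init.h"
    #include <cstdio>

    class HelloTask : public tbb::task {
        const int id;
    public:
        HelloTask( int id_ ) : id(id_) {}
        tbb::task* execute() {                   // runs on some worker thread
            std::printf( "hello from child %d\n", id );
            return NULL;
        }
    };

    class RootTask : public tbb::task {
    public:
        tbb::task* execute() {
            tbb::task_list children;
            for ( int i = 0; i < 4; ++i )
                children.push_back( *new( allocate_child() ) HelloTask(i) );
            set_ref_count( 4 + 1 );              // children plus the wait itself
            spawn_and_wait_for_all( children );  // defined in task.cpp above
            return NULL;
        }
    };

    int main() {
        tbb::task_scheduler_init init;
        RootTask& root = *new( tbb::task::allocate_root() ) RootTask;
        tbb::task::spawn_root_and_wait( root ); // goes through allocate_root_proxy
        return 0;
    }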
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/task_group_context.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/task_group_context.cpp
new file mode 100644 (file)
index 0000000..595dfb2
--- /dev/null
@@ -0,0 +1,279 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "scheduler.h"
+
+#include "tbb/task.h"
+#include "tbb/tbb_exception.h"
+#include "tbb/cache_aligned_allocator.h"
+#include "itt_notify.h"
+
+namespace tbb {
+
+#if __TBB_TASK_GROUP_CONTEXT
+
+using namespace internal;
+
+//------------------------------------------------------------------------
+// captured_exception
+//------------------------------------------------------------------------
+
+inline char* duplicate_string ( const char* src ) {
+    char* dst = NULL;
+    if ( src ) {
+        size_t len = strlen(src) + 1;
+        dst = (char*)allocate_via_handler_v3(len);
+        strncpy (dst, src, len);
+    }
+    return dst;
+}
+
+void captured_exception::set ( const char* name, const char* info ) throw() {
+    my_exception_name = duplicate_string( name );
+    my_exception_info = duplicate_string( info );
+}
+
+void captured_exception::clear () throw() {
+    deallocate_via_handler_v3 (const_cast<char*>(my_exception_name));
+    deallocate_via_handler_v3 (const_cast<char*>(my_exception_info));
+}
+
+captured_exception* captured_exception::move () throw() {
+    captured_exception *e = (captured_exception*)allocate_via_handler_v3(sizeof(captured_exception));
+    if ( e ) {
+        ::new (e) captured_exception();
+        e->my_exception_name = my_exception_name;
+        e->my_exception_info = my_exception_info;
+        e->my_dynamic = true;
+        my_exception_name = my_exception_info = NULL;
+    }
+    return e;
+}
+
+void captured_exception::destroy () throw() {
+    __TBB_ASSERT ( my_dynamic, "Method destroy can be used only on objects created by clone or allocate" );
+    if ( my_dynamic ) {
+        this->captured_exception::~captured_exception();
+        deallocate_via_handler_v3 (this);
+    }
+}
+
+captured_exception* captured_exception::allocate ( const char* name, const char* info ) {
+    captured_exception *e = (captured_exception*)allocate_via_handler_v3( sizeof(captured_exception) );
+    if ( e ) {
+        ::new (e) captured_exception(name, info);
+        e->my_dynamic = true;
+    }
+    return e;
+}
+
+const char* captured_exception::name() const throw() {
+    return my_exception_name;
+}
+
+const char* captured_exception::what() const throw() {
+    return my_exception_info;
+}
+
+
+//------------------------------------------------------------------------
+// tbb_exception_ptr
+//------------------------------------------------------------------------
+
+#if !TBB_USE_CAPTURED_EXCEPTION
+
+namespace internal {
+
+template<typename T>
+tbb_exception_ptr* AllocateExceptionContainer( const T& src ) {
+    tbb_exception_ptr *eptr = (tbb_exception_ptr*)allocate_via_handler_v3( sizeof(tbb_exception_ptr) );
+    if ( eptr )
+        new (eptr) tbb_exception_ptr(src);
+    return eptr;
+}
+
+tbb_exception_ptr* tbb_exception_ptr::allocate () {
+    return AllocateExceptionContainer( std::current_exception() );
+}
+
+tbb_exception_ptr* tbb_exception_ptr::allocate ( const tbb_exception& ) {
+    return AllocateExceptionContainer( std::current_exception() );
+}
+
+tbb_exception_ptr* tbb_exception_ptr::allocate ( captured_exception& src ) {
+    tbb_exception_ptr *res = AllocateExceptionContainer( src );
+    src.destroy();
+    return res;
+}
+
+void tbb_exception_ptr::destroy () throw() {
+    this->tbb_exception_ptr::~tbb_exception_ptr();
+    deallocate_via_handler_v3 (this);
+}
+
+} // namespace internal
+#endif /* !TBB_USE_CAPTURED_EXCEPTION */
+
+
+//------------------------------------------------------------------------
+// task_group_context
+//------------------------------------------------------------------------
+
+task_group_context::~task_group_context () {
+    if ( my_kind != isolated ) {
+        generic_scheduler *s = (generic_scheduler*)my_owner;
+        if ( governor::is_set(s) ) {
+            // Local update of the context list 
+            uintptr_t local_count_snapshot = s->local_cancel_count;
+            s->local_ctx_list_update = 1;
+            __TBB_full_memory_fence();
+            if ( s->nonlocal_ctx_list_update ) {
+                spin_mutex::scoped_lock lock(s->context_list_mutex);
+                my_node.my_prev->my_next = my_node.my_next;
+                my_node.my_next->my_prev = my_node.my_prev;
+                s->local_ctx_list_update = 0;
+            }
+            else {
+                my_node.my_prev->my_next = my_node.my_next;
+                my_node.my_next->my_prev = my_node.my_prev;
+                __TBB_store_with_release( s->local_ctx_list_update, 0 );
+                if ( local_count_snapshot != global_cancel_count ) {
+                    // Another thread was propagating a cancellation request when we removed
+                    // ourselves from the list. We must ensure that it is not accessing us 
+                    // when this destructor finishes. We'll be able to acquire the lock 
+                    // below only after the other thread finishes with us.
+                    spin_mutex::scoped_lock lock(s->context_list_mutex);
+                }
+            }
+        }
+        else {
+            // Nonlocal update of the context list 
+            if ( __TBB_FetchAndStoreW(&my_kind, dying) == detached ) {
+                my_node.my_prev->my_next = my_node.my_next;
+                my_node.my_next->my_prev = my_node.my_prev;
+            }
+            else {
+                __TBB_FetchAndAddW(&s->nonlocal_ctx_list_update, 1);
+                spin_wait_until_eq( s->local_ctx_list_update, 0u );
+                s->context_list_mutex.lock();
+                my_node.my_prev->my_next = my_node.my_next;
+                my_node.my_next->my_prev = my_node.my_prev;
+                s->context_list_mutex.unlock();
+                __TBB_FetchAndAddW(&s->nonlocal_ctx_list_update, -1);
+            }
+        }
+    }
+#if TBB_USE_DEBUG
+    my_version_and_traits = 0xDeadBeef;
+#endif /* TBB_USE_DEBUG */
+    if ( my_exception )
+        my_exception->destroy();
+    if (itt_caller != ITT_CALLER_NULL) ITT_STACK(caller_destroy, itt_caller);
+}
+
+void task_group_context::init () {
+    __TBB_ASSERT ( sizeof(uintptr_t) < 32, "Layout of my_version_and_traits must be reconsidered on this platform" );
+    __TBB_ASSERT ( sizeof(task_group_context) == 2 * NFS_MaxLineSize, "Context class has wrong size - check padding and members alignment" );
+    __TBB_ASSERT ( (uintptr_t(this) & (sizeof(my_cancellation_requested) - 1)) == 0, "Context is improperly aligned" );
+    __TBB_ASSERT ( my_kind == isolated || my_kind == bound, "Context can be created only as isolated or bound" );
+    my_parent = NULL;
+    my_cancellation_requested = 0;
+    my_exception = NULL;
+    itt_caller = ITT_CALLER_NULL;
+    if ( my_kind == bound ) {
+        generic_scheduler *s = governor::local_scheduler();
+        my_owner = s;
+        __TBB_ASSERT ( my_owner, "Thread has not activated a task_scheduler_init object?" );
+        // Backward links are used by this thread only, thus no fences are necessary
+        my_node.my_prev = &s->context_list_head;
+        s->context_list_head.my_next->my_prev = &my_node;
+        my_node.my_next = s->context_list_head.my_next;
+        // Thread local list of contexts allows concurrent traversal by another 
+        // thread while propagating cancellation request. Release fence ensures 
+        // visibility of my_node's members in the traversing thread.
+        __TBB_store_with_release(s->context_list_head.my_next, &my_node);
+    }
+}
+
+bool task_group_context::cancel_group_execution () {
+    __TBB_ASSERT ( my_cancellation_requested == 0 || my_cancellation_requested == 1, "Invalid cancellation state");
+    if ( my_cancellation_requested || __TBB_CompareAndSwapW(&my_cancellation_requested, 1, 0) ) {
+        // This task group has already been canceled
+        return false;
+    }
+#if __TBB_ARENA_PER_MASTER
+    governor::local_scheduler()->my_arena->propagate_cancellation( *this );
+#else /* !__TBB_ARENA_PER_MASTER */
+    governor::local_scheduler()->propagate_cancellation( *this );
+#endif /* !__TBB_ARENA_PER_MASTER */
+    return true;
+}
+
+bool task_group_context::is_group_execution_cancelled () const {
+    return my_cancellation_requested != 0;
+}
+
+// IMPORTANT: It is assumed that this method is not used concurrently!
+void task_group_context::reset () {
+    //! \todo Add assertion that this context does not have children
+    // No fences are necessary since this context can be accessed from another thread
+    // only after stealing happened (which means necessary fences were used).
+    if ( my_exception )  {
+        my_exception->destroy();
+        my_exception = NULL;
+    }
+    my_cancellation_requested = 0;
+}
+
+void task_group_context::propagate_cancellation_from_ancestors () {
+    task_group_context *ancestor = my_parent;
+    while ( ancestor && !ancestor->my_cancellation_requested )
+        ancestor = ancestor->my_parent;
+    if ( ancestor ) {
+        // One of my ancestor groups was canceled. Cancel all its descendants in my heritage line.
+        task_group_context *ctx = this;
+        do {
+            ctx->my_cancellation_requested = 1;
+            ctx = ctx->my_parent;
+        } while ( ctx != ancestor );
+    }
+}
+
+void task_group_context::register_pending_exception () {
+    if ( my_cancellation_requested )
+        return;
+#if TBB_USE_EXCEPTIONS
+    try {
+        throw;
+    } TbbCatchAll( this );
+#endif /* TBB_USE_EXCEPTIONS */
+}
+
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+} // namespace tbb
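
For illustration (not part of the patch): a sketch of how cancellation is used from the public API. Cancelling the context shared by a parallel algorithm's tasks goes through task_group_context::cancel_group_execution() above; the body type and data below are made up for the example.

    #include "tbb/blocked_range.h"
    #include "tbb/parallel_for.h"
    #include "tbb/task.h"
    #include "tbb/task_scheduler_init.h"

    struct StopOnNegative {
        const int* data;
        StopOnNegative( const int* d ) : data(d) {}
        void operator()( const tbb::blocked_range<size_t>& r ) const {
            for ( size_t i = r.begin(); i != r.end(); ++i )
                if ( data[i] < 0 ) {
                    // Cancels every task sharing the enclosing context.
                    tbb::task::self().cancel_group_execution();
                    return;
                }
        }
    };

    int main() {
        tbb::task_scheduler_init init;
        int data[1000] = {0};
        data[123] = -1;
        tbb::task_group_context ctx;
        tbb::parallel_for( tbb::blocked_range<size_t>(0, 1000),
                           StopOnNegative(data),
                           tbb::auto_partitioner(), ctx );
        return ctx.is_group_execution_cancelled() ? 0 : 1;
    }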
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/task_stream.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/task_stream.h
new file mode 100644 (file)
index 0000000..8ba4865
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _TBB_task_stream_H
+#define _TBB_task_stream_H
+
+#include "tbb/tbb_stddef.h"
+
+#if __TBB_ARENA_PER_MASTER
+
+#include <deque>
+#include <climits>
+#include "tbb/atomic.h" // for __TBB_Atomic*
+#include "tbb/spin_mutex.h"
+#include "tbb/tbb_allocator.h"
+#include "scheduler_common.h"
+#include "tbb_misc.h" // for FastRandom
+
+namespace tbb {
+namespace internal {
+
+//! Essentially, this is just a pair of a queue and a mutex to protect the queue.
+/** The reason std::pair is not used is that the code would look less clean
+    if field names were replaced with 'first' and 'second'. **/
+template< typename T, typename mutex_t >
+struct queue_and_mutex {
+    typedef std::deque< T, tbb_allocator<T> > queue_base_t;
+
+    queue_base_t my_queue;
+    mutex_t      my_mutex;
+
+    queue_and_mutex () : my_queue(), my_mutex() {}
+    ~queue_and_mutex () {}
+};
+
+const uintptr_t one = 1;
+
+inline void set_one_bit( uintptr_t& dest, int pos ) {
+    __TBB_ASSERT( pos>=0, NULL );
+    __TBB_ASSERT( pos<32, NULL );
+    __TBB_AtomicOR( &dest, one<<pos );
+}
+
+inline void clear_one_bit( uintptr_t& dest, int pos ) {
+    __TBB_ASSERT( pos>=0, NULL );
+    __TBB_ASSERT( pos<32, NULL );
+    __TBB_AtomicAND( &dest, ~(one<<pos) );
+}
+
+inline bool is_bit_set( uintptr_t val, int pos ) {
+    __TBB_ASSERT( pos>=0, NULL );
+    __TBB_ASSERT( pos<32, NULL );
+    return (val & (one<<pos)) != 0;
+}
+
+//! The container for "fairness-oriented" aka "enqueued" tasks.
+class task_stream {
+    typedef queue_and_mutex <task*, spin_mutex> lane_t;
+    unsigned N;
+    uintptr_t population;
+    FastRandom random;
+    padded<lane_t>* lanes;
+
+public:
+    task_stream() : N(), population(), random(unsigned(&N-(unsigned*)NULL)), lanes()
+    {
+        __TBB_ASSERT( sizeof(population) * CHAR_BIT >= 32, NULL );
+    }
+
+    void initialize( unsigned n_lanes ) {
+        N = n_lanes>=32 ? 32 : n_lanes>2 ? 1<<(__TBB_Log2(n_lanes-1)+1) : 2;
+        __TBB_ASSERT( N==32 || N>=n_lanes && ((N-1)&N)==0, "number of lanes miscalculated");
+        lanes = new padded<lane_t>[N];
+        __TBB_ASSERT( !population, NULL );
+    }
+
+    ~task_stream() { if (lanes) delete[] lanes; }
+
+    //! Push a task into a lane.
+    void push( task* source, unsigned& last_random ) {
+        // Lane selection is random. Each thread should keep a separate seed value.
+        unsigned idx;
+        for( ; ; ) {
+            idx = random.get(last_random) & (N-1);
+            spin_mutex::scoped_lock lock;
+            if( lock.try_acquire(lanes[idx].my_mutex) ) {
+                lanes[idx].my_queue.push_back(source);
+                set_one_bit( population, idx );
+                break;
+            }
+        }
+    }
+    //! Try finding and popping a task.
+    /** Does not change destination if unsuccessful. */
+    void pop( task*& dest, unsigned& last_used_lane ) {
+        if( !population ) return; // keeps the hot path shorter
+        // Lane selection is round-robin. Each thread should keep its last used lane.
+        unsigned idx = (last_used_lane+1)&(N-1);
+        for( ; population; idx=(idx+1)&(N-1) ) {
+            if( is_bit_set( population, idx ) ) {
+                lane_t& lane = lanes[idx];
+                spin_mutex::scoped_lock lock;
+                if( lock.try_acquire(lane.my_mutex) && !lane.my_queue.empty() ) {
+                    dest = lane.my_queue.front();
+                    lane.my_queue.pop_front();
+                    if( lane.my_queue.empty() )
+                        clear_one_bit( population, idx );
+                    break;
+                }
+            }
+        }
+        last_used_lane = idx;
+    }
+
+    //! Checks existence of a task.
+    bool empty() {
+        return !population;
+    }
+    //! Destroys all remaining tasks in every lane. Returns the number of destroyed tasks.
+    /** Tasks are not executed, because it would potentially create more tasks at a late stage.
+        The scheduler is really expected to execute all tasks before task_stream destruction. */
+    intptr_t drain() {
+        intptr_t result = 0;
+        for(unsigned i=0; i<N; ++i) {
+            lane_t& lane = lanes[i];
+            spin_mutex::scoped_lock lock(lane.my_mutex);
+            for(lane_t::queue_base_t::iterator it=lane.my_queue.begin();
+                it!=lane.my_queue.end(); ++it, ++result)
+            {
+                task* t = *it;
+                tbb::task::destroy(*t);
+            }
+            lane.my_queue.clear();
+            clear_one_bit( population, i );
+        }
+        return result;
+    }
+}; // task_stream
+
+} // namespace internal
+} // namespace tbb
+
+#endif /* __TBB_ARENA_PER_MASTER */
+
+#endif /* _TBB_task_stream_H */
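
For illustration (not part of the patch): the public face of this fairness-oriented stream is tbb::task::enqueue(), new in TBB 3.0. Enqueued tasks land in a task_stream lane via push() above and are drained round-robin by workers via pop(). The sketch below waits for the enqueued task with the common empty_task guard-parent idiom.

    #include "tbb/task.h"
    #include "tbb/task_scheduler_init.h"
    #include <cstdio>

    class BackgroundWork : public tbb::task {
    public:
        tbb::task* execute() {
            std::printf( "running enqueued task\n" );
            return NULL;
        }
    };

    int main() {
        tbb::task_scheduler_init init;
        tbb::empty_task& root = *new( tbb::task::allocate_root() ) tbb::empty_task;
        root.set_ref_count( 2 );                                   // one child + the wait
        tbb::task::enqueue( *new( root.allocate_child() ) BackgroundWork );
        root.wait_for_all();                                       // returns once the child ran
        tbb::task::destroy( root );                                // same call drain() uses above
        return 0;
    }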
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_assert_impl.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_assert_impl.h
new file mode 100644 (file)
index 0000000..52cd780
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// IMPORTANT: To use assertion handling in TBB, exactly one of the TBB source files
+// should #include tbb_assert_impl.h, thus instantiating the assertion handling routines.
+// The intent of putting them in a separate file is to allow some tests to use them
+// as well, avoiding a dependency on the library.
+
+// include headers for required function declarations
+#include <cstdlib>
+#include <stdio.h>
+#include <string.h>
+#include <stdarg.h>
+#if _MSC_VER
+#include <crtdbg.h>
+#define __TBB_USE_DBGBREAK_DLG TBB_USE_DEBUG
+#endif
+
+#if _MSC_VER >= 1400
+#define __TBB_EXPORTED_FUNC   __cdecl
+#else
+#define __TBB_EXPORTED_FUNC
+#endif
+
+using namespace std;
+
+namespace tbb {
+    //! Type for an assertion handler
+    typedef void(*assertion_handler_type)( const char* filename, int line, const char* expression, const char * comment );
+
+    static assertion_handler_type assertion_handler;
+
+    assertion_handler_type __TBB_EXPORTED_FUNC set_assertion_handler( assertion_handler_type new_handler ) {
+        assertion_handler_type old_handler = assertion_handler;
+        assertion_handler = new_handler;
+        return old_handler;
+    }
+
+    void __TBB_EXPORTED_FUNC assertion_failure( const char* filename, int line, const char* expression, const char* comment ) {
+        if( assertion_handler_type a = assertion_handler ) {
+            (*a)(filename,line,expression,comment);
+        } else {
+            static bool already_failed;
+            if( !already_failed ) {
+                already_failed = true;
+                fprintf( stderr, "Assertion %s failed on line %d of file %s\n",
+                         expression, line, filename );
+                if( comment )
+                    fprintf( stderr, "Detailed description: %s\n", comment );
+#if __TBB_USE_DBGBREAK_DLG
+                if(1 == _CrtDbgReport(_CRT_ASSERT, filename, line, "tbb_debug.dll", "%s\r\n%s", expression, comment?comment:""))
+                        _CrtDbgBreak();
+#else
+                fflush(stderr);
+                abort();
+#endif
+            }
+        }
+    }
+
+#if defined(_MSC_VER)&&_MSC_VER<1400
+#   define vsnprintf _vsnprintf
+#endif
+
+    namespace internal {
+        //! Report a runtime warning.
+        void __TBB_EXPORTED_FUNC runtime_warning( const char* format, ... )
+        {
+            char str[1024]; memset(str, 0, 1024);
+            va_list args; va_start(args, format);
+            vsnprintf( str, 1024-1, format, args); va_end(args);
+            fprintf( stderr, "TBB Warning: %s\n", str);
+        }
+    } // namespace internal
+
+} /* namespace tbb */
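
For illustration (not part of the patch): installing a custom handler through the hook defined above. The assertion_handler_type typedef and set_assertion_handler() are declared in tbb/tbb_stddef.h, but only when TBB's internal assertions are enabled (TBB_USE_ASSERT, e.g. in debug builds), so this sketch assumes such a build.

    #include "tbb/tbb_stddef.h"
    #include <cstdio>
    #include <cstdlib>

    static void my_handler( const char* filename, int line,
                            const char* expression, const char* comment ) {
        std::fprintf( stderr, "[myapp] TBB assertion '%s' failed at %s:%d (%s)\n",
                      expression, filename, line, comment ? comment : "no details" );
        std::abort();
    }

    int main() {
        tbb::assertion_handler_type previous = tbb::set_assertion_handler( my_handler );
        // ... run code that may trip TBB debug assertions ...
        tbb::set_assertion_handler( previous );   // restore the default behavior
        return 0;
    }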
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_main.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_main.cpp
new file mode 100644 (file)
index 0000000..5bd3e45
--- /dev/null
@@ -0,0 +1,253 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb_main.h"
+#include "governor.h"
+#include "tbb_misc.h"
+#include "itt_notify.h"
+
+namespace tbb {
+namespace internal {
+
+//------------------------------------------------------------------------
+// Begin shared data layout.
+// The following global data items are mostly read-only after initialization.
+//------------------------------------------------------------------------
+
+//! Padding in order to prevent false sharing.
+static const char _pad[NFS_MaxLineSize - sizeof(int)] = {};
+
+//------------------------------------------------------------------------
+// governor data
+basic_tls<generic_scheduler*> governor::theTLS;
+#if !__TBB_ARENA_PER_MASTER
+arena* governor::theArena;
+mutex  governor::theArenaMutex;
+unsigned governor::NumWorkers;
+#endif /* !__TBB_ARENA_PER_MASTER */
+unsigned governor::DefaultNumberOfThreads;
+rml::tbb_factory governor::theRMLServerFactory;
+bool governor::UsePrivateRML;
+
+#if __TBB_ARENA_PER_MASTER
+//------------------------------------------------------------------------
+// market data
+market* market::theMarket;
+market::global_market_mutex_type market::theMarketMutex;
+#endif /* __TBB_ARENA_PER_MASTER */
+
+//------------------------------------------------------------------------
+// One time initialization data
+
+//! Counter of references to global shared resources such as TLS.
+atomic<int> __TBB_InitOnce::count;
+
+__TBB_InitOnce::mutex_type __TBB_InitOnce::InitializationLock;
+
+//! Flag that is set to true after one-time initializations are done.
+bool __TBB_InitOnce::InitializationDone;
+
+#if DO_ITT_NOTIFY
+    static bool ITT_Present;
+    static bool ITT_InitializationDone;
+#endif
+
+#if !(_WIN32||_WIN64) || __TBB_TASK_CPP_DIRECTLY_INCLUDED
+    static __TBB_InitOnce __TBB_InitOnceHiddenInstance;
+#endif
+
+//------------------------------------------------------------------------
+// generic_scheduler data
+
+//! Pointer to the scheduler factory function
+generic_scheduler* (*AllocateSchedulerPtr)( arena*, size_t index );
+
+//! Table of primes used by fast random-number generator (FastRandom).
+/** Also serves to keep anything else from being placed in the same
+    cache line as the global data items preceding it. */
+static const unsigned Primes[] = {
+    0x9e3779b1, 0xffe6cc59, 0x2109f6dd, 0x43977ab5,
+    0xba5703f5, 0xb495a877, 0xe1626741, 0x79695e6b,
+    0xbc98c09f, 0xd5bee2b3, 0x287488f9, 0x3af18231,
+    0x9677cd4d, 0xbe3a6929, 0xadc6a877, 0xdcf0674b,
+    0xbe4d6fe9, 0x5f15e201, 0x99afc3fd, 0xf3f16801,
+    0xe222cfff, 0x24ba5fdb, 0x0620452d, 0x79f149e3,
+    0xc8b93f49, 0x972702cd, 0xb07dd827, 0x6c97d5ed,
+    0x085a3d61, 0x46eb5ea7, 0x3d9910ed, 0x2e687b5b,
+    0x29609227, 0x6eb081f1, 0x0954c4e1, 0x9d114db9,
+    0x542acfa9, 0xb3e6bd7b, 0x0742d917, 0xe9f3ffa7,
+    0x54581edb, 0xf2480f45, 0x0bb9288f, 0xef1affc7,
+    0x85fa0ca7, 0x3ccc14db, 0xe6baf34b, 0x343377f7,
+    0x5ca19031, 0xe6d9293b, 0xf0a9f391, 0x5d2e980b,
+    0xfc411073, 0xc3749363, 0xb892d829, 0x3549366b,
+    0x629750ad, 0xb98294e5, 0x892d9483, 0xc235baf3,
+    0x3d2402a3, 0x6bdef3c9, 0xbec333cd, 0x40c9520f
+};
+
+//------------------------------------------------------------------------
+// End of shared data layout
+//------------------------------------------------------------------------
+
+//------------------------------------------------------------------------
+// Shared data accessors
+//------------------------------------------------------------------------
+
+unsigned GetPrime ( unsigned seed ) {
+    return Primes[seed%(sizeof(Primes)/sizeof(Primes[0]))];
+}
+
+//------------------------------------------------------------------------
+// __TBB_InitOnce
+//------------------------------------------------------------------------
+
+void __TBB_InitOnce::add_ref() {
+    if( ++count==1 )
+        governor::acquire_resources();
+}
+
+void __TBB_InitOnce::remove_ref() {
+    int k = --count;
+    __TBB_ASSERT(k>=0,"removed __TBB_InitOnce ref that was not added?"); 
+    if( k==0 ) 
+        governor::release_resources();
+}
+
+//------------------------------------------------------------------------
+// One-time Initializations
+//------------------------------------------------------------------------
+
+//! Defined in cache_aligned_allocator.cpp
+void initialize_cache_aligned_allocator();
+
+#if __TBB_SURVIVE_THREAD_SWITCH
+//! Defined in governor.cpp
+void initialize_survive_thread_switch();
+#endif /* __TBB_SURVIVE_THREAD_SWITCH */
+
+//! Defined in scheduler.cpp
+void Scheduler_OneTimeInitialization ( bool itt_present );
+
+#if DO_ITT_NOTIFY
+
+/** Thread-unsafe lazy one-time initialization of tools interop.
+    Used by both dummy handlers and general TBB one-time initialization routine. **/
+void ITT_DoUnsafeOneTimeInitialization () {
+    if ( !ITT_InitializationDone ) {
+        ITT_Present = (__TBB_load_ittnotify()!=0);
+        ITT_InitializationDone = true;
+#if __TBB_ARENA_PER_MASTER
+        ITT_SYNC_CREATE(&market::theMarketMutex, SyncType_GlobalLock, SyncObj_SchedulerInitialization);
+#else /* !__TBB_ARENA_PER_MASTER */
+        ITT_SYNC_CREATE(&governor::theArenaMutex, SyncType_GlobalLock, SyncObj_SchedulerInitialization);
+#endif /* !__TBB_ARENA_PER_MASTER */
+    }
+}
+
+/** Thread-safe lazy one-time initialization of tools interop.
+    Used by dummy handlers only. **/
+extern "C"
+void ITT_DoOneTimeInitialization() {
+    __TBB_InitOnce::lock();
+    ITT_DoUnsafeOneTimeInitialization();
+    __TBB_InitOnce::unlock();
+}
+#endif /* DO_ITT_NOTIFY */
+
+//! Performs thread-safe lazy one-time general TBB initialization.
+void DoOneTimeInitializations() {
+    __TBB_InitOnce::lock();
+    // No fence required for load of InitializationDone, because we are inside a critical section.
+    if( !__TBB_InitOnce::InitializationDone ) {
+        __TBB_InitOnce::add_ref();
+        if( GetBoolEnvironmentVariable("TBB_VERSION") )
+            PrintVersion();
+        bool have_itt = false;
+#if DO_ITT_NOTIFY
+        ITT_DoUnsafeOneTimeInitialization();
+        have_itt = ITT_Present;
+#endif /* DO_ITT_NOTIFY */
+        initialize_cache_aligned_allocator();
+#if __TBB_SURVIVE_THREAD_SWITCH
+        initialize_survive_thread_switch();
+#endif /* __TBB_SURVIVE_THREAD_SWITCH */
+        governor::print_version_info();
+        PrintExtraVersionInfo( "SCHEDULER", have_itt ? "default" : "Intel" );
+        Scheduler_OneTimeInitialization( have_itt );
+        __TBB_InitOnce::InitializationDone = true;
+    }
+    __TBB_InitOnce::unlock();
+}
+
+#if (_WIN32||_WIN64) && !__TBB_TASK_CPP_DIRECTLY_INCLUDED
+//! Windows "DllMain" that handles startup and shutdown of dynamic library.
+extern "C" bool WINAPI DllMain( HANDLE /*hinstDLL*/, DWORD reason, LPVOID /*lpvReserved*/ ) {
+    switch( reason ) {
+        case DLL_PROCESS_ATTACH:
+            __TBB_InitOnce::add_ref();
+            break;
+        case DLL_PROCESS_DETACH:
+            __TBB_InitOnce::remove_ref();
+            // It is assumed that InitializationDone is not set after DLL_PROCESS_DETACH,
+            // and thus no race on InitializationDone is possible.
+            if( __TBB_InitOnce::initialization_done() ) {
+                // Remove reference that we added in DoOneTimeInitializations.
+                __TBB_InitOnce::remove_ref();
+            }
+            break;
+        case DLL_THREAD_DETACH:
+            governor::terminate_auto_initialized_scheduler();
+            break;
+    }
+    return true;
+}
+#endif /* (_WIN32||_WIN64) && !__TBB_TASK_CPP_DIRECTLY_INCLUDED */
+
+void itt_store_pointer_with_release_v3( void* dst, void* src ) {
+    ITT_NOTIFY(sync_releasing, dst);
+    __TBB_store_with_release(*static_cast<void**>(dst),src);
+}
+
+void* itt_load_pointer_with_acquire_v3( const void* src ) {
+    void* result = __TBB_load_with_acquire(*static_cast<void*const*>(src));
+    ITT_NOTIFY(sync_acquired, const_cast<void*>(src));
+    return result;
+}
+    
+void* itt_load_pointer_v3( const void* src ) {
+    void* result = *static_cast<void*const*>(src);
+    return result;
+}
+
+void itt_set_sync_name_v3( void* obj, const tchar* name) {
+    ITT_SYNC_RENAME(obj, name);
+    (void)obj, (void)name;  // Prevents compiler warning when ITT support is switched off
+}
+
+
+} // namespace internal
+} // namespace tbb
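
The DoOneTimeInitializations routine above is a classic lazy one-time initialization: a global byte lock is taken, the InitializationDone flag is re-checked inside the critical section, the work is performed at most once, and the flag is published last. A minimal standalone sketch of the same pattern, using C++11 primitives and illustrative names rather than TBB's internal API:

    #include <atomic>
    #include <mutex>

    static std::mutex init_lock;                  // plays the role of __TBB_InitOnce::lock()/unlock()
    static std::atomic<bool> init_done(false);    // plays the role of InitializationDone

    void do_one_time_initializations_sketch() {
        // Fast path: already-initialized callers can skip the lock entirely.
        if( init_done.load(std::memory_order_acquire) )
            return;
        std::lock_guard<std::mutex> guard(init_lock);
        // Re-check under the lock; no extra fences are needed inside the critical section.
        if( !init_done.load(std::memory_order_relaxed) ) {
            /* ...perform the actual one-time work here... */
            init_done.store(true, std::memory_order_release);
        }
    }

The fast-path check before the lock is an optional refinement of the sketch; the TBB routine above always takes the lock and relies on callers checking initialization_done() elsewhere.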
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_main.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_main.h
new file mode 100644 (file)
index 0000000..b207b34
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _TBB_tbb_main_H
+#define _TBB_tbb_main_H
+
+#include "tbb/atomic.h"
+
+namespace tbb {
+
+namespace internal {
+
+void DoOneTimeInitializations ();
+
+//------------------------------------------------------------------------
+// __TBB_InitOnce
+//------------------------------------------------------------------------
+
+//! Class that supports TBB initialization. 
+/** It handles acquisition and release of global resources (e.g. TLS) during startup and shutdown,
+    as well as synchronization for DoOneTimeInitializations. */
+class __TBB_InitOnce {
+    friend void DoOneTimeInitializations();
+    friend void ITT_DoUnsafeOneTimeInitialization ();
+
+    static atomic<int> count;
+
+    //! Platform specific code to acquire resources.
+    static void acquire_resources();
+
+    //! Platform specific code to release resources.
+    static void release_resources();
+
+    //! Specifies whether the one-time initialization has been done.
+    static bool InitializationDone;
+
+    // In some scenarios tools interop has to be initialized before TBB itself.
+    // This imposes a requirement that the global initialization lock must support
+    // valid static initialization and must not issue any tool notifications
+    // in any build mode.
+    typedef unsigned char mutex_type;
+
+    // Global initialization lock
+    static mutex_type InitializationLock;
+
+public:
+    static void lock()   { __TBB_LockByte( InitializationLock ); }
+
+    static void unlock() { __TBB_store_with_release( InitializationLock, 0 ); }
+
+    static bool initialization_done() { return __TBB_load_with_acquire(InitializationDone); }
+
+    //! Add initial reference to resources. 
+    /** We assume that dynamic loading of the library prevents any other threads 
+        from entering the library until this constructor has finished running. **/
+    __TBB_InitOnce() { add_ref(); }
+
+    //! Remove the initial reference to resources.
+    /** This is not necessarily the last reference if other threads are still running. **/
+    ~__TBB_InitOnce() {
+        remove_ref();
+        // We assume that InitializationDone is not set after file-scope destructors
+        // start running, and thus no race on InitializationDone is possible.
+        if( initialization_done() ) {
+            // Remove an extra reference that was added in DoOneTimeInitializations.
+            remove_ref();  
+        }
+    } 
+    //! Add reference to resources.  If first reference added, acquire the resources.
+    static void add_ref();
+
+    //! Remove reference to resources.  If last reference removed, release the resources.
+    static void remove_ref();
+}; // class __TBB_InitOnce
+
+
+} // namespace internal
+
+} // namespace tbb
+
+#endif /* _TBB_tbb_main_H */
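
The design note inside __TBB_InitOnce explains why the initialization lock is a plain unsigned char: it must be usable during static initialization (zero-initialized before any constructor runs) and must never emit tool notifications. A sketch of such a byte lock with hypothetical names (TBB's real primitives are __TBB_LockByte and __TBB_store_with_release):

    #include <atomic>

    // Zero-initialized before main(), so it is usable while static initializers run.
    static std::atomic<unsigned char> init_byte_lock(0);

    void lock_byte_sketch() {
        // Spin until the previous value observed by exchange() is 0, i.e. we acquired the lock.
        while( init_byte_lock.exchange(1, std::memory_order_acquire) != 0 )
            ; // a real implementation would back off or yield while spinning
    }

    void unlock_byte_sketch() {
        init_byte_lock.store(0, std::memory_order_release);  // analogous to __TBB_store_with_release(lock, 0)
    }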
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_misc.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_misc.cpp
new file mode 100644 (file)
index 0000000..9efb6a6
--- /dev/null
@@ -0,0 +1,230 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Source file for miscellaneous entities that are infrequently referenced by 
+// an executing program.
+
+#include "tbb/tbb_stddef.h"
+#include "tbb_assert_impl.h" // Out-of-line TBB assertion handling routines are instantiated here.
+#include "tbb/tbb_exception.h"
+#include "tbb/tbb_machine.h"
+#include "tbb_misc.h"
+#include <cstdio>
+#include <cstdlib>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <cstring>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+using namespace std;
+
+namespace tbb {
+
+const char* bad_last_alloc::what() const throw() { return "bad allocation in previous or concurrent attempt"; }
+const char* improper_lock::what() const throw() { return "attempted recursive lock on critical section or non-recursive mutex"; }
+const char* invalid_multiple_scheduling::what() const throw() { return "The same task_handle object cannot be executed more than once"; }
+const char* missing_wait::what() const throw() { return "wait() was not called on the structured_task_group"; }
+
+namespace internal {
+
+#if TBB_USE_EXCEPTIONS
+    #define DO_THROW(exc, init_args) throw exc init_args;
+#else /* !TBB_USE_EXCEPTIONS */
+    #define PRINT_ERROR_AND_ABORT(exc_name, msg) \
+        fprintf (stderr, "Exception %s with message %s would've been thrown, "  \
+            "if exception handling were not disabled. Aborting.\n", exc_name, msg); \
+        fflush(stderr); \
+        abort();
+    #define DO_THROW(exc, init_args) PRINT_ERROR_AND_ABORT(#exc, #init_args)
+#endif /* !TBB_USE_EXCEPTIONS */
+
+
+/* The "what" should be fairly short, not more than about 128 characters.
+   Because we control all the call sites to handle_perror, it is pointless
+   to bullet-proof it for very long strings.
+
+   Design note: ADR put this routine off to the side in tbb_misc.cpp instead of
+   Task.cpp because the throw generates a pathetic lot of code, and ADR wanted
+   this large chunk of code to be placed on a cold page. */
+void handle_perror( int error_code, const char* what ) {
+    char buf[256];
+    __TBB_ASSERT( strlen(what) < sizeof(buf) - 64, "Error description is too long" );
+    sprintf(buf,"%s: ",what);
+    char* end = strchr(buf,0);
+    size_t n = buf+sizeof(buf)-end;
+    strncpy( end, strerror( error_code ), n );
+    // Ensure that buffer ends in terminator.
+    buf[sizeof(buf)-1] = 0; 
+#if TBB_USE_EXCEPTIONS
+    throw runtime_error(buf);
+#else
+    PRINT_ERROR_AND_ABORT( "runtime_error", buf);
+#endif /* !TBB_USE_EXCEPTIONS */
+}
+
+#if _WIN32||_WIN64 
+void handle_win_error( int error_code ) {
+    char buf[512];
+    FormatMessageA( FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
+                    NULL, error_code, 0, buf, sizeof(buf), NULL );
+#if TBB_USE_EXCEPTIONS
+    throw runtime_error(buf);
+#else
+    PRINT_ERROR_AND_ABORT( "runtime_error", buf);
+#endif /* !TBB_USE_EXCEPTIONS */
+}
+#endif // _WIN32||_WIN64
+
+void throw_bad_last_alloc_exception_v4() {
+    throw_exception_v4(eid_bad_last_alloc);
+}
+
+void throw_exception_v4 ( exception_id eid ) {
+    __TBB_ASSERT ( eid > 0 && eid < eid_max, "Unknown exception ID" );
+    switch ( eid ) {
+    case eid_bad_alloc: DO_THROW( bad_alloc, () );
+    case eid_bad_last_alloc: DO_THROW( bad_last_alloc, () );
+    case eid_nonpositive_step: DO_THROW( invalid_argument, ("Step must be positive") );
+    case eid_out_of_range: DO_THROW( out_of_range, ("Index out of requested size range") );
+    case eid_segment_range_error: DO_THROW( range_error, ("Index out of allocated segment slots") );
+    case eid_index_range_error: DO_THROW( range_error, ("Index is not allocated") );
+    case eid_missing_wait: DO_THROW( missing_wait, () );
+    case eid_invalid_multiple_scheduling: DO_THROW( invalid_multiple_scheduling, () );
+    case eid_improper_lock: DO_THROW( improper_lock, () );
+    case eid_possible_deadlock: DO_THROW( runtime_error, ("Resource deadlock would occur") );
+    case eid_operation_not_permitted: DO_THROW( runtime_error, ("Operation not permitted") );
+    case eid_condvar_wait_failed: DO_THROW( runtime_error, ("Wait on condition variable failed") );
+    case eid_invalid_load_factor: DO_THROW( out_of_range, ("Invalid hash load factor") );
+    case eid_reserved: DO_THROW( out_of_range, ("[backward compatibility] Invalid number of buckets") );
+    case eid_invalid_swap: DO_THROW( invalid_argument, ("swap() is invalid on non-equal allocators") );
+    case eid_reservation_length_error: DO_THROW( length_error, ("reservation size exceeds permitted max size") );
+    case eid_invalid_key: DO_THROW( out_of_range, ("invalid key") );
+    default: break;
+    }
+#if !TBB_USE_EXCEPTIONS && __APPLE__
+    out_of_range e1("");
+    length_error e2("");
+    range_error e3("");
+    invalid_argument e4("");
+#endif /* !TBB_USE_EXCEPTIONS && __APPLE__ */
+}
+
+#if _XBOX
+bool GetBoolEnvironmentVariable( const char * name ) { return false;}
+#else
+bool GetBoolEnvironmentVariable( const char * name ) {
+    if( const char* s = getenv(name) )
+        return strcmp(s,"0") != 0;
+    return false;
+}
+#endif /* !_XBOX */
+
+#include "tbb_version.h"
+
+/** The leading "\0" is here so that applying "strings" to the binary delivers a clean result. */
+static const char VersionString[] = "\0" TBB_VERSION_STRINGS;
+
+static bool PrintVersionFlag = false;
+
+void PrintVersion() {
+    PrintVersionFlag = true;
+    fputs(VersionString+1,stderr);
+}
+
+void PrintExtraVersionInfo( const char* category, const char* description ) {
+    if( PrintVersionFlag ) 
+        fprintf(stderr, "%s: %s\t%s\n", "TBB", category, description );
+}
+
+void PrintRMLVersionInfo( void* arg, const char* server_info ) {
+    PrintExtraVersionInfo( server_info, (const char *)arg );
+}
+
+} // namespace internal
+extern "C" int TBB_runtime_interface_version() {
+    return TBB_INTERFACE_VERSION;
+}
+
+} // namespace tbb
+
+#if !__TBB_RML_STATIC
+#if __TBB_x86_32
+
+#include "tbb/atomic.h"
+
+// in MSVC environment, int64_t defined in tbb::internal namespace only (see tbb_stddef.h)
+#if _MSC_VER
+using tbb::internal::int64_t;
+#endif
+
+//! Warn about 8-byte store that crosses a cache line.
+extern "C" void __TBB_machine_store8_slow_perf_warning( volatile void *ptr ) {
+    // Report run-time warning unless we have already recently reported warning for that address.
+    const unsigned n = 4;
+    static tbb::atomic<void*> cache[n];
+    static tbb::atomic<unsigned> k;
+    for( unsigned i=0; i<n; ++i ) 
+        if( ptr==cache[i] ) 
+            goto done;
+    cache[(k++)%n] = const_cast<void*>(ptr);
+    tbb::internal::runtime_warning( "atomic store on misaligned 8-byte location %p is slow", ptr );
+done:;
+}
+
+//! Handle 8-byte store that crosses a cache line.
+extern "C" void __TBB_machine_store8_slow( volatile void *ptr, int64_t value ) {
+    for( tbb::internal::atomic_backoff b;; b.pause() ) {
+        int64_t tmp = *(int64_t*)ptr;
+        if( __TBB_CompareAndSwap8(ptr,value,tmp)==tmp ) 
+            break;
+    }
+}
+
+#endif /* __TBB_x86_32 */
+#endif /* !__TBB_RML_STATIC */
+
+#if __TBB_ipf
+extern "C" intptr_t __TBB_machine_lockbyte( volatile unsigned char& flag ) {
+    if ( !__TBB_TryLockByte(flag) ) {
+        tbb::internal::atomic_backoff b;
+        do {
+            b.pause();
+        } while ( !__TBB_TryLockByte(flag) );
+    }
+    return 0;
+}
+#endif
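
__TBB_machine_store8_slow above emulates an atomic 8-byte store on 32-bit x86 by looping on an 8-byte compare-and-swap until the swap succeeds. The same idea expressed with C++11 atomics, as an illustration only (TBB uses its own __TBB_CompareAndSwap8 primitive and atomic_backoff):

    #include <atomic>
    #include <cstdint>

    void store8_via_cas_sketch( std::atomic<std::int64_t>& location, std::int64_t value ) {
        std::int64_t observed = location.load(std::memory_order_relaxed);
        // Keep trying to install 'value' until no other thread changes the location
        // between our read and our compare-and-swap. On failure, compare_exchange_weak
        // refreshes 'observed' with the value currently stored.
        while( !location.compare_exchange_weak(observed, value,
                                               std::memory_order_release,
                                               std::memory_order_relaxed) )
            ; // a real implementation would pause/back off here, as atomic_backoff does
    }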
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_misc.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_misc.h
new file mode 100644 (file)
index 0000000..ad3389f
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _TBB_tbb_misc_H
+#define _TBB_tbb_misc_H
+
+#include "tbb/tbb_stddef.h"
+#include "tbb/tbb_machine.h"
+
+#if _WIN32||_WIN64
+#include "tbb/machine/windows_api.h"
+#elif __linux__
+#include <sys/sysinfo.h>
+#define __TBB_DetectNumberOfWorkers() get_nprocs()
+#elif defined(__sun)
+#include <sys/sysinfo.h>
+#include <unistd.h>
+#elif defined(__FreeBSD__) || defined(_AIX)
+#include <unistd.h>
+#endif
+
+namespace tbb {
+namespace internal {
+
+const size_t MByte = 1<<20;
+
+#if !defined(__TBB_WORDSIZE)
+    const size_t ThreadStackSize = 1*MByte;
+#elif __TBB_WORDSIZE<=4
+    const size_t ThreadStackSize = 2*MByte;
+#else
+    const size_t ThreadStackSize = 4*MByte;
+#endif
+
+#if defined(__TBB_DetectNumberOfWorkers) // covers Linux, Mac OS*, and other platforms
+
+static inline int DetectNumberOfWorkers() {
+    int n = __TBB_DetectNumberOfWorkers(); 
+    return n>0? n: 1; // Fail safety strap
+}
+
+#else /* !__TBB_DetectNumberOfWorkers */
+
+#if _WIN32||_WIN64
+
+static inline int DetectNumberOfWorkers() {
+    SYSTEM_INFO si;
+    GetSystemInfo(&si);
+    return static_cast<int>(si.dwNumberOfProcessors);
+}
+
+#elif defined(_SC_NPROCESSORS_ONLN)
+
+static inline int DetectNumberOfWorkers() {
+    int number_of_workers = sysconf(_SC_NPROCESSORS_ONLN);
+    return number_of_workers>0? number_of_workers: 1;
+}
+
+#else
+#error DetectNumberOfWorkers: Method to detect the number of available CPUs is unknown
+#endif /* os kind */
+
+#endif /* !__TBB_DetectNumberOfWorkers */
+
+//! Throws std::runtime_error with what() returning error_code description prefixed with aux_info
+void handle_win_error( int error_code );
+
+//! True if environment variable with given name is set and not 0; otherwise false.
+bool GetBoolEnvironmentVariable( const char * name );
+
+//! Print TBB version information on stderr
+void PrintVersion();
+
+//! Print extra TBB version information on stderr
+void PrintExtraVersionInfo( const char* category, const char* description );
+
+//! A callback routine to print RML version information on stderr
+void PrintRMLVersionInfo( void* arg, const char* server_info );
+
+// For TBB compilation only; not to be used in public headers
+#if defined(min) || defined(max)
+#undef min
+#undef max
+#endif
+
+//! Utility template function returning lesser of the two values.
+/** Provided here to avoid including <algorithm>, which is not strict-safe.\n
+    If the operands cause signed/unsigned or size mismatch warnings, it is the caller's
+    responsibility to do the appropriate cast before calling the function. **/
+template<typename T1, typename T2>
+T1 min ( const T1& val1, const T2& val2 ) {
+    return val1 < val2 ? val1 : val2;
+}
+
+//! Utility template function returning greater of the two values.
+/** Provided here to avoid including <algorithm>, which is not strict-safe.\n
+    If the operands cause signed/unsigned or size mismatch warnings, it is the caller's
+    responsibility to do the appropriate cast before calling the function. **/
+template<typename T1, typename T2>
+T1 max ( const T1& val1, const T2& val2 ) {
+    return val1 < val2 ? val2 : val1;
+}
+
+//------------------------------------------------------------------------
+// FastRandom
+//------------------------------------------------------------------------
+
+/** Defined in tbb_main.cpp **/
+unsigned GetPrime ( unsigned seed );
+
+//! A fast random number generator.
+/** Uses linear congruential method. */
+class FastRandom {
+    unsigned x, a;
+public:
+    //! Get a random number.
+    unsigned short get() {
+        return get(x);
+    }
+    //! Get a random number for the given seed; update the seed for next use.
+    unsigned short get( unsigned& seed ) {
+        unsigned short r = (unsigned short)(seed>>16);
+        seed = seed*a+1;
+        return r;
+    }
+    //! Construct a random number generator.
+    FastRandom( unsigned seed ) {
+        x = seed;
+        a = GetPrime( seed );
+    }
+};
+
+} // namespace internal
+} // namespace tbb
+
+#endif /* _TBB_tbb_misc_H */
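
FastRandom above is a linear congruential generator: each call advances the seed as x' = a*x + 1 (mod 2^32) and returns the high 16 bits, which are far better mixed than the low-order bits of an LCG. A self-contained sketch of the same step (the multiplier below is a placeholder; TBB obtains a per-instance prime via GetPrime):

    unsigned short fast_random_step_sketch( unsigned& seed ) {
        const unsigned a = 0x9E3779B1u;                   // placeholder multiplier, not GetPrime()'s value
        unsigned short r = (unsigned short)(seed >> 16);  // take the high 16 bits of the current state
        seed = seed * a + 1;                              // LCG update: x' = a*x + 1 (mod 2^32)
        return r;
    }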
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_resource.rc b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_resource.rc
new file mode 100644 (file)
index 0000000..b20cb53
--- /dev/null
@@ -0,0 +1,126 @@
+// Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+//
+// This file is part of Threading Building Blocks.
+//
+// Threading Building Blocks is free software; you can redistribute it
+// and/or modify it under the terms of the GNU General Public License
+// version 2 as published by the Free Software Foundation.
+//
+// Threading Building Blocks is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Threading Building Blocks; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+//
+// As a special exception, you may use this file as part of a free software
+// library without restriction.  Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License.  This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
+// Microsoft Visual C++ generated resource script.
+//
+#ifdef APSTUDIO_INVOKED
+#ifndef APSTUDIO_READONLY_SYMBOLS
+#define _APS_NO_MFC                     1
+#define _APS_NEXT_RESOURCE_VALUE        102
+#define _APS_NEXT_COMMAND_VALUE         40001
+#define _APS_NEXT_CONTROL_VALUE         1001
+#define _APS_NEXT_SYMED_VALUE           101
+#endif
+#endif
+
+#define APSTUDIO_READONLY_SYMBOLS
+/////////////////////////////////////////////////////////////////////////////
+//
+// Generated from the TEXTINCLUDE 2 resource.
+//
+#include <winresrc.h>
+#define ENDL "\r\n"
+#include "tbb_version.h"
+
+/////////////////////////////////////////////////////////////////////////////
+#undef APSTUDIO_READONLY_SYMBOLS
+
+/////////////////////////////////////////////////////////////////////////////
+// Neutral resources
+
+//#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_NEU)
+#ifdef _WIN32
+LANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL
+#pragma code_page(1252)
+#endif //_WIN32
+
+/////////////////////////////////////////////////////////////////////////////
+// manifest integration
+#ifdef TBB_MANIFEST
+#include "winuser.h"
+2 RT_MANIFEST tbbmanifest.exe.manifest
+#endif
+
+/////////////////////////////////////////////////////////////////////////////
+//
+// Version
+//
+
+VS_VERSION_INFO VERSIONINFO
+ FILEVERSION TBB_VERNUMBERS
+ PRODUCTVERSION TBB_VERNUMBERS
+ FILEFLAGSMASK 0x17L
+#ifdef _DEBUG
+ FILEFLAGS 0x1L
+#else
+ FILEFLAGS 0x0L
+#endif
+ FILEOS 0x40004L
+ FILETYPE 0x2L
+ FILESUBTYPE 0x0L
+BEGIN
+    BLOCK "StringFileInfo"
+    BEGIN
+        BLOCK "000004b0"
+        BEGIN
+            VALUE "CompanyName", "Intel Corporation\0"
+            VALUE "FileDescription", "Threading Building Blocks library\0"
+            VALUE "FileVersion", TBB_VERSION "\0"
+//what is it?            VALUE "InternalName", "tbb\0"
+            VALUE "LegalCopyright", "Copyright 2005-2010 Intel Corporation.  All Rights Reserved.\0"
+            VALUE "LegalTrademarks", "\0"
+#ifndef TBB_USE_DEBUG
+            VALUE "OriginalFilename", "tbb.dll\0"
+#else
+            VALUE "OriginalFilename", "tbb_debug.dll\0"
+#endif
+            VALUE "ProductName", "Intel(R) Threading Building Blocks for Windows\0"
+            VALUE "ProductVersion", TBB_VERSION "\0"
+            VALUE "Comments", TBB_VERSION_STRINGS "\0"
+            VALUE "PrivateBuild", "\0"
+            VALUE "SpecialBuild", "\0"
+        END
+    END
+    BLOCK "VarFileInfo"
+    BEGIN
+        VALUE "Translation", 0x0, 1200
+    END
+END
+
+//#endif    // Neutral resources
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef APSTUDIO_INVOKED
+/////////////////////////////////////////////////////////////////////////////
+//
+// Generated from the TEXTINCLUDE 3 resource.
+//
+
+
+/////////////////////////////////////////////////////////////////////////////
+#endif    // not APSTUDIO_INVOKED
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_statistics.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_statistics.cpp
new file mode 100644 (file)
index 0000000..b894664
--- /dev/null
@@ -0,0 +1,174 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb_statistics.h"
+
+#if __TBB_STATISTICS
+
+#include <climits>
+#include <cstdarg>
+#if __TBB_STATISTICS_STDOUT
+#include <cstdio>
+#endif
+
+#include "tbb/spin_mutex.h"
+
+namespace tbb {
+namespace internal {
+
+//! Human readable titles of statistics groups defined by statistics_groups enum.
+/** The order of this vector's elements must correspond to the statistics_counters
+    structure layout. **/
+const char* StatGroupTitles[] = { 
+    "task objects", "tasks executed", "stealing attempts", "task proxies", "arena", "market"
+};
+
+//! Human readable titles of statistics elements defined by statistics_counters struct.
+/** The order of this vector's elements must correspond to the statistics_counters
+    structure layout (with NULLs interspersed to separate groups). **/
+const char* StatFieldTitles[] = {
+    "active", "freed", "big", NULL,
+    "total", "w/o spawn", NULL,
+    "succeeded", "failed", "conflicts", NULL,
+    "mailed", "revoked", "stolen", "bypassed", "ignored", NULL,
+    "switches", "roundtrips", NULL,
+    "roundtrips", NULL,
+};
+
+//! Class for logging statistics
+/** There should be only one instance of this class. 
+    Results are written to a file "statistics.txt" in tab-separated format. */
+class statistics_logger {
+public:
+    statistics_logger () {
+        __TBB_ASSERT( sg_end - 1 == 1 << (sizeof(StatGroupTitles)/sizeof(*StatGroupTitles) - 1), NULL );
+
+        my_file = fopen("statistics.txt","w");
+        if( !my_file )
+            perror("fopen(\"statistics.txt\"\")");
+        // Initialize groups dump layout info
+        group_start_field[0] = 0;
+        for ( size_t i = 0, j = 0; i < NumGroups; ++i, ++j ) {
+            __TBB_ASSERT( StatFieldTitles[j], "Empty group occurred" );
+            while ( StatFieldTitles[j] )
+                ++j;
+            group_start_field[i + 1] = j - i; // -i accounts for preceding NULL separators
+        }
+        __TBB_ASSERT( group_start_field[NumGroups] == statistics_counters::size(),
+                      "Wrong number of elements in StatFieldTitles" );
+        dump( "%-*s", IDColumnWidth, "");
+        process_groups( &statistics_logger::print_group_title );
+        dump( "%-*s", IDColumnWidth, "ID");
+        process_groups( &statistics_logger::print_field_titles );
+    }
+
+    ~statistics_logger () { fclose(my_file); }
+
+    void record( const statistics_counters& c, size_t id ) {
+        spin_mutex::scoped_lock lock(my_mutex);
+        counters_to_dump = &c;
+        const char* idString = NULL;
+        switch ( id ) {
+        case 0:
+            idString = "M"; break;
+        case workers_counters_total:
+            idString = "Wtot"; break;
+        case arena_counters_total:
+            idString = "Tot"; break;
+        default:
+            dump( "W%-*u", IDColumnWidth - 1, id);
+        }
+        if ( idString )
+            dump( "%-*s", IDColumnWidth, idString);
+        process_groups( &statistics_logger::print_field_values );
+    }
+private:
+    static const size_t IDColumnWidth = 5;
+    static const size_t StatisticsColumnWidth = 10;
+    static const size_t NumGroups = sizeof(StatGroupTitles)/sizeof(char*);
+
+    //! File into which statistics are written.
+    FILE* my_file;
+    //! Mutex that serializes accesses to my_file
+    spin_mutex my_mutex;
+    //! Indices of each group's first field in the statistics_counters struct.
+    /** An extra element is used to track the total number of statistics fields. **/
+    size_t group_start_field[NumGroups + 1];
+    //! Currently processed set of counters.
+    const statistics_counters* counters_to_dump;
+
+    void dump ( char const* fmt, ... ) {
+        va_list args;
+        va_start( args, fmt );
+        if ( my_file )
+            vfprintf( my_file, fmt, args );
+        va_end( args );
+#if __TBB_STATISTICS_STDOUT
+        // Restart the argument list before traversing it a second time.
+        va_start( args, fmt );
+        vprintf( fmt, args );
+        va_end( args );
+#endif
+    }
+
+    void process_groups ( void (statistics_logger::*per_group_action)(size_t group_idx) ) {
+        for ( size_t i = 0, group_flag = 1; i < NumGroups; ++i, group_flag <<= 1 ) {
+            __TBB_ASSERT( group_flag < sg_end, "StatGroupTitles contents is incompatible with statistics_groups definition" );
+            if ( __TBB_ActiveStatisticsGroups & group_flag )
+                (this->*per_group_action)( i );
+        }
+        dump( "\n" );
+    }
+
+    void print_group_title ( size_t group_idx ) {
+        dump( "%-*s", (group_start_field[group_idx + 1] - group_start_field[group_idx]) * (StatisticsColumnWidth + 1),
+                        StatGroupTitles[group_idx] );
+    }
+
+    void print_field_titles ( size_t group_idx ) {
+        // +group_idx accounts for preceding NULL separators
+        size_t i = group_start_field[group_idx] + group_idx;
+        while ( StatFieldTitles[i] )
+            dump( "%-*s ", StatisticsColumnWidth, StatFieldTitles[i++] );
+    }
+
+    void print_field_values ( size_t group_idx ) {
+        size_t begin = group_start_field[group_idx],
+               end = group_start_field[group_idx + 1];
+        for ( size_t i = begin; i < end; ++i )
+            dump( "%-*ld ", StatisticsColumnWidth, counters_to_dump->field(i) );
+    }
+}; // class statistics_logger
+
+static statistics_logger the_statistics;
+
+void dump_statistics ( const statistics_counters& c, size_t id ) {
+    the_statistics.record(c, id);
+}
+
+} // namespace internal
+} // namespace tbb
+
+#endif /* __TBB_STATISTICS */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_statistics.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_statistics.h
new file mode 100644 (file)
index 0000000..7e59e02
--- /dev/null
@@ -0,0 +1,204 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _TBB_tbb_statistics_H
+#define _TBB_tbb_statistics_H
+
+/**
+    This file defines parameters of the internal statistics collected by the TBB
+    library (currently by the task scheduler only).
+    
+    Without __TBB_ARENA_PER_MASTER, statistics are accumulated in each thread
+    separately and are dumped when the scheduler instance in the given thread
+    is destroyed. For apps with multiple master threads, or with the same master
+    repeatedly initializing and then deinitializing the task scheduler, this
+    results in the statistics of TBB workers getting inseparably mixed.
+
+    Therefore in the new __TBB_ARENA_PER_MASTER mode statistics are instead
+    accumulated in arena slots, and should be dumped when the arena gets destroyed.
+    This separates the statistics collected for each scheduler activity region
+    in each master thread.
+
+    With the current RML implementation (TBB 2.2, 3.0), to avoid complete loss of
+    statistics data during app shutdown (because of the lazy worker deinitialization
+    logic), set the __TBB_STATISTICS_EARLY_DUMP macro to write the statistics at
+    the moment a master thread deinitializes its scheduler. This may happen a little
+    earlier than the moment of arena destruction, resulting in the following
+    undesired (though usually tolerable) effects:
+    - a few events related to unsuccessful stealing or thread pool activity may be lost,
+    - statistics may be substantially incomplete in case of FIFO tasks used in
+      the FAF mode.
+
+    Macro __TBB_STATISTICS_STDOUT and global variable __TBB_ActiveStatisticsGroups
+    defined below can be used to configure the statistics output.
+
+    To add a new counter:
+    1) Insert it into the appropriate group range in statistics_counters;
+    2) Insert the corresponding field title into StatFieldTitles (preserving
+       the relative order of the fields).
+
+    To add a new group of counters:
+    1) Insert a new group bit flag into statistics_groups;
+    2) Insert the new group title into StatGroupTitles (preserving
+       the relative order of the groups);
+    3) Add counters belonging to the new group as described above.
+**/
+
+#include "tbb/tbb_stddef.h"
+
+#ifndef __TBB_STATISTICS
+#define __TBB_STATISTICS 0
+#endif /* __TBB_STATISTICS */
+
+#if __TBB_STATISTICS
+
+#include <string.h>  // for memset
+
+//! Dump counters into stdout as well.
+/** By default statistics counters are written to the file "statistics.txt" only. **/
+#define __TBB_STATISTICS_STDOUT 1
+
+//! Dump statistics for an arena when its master completes
+/** By default (when this macro is not set) the statistics are sent to output when
+    the arena object is destroyed. But with the current lazy worker termination
+    logic the default behavior may result in losing all statistics output. **/
+#define __TBB_STATISTICS_EARLY_DUMP 1
+
+#define GATHER_STATISTIC(x) (x)
+
+namespace tbb {
+namespace internal {
+
+//! Groups of statistics counters.
+/** The order of enumerators must be the same as the order of the corresponding
+    field groups in the statistics_counters structure. **/
+enum statistics_groups {
+    sg_task_allocation = 0x01,
+    sg_task_execution = 0x02,
+    sg_stealing = 0x04,
+    sg_affinity = 0x08,
+    sg_arena = 0x10,
+    sg_market = 0x20,
+    // List end marker. Insert new groups only before it.
+    sg_end
+};
+
+//! Groups of counters to output
+const uintptr_t __TBB_ActiveStatisticsGroups = sg_task_execution | sg_stealing | sg_affinity | sg_arena | sg_market;
+
+//! A set of various statistics counters that are updated by the library on a per-thread basis.
+/** All the fields must be of the same type (statistics_counters::counter_type).
+    This is necessary to allow reinterpreting this structure as an array. **/
+struct statistics_counters {
+    typedef long counter_type;
+
+    // Group: sg_task_allocation
+    // Counters in this group can have negative values as the tasks migrate across 
+    // threads while the associated counters are updated in the current thread only
+    // to avoid data races
+    
+    //! Number of tasks allocated and not yet destroyed
+    counter_type active_tasks;
+    //! Number of task corpses stored for future reuse
+    counter_type free_list_length;
+    //! Number of big tasks allocated during the run
+    /** To find total number of tasks malloc'd, compute (big_tasks+small_task_count) */
+    counter_type big_tasks;
+    
+    // Group: sg_task_execution
+
+    //! Number of tasks executed
+    counter_type tasks_executed;
+    //! Number of elided spawns
+    counter_type spawns_bypassed;
+    
+    // Group: sg_stealing
+
+    //! Number of tasks successfully stolen
+    counter_type steals_committed;
+    //! Number of failed stealing attempts
+    counter_type steals_failed;
+    //! Number of conflicts encountered during stealing attempts
+    counter_type thieves_conflicts;
+
+    // Group: sg_affinity
+
+    //! Number of tasks received from mailbox
+    counter_type mails_received;
+    //! Number of affinitized tasks executed by the owner
+    /** Goes as "revoked" in statistics printout. **/
+    counter_type proxies_executed;
+    //! Number of affinitized tasks intercepted by thieves 
+    counter_type proxies_stolen;
+    //! Number of proxy bypasses by thieves during stealing
+    counter_type proxies_bypassed;
+    //! Number of affinitized tasks executed by the owner via scheduler bypass mechanism
+    counter_type affinity_ignored;
+
+    // Group: sg_arena
+
+    //! Number of times the state of arena switched between "full" and "empty"
+    counter_type gate_switches;
+    //! Number of times workers left an arena and returned into the market
+    counter_type arena_roundtrips;
+    //! Number of times workers left the market and returned into RML
+    counter_type market_roundtrips;
+
+    // Constructor and helpers
+
+    statistics_counters() { reset(); }
+
+    void reset () { memset( this, 0, sizeof(statistics_counters) ); }
+
+    counter_type& field ( size_t index ) { return reinterpret_cast<counter_type*>(this)[index]; }
+
+    const counter_type& field ( size_t index ) const { return reinterpret_cast<const counter_type*>(this)[index]; }
+
+    static size_t size () { return sizeof(statistics_counters) / sizeof(counter_type); }
+
+    const statistics_counters& operator += ( const statistics_counters& rhs ) {
+        for ( size_t i = 0; i < size(); ++i )
+            field(i) += rhs.field(i);
+        return *this;
+    }
+}; // statistics_counters
+
+static const size_t workers_counters_total = (size_t)-1;
+static const size_t arena_counters_total = (size_t)-2;
+
+void dump_statistics ( const statistics_counters& c, size_t id );
+
+} // namespace internal
+} // namespace tbb
+
+#else /* !__TBB_STATISTICS */
+
+#define GATHER_STATISTIC(x) ((void)0)
+
+#endif /* !__TBB_STATISTICS */
+
+#endif /* _TBB_tbb_statistics_H */
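
statistics_counters relies on all fields sharing one counter_type so that the whole struct can be traversed as a flat array through field() and size(); that is what lets statistics_logger print arbitrary group ranges and what operator+= uses to merge per-thread counters. A miniature sketch of the same idiom with hypothetical fields:

    #include <cstddef>

    struct mini_counters {
        typedef long counter_type;
        counter_type tasks_executed;    // every field must have the same type so that
        counter_type steals_committed;  // the struct can be reinterpreted as an array
        counter_type steals_failed;

        mini_counters() { for( std::size_t i = 0; i < size(); ++i ) field(i) = 0; }

        counter_type& field( std::size_t i ) { return reinterpret_cast<counter_type*>(this)[i]; }
        const counter_type& field( std::size_t i ) const { return reinterpret_cast<const counter_type*>(this)[i]; }
        static std::size_t size() { return sizeof(mini_counters) / sizeof(counter_type); }
    };

    // Merging per-thread counters into a total, as operator+= does above.
    void accumulate_sketch( mini_counters& total, const mini_counters& per_thread ) {
        for( std::size_t i = 0; i < mini_counters::size(); ++i )
            total.field(i) += per_thread.field(i);
    }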
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_thread.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_thread.cpp
new file mode 100644 (file)
index 0000000..40cb56f
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#if _WIN32||_WIN64
+#include <process.h>        /* Need _beginthreadex from there */
+#endif
+#include "tbb_misc.h"       // handle_win_error, ThreadStackSize
+#include "tbb/tbb_stddef.h"
+#include "tbb/tbb_thread.h"
+#include "tbb/tbb_allocator.h"
+#include "tbb/task_scheduler_init.h" /* Need task_scheduler_init::default_num_threads() */
+
+namespace tbb {
+namespace internal {
+
+//! Allocate a closure
+void* allocate_closure_v3( size_t size )
+{
+    return allocate_via_handler_v3( size );
+}
+
+//! Free a closure allocated by allocate_closure_v3
+void free_closure_v3( void *ptr )
+{
+    deallocate_via_handler_v3( ptr );
+}
+
+void tbb_thread_v3::join()
+{
+    __TBB_ASSERT( joinable(), "thread should be joinable when join called" );
+#if _WIN32||_WIN64 
+    DWORD status = WaitForSingleObject( my_handle, INFINITE );
+    if ( status == WAIT_FAILED )
+        handle_win_error( GetLastError() );
+    BOOL close_stat = CloseHandle( my_handle );
+    if ( close_stat == 0 )
+        handle_win_error( GetLastError() );
+    my_thread_id = 0;
+#else
+    int status = pthread_join( my_handle, NULL );
+    if( status )
+        handle_perror( status, "pthread_join" );
+#endif // _WIN32||_WIN64 
+    my_handle = 0;
+}
+
+void tbb_thread_v3::detach() {
+    __TBB_ASSERT( joinable(), "only joinable thread can be detached" );
+#if _WIN32||_WIN64
+    BOOL status = CloseHandle( my_handle );
+    if ( status == 0 )
+      handle_win_error( GetLastError() );
+    my_thread_id = 0;
+#else
+    int status = pthread_detach( my_handle );
+    if( status )
+        handle_perror( status, "pthread_detach" );
+#endif // _WIN32||_WIN64
+    my_handle = 0;
+}
+
+void tbb_thread_v3::internal_start( __TBB_NATIVE_THREAD_ROUTINE_PTR(start_routine),
+                                    void* closure ) {
+#if _WIN32||_WIN64
+    unsigned thread_id;
+    // The return type of _beginthreadex is "uintptr_t" on new MS compilers,
+    // and 'unsigned long' on old MS compilers.  uintptr_t works for both.
+    uintptr_t status = _beginthreadex( NULL, ThreadStackSize, start_routine,
+                                     closure, 0, &thread_id ); 
+    if( status==0 )
+        handle_perror(errno,"__beginthreadex");
+    else {
+        my_handle = (HANDLE)status;
+        my_thread_id = thread_id;
+    }
+#else
+    pthread_t thread_handle;
+    int status;
+    pthread_attr_t stack_size;
+    status = pthread_attr_init( &stack_size );
+    if( status )
+        handle_perror( status, "pthread_attr_init" );
+    status = pthread_attr_setstacksize( &stack_size, ThreadStackSize );
+    if( status )
+        handle_perror( status, "pthread_attr_setstacksize" );
+
+    status = pthread_create( &thread_handle, &stack_size, start_routine, closure );
+    if( status )
+        handle_perror( status, "pthread_create" );
+
+    my_handle = thread_handle;
+#endif // _WIN32||_WIN64
+}
+
+unsigned tbb_thread_v3::hardware_concurrency() {
+    return task_scheduler_init::default_num_threads();
+}
+
+tbb_thread_v3::id thread_get_id_v3() {
+#if _WIN32||_WIN64
+    return tbb_thread_v3::id( GetCurrentThreadId() );
+#else
+    return tbb_thread_v3::id( pthread_self() );
+#endif // _WIN32||_WIN64
+}
+    
+void move_v3( tbb_thread_v3& t1, tbb_thread_v3& t2 )
+{
+    if (t1.joinable())
+        t1.detach();
+    t1.my_handle = t2.my_handle;
+    t2.my_handle = 0;
+#if _WIN32||_WIN64
+    t1.my_thread_id = t2.my_thread_id;
+    t2.my_thread_id = 0;
+#endif // _WIN32||_WIN64
+}
+
+void thread_yield_v3()
+{
+    __TBB_Yield();
+}
+
+void thread_sleep_v3(const tick_count::interval_t &i)
+{
+#if _WIN32||_WIN64
+    tick_count t0 = tick_count::now();
+    tick_count t1 = t0;
+    for(;;) {
+        double remainder = (i-(t1-t0)).seconds()*1e3;  // milliseconds remaining to sleep
+        if( remainder<=0 ) break;
+        DWORD t = remainder>=INFINITE ? INFINITE-1 : DWORD(remainder);
+        Sleep( t );
+        t1 = tick_count::now();
+    }
+#else
+    struct timespec req;
+    double sec = i.seconds();
+
+    req.tv_sec = static_cast<long>(sec);
+    req.tv_nsec = static_cast<long>( (sec - req.tv_sec)*1e9 );
+    nanosleep(&req, NULL);
+#endif // _WIN32||_WIN64
+}
+
+} // internal
+} // tbb
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_version.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tbb_version.h
new file mode 100644 (file)
index 0000000..efb9b75
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Please define version number in the file:
+#include "tbb/tbb_stddef.h"
+
+// And don't touch anything below
+#ifndef ENDL
+#define ENDL "\n"
+#endif
+#include "version_string.tmp"
+
+#ifndef __TBB_VERSION_STRINGS
+#pragma message("Warning: version_string.tmp isn't generated properly by version_info.sh script!")
+// here is an example of macros value:
+#define __TBB_VERSION_STRINGS \
+"TBB: BUILD_HOST\tUnknown\n" \
+"TBB: BUILD_ARCH\tUnknown\n" \
+"TBB: BUILD_OS\t\tUnknown\n" \
+"TBB: BUILD_CL\t\tUnknown\n" \
+"TBB: BUILD_COMPILER\tUnknown\n" \
+"TBB: BUILD_COMMAND\tUnknown\n"
+#endif
+#ifndef __TBB_DATETIME
+#ifdef RC_INVOKED
+#define __TBB_DATETIME "Unknown"
+#else
+#define __TBB_DATETIME __DATE__ __TIME__
+#endif
+#endif
+
+#define __TBB_VERSION_NUMBER "TBB: VERSION\t\t" __TBB_STRING(TBB_VERSION_MAJOR.TBB_VERSION_MINOR) ENDL
+#define __TBB_INTERFACE_VERSION_NUMBER "TBB: INTERFACE VERSION\t" __TBB_STRING(TBB_INTERFACE_VERSION) ENDL
+#define __TBB_VERSION_DATETIME "TBB: BUILD_DATE\t\t" __TBB_DATETIME ENDL
+#ifndef TBB_USE_DEBUG
+    #define __TBB_VERSION_USE_DEBUG "TBB: TBB_USE_DEBUG\tundefined" ENDL
+#elif TBB_USE_DEBUG==0
+    #define __TBB_VERSION_USE_DEBUG "TBB: TBB_USE_DEBUG\t0" ENDL
+#elif TBB_USE_DEBUG==1
+    #define __TBB_VERSION_USE_DEBUG "TBB: TBB_USE_DEBUG\t1" ENDL
+#elif TBB_USE_DEBUG==2
+    #define __TBB_VERSION_USE_DEBUG "TBB: TBB_USE_DEBUG\t2" ENDL
+#else
+    #error Unexpected value for TBB_USE_DEBUG
+#endif
+#ifndef TBB_USE_ASSERT
+    #define __TBB_VERSION_USE_ASSERT "TBB: TBB_USE_ASSERT\tundefined" ENDL
+#elif TBB_USE_ASSERT==0
+    #define __TBB_VERSION_USE_ASSERT "TBB: TBB_USE_ASSERT\t0" ENDL
+#elif TBB_USE_ASSERT==1
+    #define __TBB_VERSION_USE_ASSERT "TBB: TBB_USE_ASSERT\t1" ENDL
+#elif TBB_USE_ASSERT==2
+    #define __TBB_VERSION_USE_ASSERT "TBB: TBB_USE_ASSERT\t2" ENDL
+#else
+    #error Unexpected value for TBB_USE_ASSERT
+#endif
+#ifndef DO_ITT_NOTIFY
+    #define __TBB_VERSION_DO_NOTIFY "TBB: DO_ITT_NOTIFY\tundefined" ENDL
+#elif DO_ITT_NOTIFY==1
+    #define __TBB_VERSION_DO_NOTIFY "TBB: DO_ITT_NOTIFY\t1" ENDL
+#elif DO_ITT_NOTIFY==0
+    #define __TBB_VERSION_DO_NOTIFY
+#else
+    #error Unexpected value for DO_ITT_NOTIFY
+#endif
+
+#define TBB_VERSION_STRINGS __TBB_VERSION_NUMBER __TBB_INTERFACE_VERSION_NUMBER __TBB_VERSION_DATETIME __TBB_VERSION_STRINGS __TBB_VERSION_USE_DEBUG __TBB_VERSION_USE_ASSERT __TBB_VERSION_DO_NOTIFY
+
+// numbers
+#ifndef __TBB_VERSION_YMD
+#define __TBB_VERSION_YMD 0, 0
+#endif
+
+#define TBB_VERNUMBERS TBB_VERSION_MAJOR, TBB_VERSION_MINOR, __TBB_VERSION_YMD
+
+#define TBB_VERSION __TBB_STRING(TBB_VERNUMBERS)
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tls.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tls.h
new file mode 100644 (file)
index 0000000..4baa84d
--- /dev/null
@@ -0,0 +1,119 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _TBB_tls_H
+#define _TBB_tls_H
+
+#if USE_PTHREAD
+#include <pthread.h>
+#else /* assume USE_WINTHREAD */
+#include "tbb/machine/windows_api.h"
+#endif
+
+namespace tbb {
+
+namespace internal {
+
+typedef void (*tls_dtor_t)(void*);
+
+//! Basic cross-platform wrapper class for TLS operations.
+template <typename T>
+class basic_tls {
+#if USE_PTHREAD
+    typedef pthread_key_t tls_key_t;
+public:
+    int  create( tls_dtor_t dtor = NULL ) {
+        return pthread_key_create(&my_key, dtor);
+    }
+    int  destroy()      { return pthread_key_delete(my_key); }
+    void set( T value ) { pthread_setspecific(my_key, (void*)value); }
+    T    get()          { return (T)pthread_getspecific(my_key); }
+#else /* USE_WINTHREAD */
+    typedef DWORD tls_key_t;
+public:
+    int create() {
+        tls_key_t tmp = TlsAlloc();
+        if( tmp==TLS_OUT_OF_INDEXES )
+            return TLS_OUT_OF_INDEXES;
+        my_key = tmp;
+        return 0;
+    }
+    int  destroy()      { TlsFree(my_key); my_key=0; return 0; }
+    void set( T value ) { TlsSetValue(my_key, (LPVOID)value); }
+    T    get()          { return (T)TlsGetValue(my_key); }
+#endif
+private:
+    tls_key_t my_key;
+};
+
+//! More advanced TLS support template class.
+/** It supports RAII and to some extent mimics __declspec(thread) variables. */
+template <typename T>
+class tls : public basic_tls<T> {
+    typedef basic_tls<T> base;
+public:
+    tls()  { base::create();  }
+    ~tls() { base::destroy(); }
+    T operator=(T value) { base::set(value); return value; }
+    operator T() { return base::get(); }
+};
+
+template <typename T>
+class tls<T*> : basic_tls<T*> {
+    typedef basic_tls<T*> base;
+    static void internal_dtor(void* ptr) {
+        if (ptr) delete (T*)ptr;
+    }
+    T* internal_get() {
+        T* result = base::get();
+        if (!result) {
+            result = new T;
+            base::set(result);
+        }
+        return result;
+    }
+public:
+    tls()  {
+#if USE_PTHREAD
+        base::create( internal_dtor );
+#else
+        base::create();
+#endif
+    }
+    ~tls() { base::destroy(); }
+    T* operator=(T* value) { base::set(value); return value; }
+    operator T*()   { return  internal_get(); }
+    T* operator->() { return  internal_get(); }
+    T& operator*()  { return *internal_get(); }
+};
+
+} // namespace internal
+
+} // namespace tbb
+
+#endif /* _TBB_tls_H */
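
The tls<T> wrapper above gives each thread its own value through the OS TLS API while reading and writing like an ordinary variable, which is what the "mimic __declspec(thread)" comment refers to. A usage sketch with hypothetical names (assumes the TBB build defines USE_PTHREAD or USE_WINTHREAD as usual):

    #include "tls.h"

    static tbb::internal::tls<long> per_thread_counter;   // a fresh slot reads as 0 in every thread

    void count_event_sketch() {
        long n = per_thread_counter;   // operator T(): read this thread's value
        per_thread_counter = n + 1;    // operator=(): store into this thread's slot only
    }

For tls<T*>, operator->() and operator*() allocate a T on first use in each thread, and on POSIX builds internal_dtor is registered so the object is deleted when the thread exits.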
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/disable_warnings.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/disable_warnings.h
new file mode 100644 (file)
index 0000000..28100e1
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "ittnotify_config.h"
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+
+#pragma warning (disable: 593)   /* parameter "XXXX" was set but never used                 */
+#pragma warning (disable: 344)   /* typedef name has already been declared (with same type) */
+#pragma warning (disable: 174)   /* expression has no effect                                */
+#pragma warning (disable: 4127)  /* conditional expression is constant                      */
+#pragma warning (disable: 4306)  /* conversion from '?' to '?' of greater size              */
+
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+#if defined __INTEL_COMPILER
+
+#pragma warning (disable: 869)  /* parameter "XXXXX" was never referenced                  */
+#pragma warning (disable: 1418) /* external function definition with no prior declaration  */
+#pragma warning (disable: 1419) /* external declaration in primary source file             */
+
+#endif /* __INTEL_COMPILER */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/internal/ittnotify.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/internal/ittnotify.h
new file mode 100644 (file)
index 0000000..f037807
--- /dev/null
@@ -0,0 +1,661 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _INTERNAL_ITTNOTIFY_H_
+#define _INTERNAL_ITTNOTIFY_H_
+/**
+ * @file
+ * @brief Internal User API functions and types
+ */
+
+/** @cond exclude_from_documentation */
+#ifndef ITT_OS_WIN
+#  define ITT_OS_WIN   1
+#endif /* ITT_OS_WIN */
+
+#ifndef ITT_OS_LINUX
+#  define ITT_OS_LINUX 2
+#endif /* ITT_OS_LINUX */
+
+#ifndef ITT_OS_MAC
+#  define ITT_OS_MAC   3
+#endif /* ITT_OS_MAC */
+
+#ifndef ITT_OS
+#  if defined WIN32 || defined _WIN32
+#    define ITT_OS ITT_OS_WIN
+#  elif defined( __APPLE__ ) && defined( __MACH__ )
+#    define ITT_OS ITT_OS_MAC
+#  else
+#    define ITT_OS ITT_OS_LINUX
+#  endif
+#endif /* ITT_OS */
+
+#ifndef ITT_PLATFORM_WIN
+#  define ITT_PLATFORM_WIN 1
+#endif /* ITT_PLATFORM_WIN */
+
+#ifndef ITT_PLATFORM_POSIX
+#  define ITT_PLATFORM_POSIX 2
+#endif /* ITT_PLATFORM_POSIX */
+
+#ifndef ITT_PLATFORM
+#  if ITT_OS==ITT_OS_WIN
+#    define ITT_PLATFORM ITT_PLATFORM_WIN
+#  else
+#    define ITT_PLATFORM ITT_PLATFORM_POSIX
+#  endif /* _WIN32 */
+#endif /* ITT_PLATFORM */
+
+#include <stddef.h>
+#include <stdarg.h>
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#include <tchar.h>
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+#ifndef CDECL
+#  if ITT_PLATFORM==ITT_PLATFORM_WIN
+#    define CDECL __cdecl
+#  else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#    define CDECL /* nothing */
+#  endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* CDECL */
+
+#ifndef STDCALL
+#  if ITT_PLATFORM==ITT_PLATFORM_WIN
+#    define STDCALL __stdcall
+#  else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#    define STDCALL /* nothing */
+#  endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* STDCALL */
+
+#define ITTAPI    CDECL
+#define LIBITTAPI /* nothing */
+
+#define ITT_JOIN_AUX(p,n) p##n
+#define ITT_JOIN(p,n)     ITT_JOIN_AUX(p,n)
+
+#ifndef INTEL_ITTNOTIFY_PREFIX
+#  define INTEL_ITTNOTIFY_PREFIX __itt_
+#endif /* INTEL_ITTNOTIFY_PREFIX */
+#ifndef INTEL_ITTNOTIFY_POSTFIX
+#  define INTEL_ITTNOTIFY_POSTFIX _ptr_
+#endif /* INTEL_ITTNOTIFY_POSTFIX */
+
+#define ITTNOTIFY_NAME_AUX(n) ITT_JOIN(INTEL_ITTNOTIFY_PREFIX,n)
+#define ITTNOTIFY_NAME(n)     ITTNOTIFY_NAME_AUX(ITT_JOIN(n,INTEL_ITTNOTIFY_POSTFIX))
+
+#define ITTNOTIFY_VOID(n) (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)
+#define ITTNOTIFY_DATA(n) (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)
+
+#ifdef ITT_STUB
+#undef ITT_STUB
+#endif
+#ifdef ITT_STUBV
+#undef ITT_STUBV
+#endif
+#define ITT_STUBV(api,type,name,args,params)                      \
+    typedef type (api* ITT_JOIN(ITTNOTIFY_NAME(name),_t)) args;   \
+    extern ITT_JOIN(ITTNOTIFY_NAME(name),_t) ITTNOTIFY_NAME(name);
+#define ITT_STUB ITT_STUBV
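
To make the stub machinery above concrete, here is a small self-contained sketch of the same dispatch technique with made-up names (the token "example" and the JOIN/NOTIFY_NAME/demo_* macros are not part of the API); unlike the real stubs it omits the ITTAPI calling convention and defines the pointer locally instead of declaring it extern:

    #include <stddef.h>

    /* Same token pasting as ITT_JOIN/ITTNOTIFY_NAME: NOTIFY_NAME(example)
       becomes __itt_example_ptr_ with the default prefix and postfix.      */
    #define JOIN_AUX(p,n)  p##n
    #define JOIN(p,n)      JOIN_AUX(p,n)
    #define NOTIFY_NAME(n) JOIN(__itt_, JOIN(n, _ptr_))

    /* What an ITT_STUBV-style stub generates: a function-pointer type and
       the pointer itself (NULL here, i.e. no collector attached).          */
    typedef void (*JOIN(NOTIFY_NAME(example), _t))(void);
    static JOIN(NOTIFY_NAME(example), _t) NOTIFY_NAME(example) = NULL;

    /* ITTNOTIFY_VOID-style dispatch: the call is skipped while the pointer
       is NULL and forwarded once a collector has filled it in.             */
    #define demo_example() ((!NOTIFY_NAME(example)) ? (void)0 : NOTIFY_NAME(example)())

    int main(void)
    {
        demo_example();   /* expands to a guarded call through __itt_example_ptr_ */
        return 0;
    }
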
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+/** @endcond */
+
+/**
+ * @defgroup internal Internal API
+ * @{
+ * @}
+ */
+
+/**
+ * @defgroup marks Marks
+ * @ingroup internal
+ * Marks group
+ * @warning Internal API:
+ *   - It is not shipped outside of Intel
+ *   - It is delivered to internal Intel teams using e-mail or SVN access only
+ * @{
+ */
+/** @brief user mark type */
+typedef int __itt_mark_type;
+
+/**
+ * @brief Creates a user mark type with the specified name using char or Unicode string.
+ * @param[in] name - name of mark to create
+ * @return Returns a handle to the mark type
+ */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+__itt_mark_type ITTAPI __itt_mark_createA(const char    *name);
+__itt_mark_type ITTAPI __itt_mark_createW(const wchar_t *name);
+#ifdef UNICODE
+#  define __itt_mark_create     __itt_mark_createW
+#  define __itt_mark_create_ptr __itt_mark_createW_ptr
+#else /* UNICODE */
+#  define __itt_mark_create     __itt_mark_createA
+#  define __itt_mark_create_ptr __itt_mark_createA_ptr
+#endif /* UNICODE */
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+__itt_mark_type ITTAPI __itt_mark_create(const char *name);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUB(ITTAPI, __itt_mark_type, mark_createA, (const char    *name), (name))
+ITT_STUB(ITTAPI, __itt_mark_type, mark_createW, (const wchar_t *name), (name))
+#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUB(ITTAPI, __itt_mark_type, mark_create,  (const char *name), (name))
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_mark_createA     ITTNOTIFY_DATA(mark_createA)
+#define __itt_mark_createA_ptr ITTNOTIFY_NAME(mark_createA)
+#define __itt_mark_createW     ITTNOTIFY_DATA(mark_createW)
+#define __itt_mark_createW_ptr ITTNOTIFY_NAME(mark_createW)
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_mark_create      ITTNOTIFY_DATA(mark_create)
+#define __itt_mark_create_ptr  ITTNOTIFY_NAME(mark_create)
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_mark_createA(name) (__itt_mark_type)0
+#define __itt_mark_createA_ptr 0
+#define __itt_mark_createW(name) (__itt_mark_type)0
+#define __itt_mark_createW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_mark_create(name)  (__itt_mark_type)0
+#define __itt_mark_create_ptr  0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_mark_createA_ptr 0
+#define __itt_mark_createW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_mark_create_ptr  0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Creates a "discrete" user mark type of the specified type and an optional parameter using char or Unicode string.
+ *
+ * - The mark of "discrete" type is placed to collection results in case of success. It appears in overtime view(s) as a special tick sign.
+ * - The call is "synchronous" - function returns after mark is actually added to results.
+ * - This function is useful, for example, to mark different phases of application
+ *   (beginning of the next mark automatically meand end of current region).
+ * - Can be used together with "continuous" marks (see below) at the same collection session
+ * @param[in] mt - mark, created by __itt_mark_create(const char* name) function
+ * @param[in] parameter - string parameter of mark
+ * @return Returns zero value in case of success, non-zero value otherwise.
+ */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+int ITTAPI __itt_markA(__itt_mark_type mt, const char    *parameter);
+int ITTAPI __itt_markW(__itt_mark_type mt, const wchar_t *parameter);
+#ifdef UNICODE
+#  define __itt_mark     __itt_markW
+#  define __itt_mark_ptr __itt_markW_ptr
+#else /* UNICODE  */
+#  define __itt_mark     __itt_markA
+#  define __itt_mark_ptr __itt_markA_ptr
+#endif /* UNICODE */
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+int ITTAPI __itt_mark(__itt_mark_type mt, const char *parameter);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUB(ITTAPI, int, markA, (__itt_mark_type mt, const char    *parameter), (mt, parameter))
+ITT_STUB(ITTAPI, int, markW, (__itt_mark_type mt, const wchar_t *parameter), (mt, parameter))
+#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUB(ITTAPI, int, mark,  (__itt_mark_type mt, const char *parameter), (mt, parameter))
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_markA     ITTNOTIFY_DATA(markA)
+#define __itt_markA_ptr ITTNOTIFY_NAME(markA)
+#define __itt_markW     ITTNOTIFY_DATA(markW)
+#define __itt_markW_ptr ITTNOTIFY_NAME(markW)
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_mark      ITTNOTIFY_DATA(mark)
+#define __itt_mark_ptr  ITTNOTIFY_NAME(mark)
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_markA(mt, parameter) (int)0
+#define __itt_markA_ptr 0
+#define __itt_markW(mt, parameter) (int)0
+#define __itt_markW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_mark(mt, parameter)  (int)0
+#define __itt_mark_ptr  0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_markA_ptr 0
+#define __itt_markW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_mark_ptr  0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
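
By way of illustration only (not taken from the TBB sources), discrete marks might be placed as sketched below; a char-based (non-UNICODE) build is assumed, the include path is illustrative, and the program must be linked with the static part of the ittnotify library that defines the __itt_*_ptr_ pointers:

    #include "internal/ittnotify.h"   /* include path is illustrative */

    static void run_phases(void)
    {
        /* Create the mark type once; the returned handle can be used on any thread. */
        __itt_mark_type phase = __itt_mark_create("application phase");

        __itt_mark(phase, "initialization");   /* start of the initialization phase  */
        /* ... initialization work ... */

        __itt_mark(phase, "computation");      /* implicitly ends the previous phase */
        /* ... main computation ... */
    }
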
+
+/**
+ * @brief Use this if necessary to create a "discrete" user event type (mark) for the whole process
+ * rather than for a single thread
+ * @see int __itt_mark(__itt_mark_type mt, const char* parameter);
+ */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+int ITTAPI __itt_mark_globalA(__itt_mark_type mt, const char    *parameter);
+int ITTAPI __itt_mark_globalW(__itt_mark_type mt, const wchar_t *parameter);
+#ifdef UNICODE
+#  define __itt_mark_global     __itt_mark_globalW
+#  define __itt_mark_global_ptr __itt_mark_globalW_ptr
+#else /* UNICODE  */
+#  define __itt_mark_global     __itt_mark_globalA
+#  define __itt_mark_global_ptr __itt_mark_globalA_ptr
+#endif /* UNICODE */
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+int ITTAPI __itt_mark_global(__itt_mark_type mt, const char *parameter);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUB(ITTAPI, int, mark_globalA, (__itt_mark_type mt, const char    *parameter), (mt, parameter))
+ITT_STUB(ITTAPI, int, mark_globalW, (__itt_mark_type mt, const wchar_t *parameter), (mt, parameter))
+#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUB(ITTAPI, int, mark_global,  (__itt_mark_type mt, const char *parameter), (mt, parameter))
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_mark_globalA     ITTNOTIFY_DATA(mark_globalA)
+#define __itt_mark_globalA_ptr ITTNOTIFY_NAME(mark_globalA)
+#define __itt_mark_globalW     ITTNOTIFY_DATA(mark_globalW)
+#define __itt_mark_globalW_ptr ITTNOTIFY_NAME(mark_globalW)
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_mark_global      ITTNOTIFY_DATA(mark_global)
+#define __itt_mark_global_ptr  ITTNOTIFY_NAME(mark_global)
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_mark_globalA(mt, parameter) (int)0
+#define __itt_mark_globalA_ptr 0
+#define __itt_mark_globalW(mt, parameter) (int)0
+#define __itt_mark_globalW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_mark_global(mt, parameter)  (int)0
+#define __itt_mark_global_ptr  0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_mark_globalA_ptr 0
+#define __itt_mark_globalW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_mark_global_ptr  0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Creates an "end" point for "continuous" mark with specified name.
+ *
+ * - Returns zero value in case of success, non-zero value otherwise.
+ *   Also returns non-zero value when preceding "begin" point for the
+ *   mark with the same name failed to be created or not created.
+ * - The mark of "continuous" type is placed to collection results in
+ *   case of success. It appears in overtime view(s) as a special tick
+ *   sign (different from "discrete" mark) together with line from
+ *   corresponding "begin" mark to "end" mark.
+ * @note Continuous marks can overlap and be nested inside each other.
+ * Discrete mark can be nested inside marked region
+ * @param[in] mt - mark, created by __itt_mark_create(const char* name) function
+ * @return Returns zero value in case of success, non-zero value otherwise.
+ */
+int ITTAPI __itt_mark_off(__itt_mark_type mt);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUB(ITTAPI, int, mark_off, (__itt_mark_type mt), (mt))
+#define __itt_mark_off     ITTNOTIFY_DATA(mark_off)
+#define __itt_mark_off_ptr ITTNOTIFY_NAME(mark_off)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_mark_off(mt) (int)0
+#define __itt_mark_off_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_mark_off_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Use this if necessary to create an "end" point for mark of process
+ * @see int __itt_mark_off(__itt_mark_type mt);
+ */
+int ITTAPI __itt_mark_global_off(__itt_mark_type mt);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUB(ITTAPI, int, mark_global_off, (__itt_mark_type mt), (mt))
+#define __itt_mark_global_off     ITTNOTIFY_DATA(mark_global_off)
+#define __itt_mark_global_off_ptr ITTNOTIFY_NAME(mark_global_off)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_mark_global_off(mt) (int)0
+#define __itt_mark_global_off_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_mark_global_off_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+/** @} marks group */
+
+/**
+ * @defgroup counters Counters
+ * @ingroup internal
+ * Counters group
+ * @{
+ */
+/**
+ * @brief opaque structure for counter identification
+ */
+typedef struct ___itt_counter *__itt_counter;
+
+/**
+ * @brief Create a counter with the given name/domain for the calling thread
+ *
+ * After __itt_counter_create() is called, __itt_counter_inc() / __itt_counter_inc_delta() can be used
+ * to increment the counter on any thread
+ */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+__itt_counter ITTAPI __itt_counter_createA(const char    *name, const char    *domain);
+__itt_counter ITTAPI __itt_counter_createW(const wchar_t *name, const wchar_t *domain);
+#ifdef UNICODE
+#  define __itt_counter_create     __itt_counter_createW
+#  define __itt_counter_create_ptr __itt_counter_createW_ptr
+#else /* UNICODE */
+#  define __itt_counter_create     __itt_counter_createA
+#  define __itt_counter_create_ptr __itt_counter_createA_ptr
+#endif /* UNICODE */
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+__itt_counter ITTAPI __itt_counter_create(const char *name, const char *domain);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUB(ITTAPI, __itt_counter, counter_createA, (const char    *name, const char    *domain), (name, domain))
+ITT_STUB(ITTAPI, __itt_counter, counter_createW, (const wchar_t *name, const wchar_t *domain), (name, domain))
+#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUB(ITTAPI, __itt_counter, counter_create,  (const char *name, const char *domain), (name, domain))
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_counter_createA     ITTNOTIFY_DATA(counter_createA)
+#define __itt_counter_createA_ptr ITTNOTIFY_NAME(counter_createA)
+#define __itt_counter_createW     ITTNOTIFY_DATA(counter_createW)
+#define __itt_counter_createW_ptr ITTNOTIFY_NAME(counter_createW)
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_counter_create     ITTNOTIFY_DATA(counter_create)
+#define __itt_counter_create_ptr ITTNOTIFY_NAME(counter_create)
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_counter_createA(name, domain)
+#define __itt_counter_createA_ptr 0
+#define __itt_counter_createW(name, domain)
+#define __itt_counter_createW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_counter_create(name, domain)
+#define __itt_counter_create_ptr  0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_counter_createA_ptr 0
+#define __itt_counter_createW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_counter_create_ptr  0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Destroy the counter identified by the pointer previously returned by __itt_counter_create()
+ */
+void ITTAPI __itt_counter_destroy(__itt_counter id);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, counter_destroy, (__itt_counter id), (id))
+#define __itt_counter_destroy     ITTNOTIFY_VOID(counter_destroy)
+#define __itt_counter_destroy_ptr ITTNOTIFY_NAME(counter_destroy)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_counter_destroy(id)
+#define __itt_counter_destroy_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_counter_destroy_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Increment the counter value
+ */
+void ITTAPI __itt_counter_inc(__itt_counter id);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, counter_inc, (__itt_counter id), (id))
+#define __itt_counter_inc     ITTNOTIFY_VOID(counter_inc)
+#define __itt_counter_inc_ptr ITTNOTIFY_NAME(counter_inc)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_counter_inc(id)
+#define __itt_counter_inc_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_counter_inc_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Increment the counter value by the specified delta
+ */
+void ITTAPI __itt_counter_inc_delta(__itt_counter id, unsigned long long value);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, counter_inc_delta, (__itt_counter id, unsigned long long value), (id, value))
+#define __itt_counter_inc_delta     ITTNOTIFY_VOID(counter_inc_delta)
+#define __itt_counter_inc_delta_ptr ITTNOTIFY_NAME(counter_inc_delta)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_counter_inc_delta(id, value)
+#define __itt_counter_inc_delta_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_counter_inc_delta_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+/** @} counters group */
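
A minimal sketch of the counter API above under the same assumptions (char-based build, illustrative include path, link against the static ittnotify part); the counter name and domain strings are made up:

    #include "internal/ittnotify.h"   /* include path is illustrative */

    static void count_processed_items(unsigned long long batch_size)
    {
        /* The name/domain pair identifies the counter in the analysis tool. */
        __itt_counter items = __itt_counter_create("items processed", "example.domain");

        __itt_counter_inc(items);                     /* increment by one        */
        __itt_counter_inc_delta(items, batch_size);   /* increment by a batch    */

        __itt_counter_destroy(items);                 /* release the counter id  */
    }
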
+
+/**
+ * @defgroup stitch Stack Stitching
+ * @ingroup internal
+ * Stack Stitching group
+ * @{
+ */
+/**
+ * @brief opaque structure for stitch point (caller) identification
+ */
+typedef struct ___itt_caller *__itt_caller;
+
+/**
+ * @brief Create a stitch point, i.e. a point in the call stack where other stacks should be stitched to.
+ * The function returns a unique identifier which is used to match cut points with their corresponding stitch points.
+ */
+__itt_caller ITTAPI __itt_stack_caller_create(void);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUB(ITTAPI, __itt_caller, stack_caller_create, (void), ())
+#define __itt_stack_caller_create     ITTNOTIFY_DATA(stack_caller_create)
+#define __itt_stack_caller_create_ptr ITTNOTIFY_NAME(stack_caller_create)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_stack_caller_create() (__itt_caller)0
+#define __itt_stack_caller_create_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_stack_caller_create_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Destroy the information about the stitch point identified by the pointer previously returned by __itt_stack_caller_create()
+ */
+void ITTAPI __itt_stack_caller_destroy(__itt_caller id);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, stack_caller_destroy, (__itt_caller id), (id))
+#define __itt_stack_caller_destroy     ITTNOTIFY_VOID(stack_caller_destroy)
+#define __itt_stack_caller_destroy_ptr ITTNOTIFY_NAME(stack_caller_destroy)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_stack_caller_destroy(id)
+#define __itt_stack_caller_destroy_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_stack_caller_destroy_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Sets the cut point. The stack of each event that occurs after this call will be cut
+ * at the stack level at which this function was called and stitched to the corresponding stitch point.
+ */
+void ITTAPI __itt_stack_callee_enter(__itt_caller id);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, stack_callee_enter, (__itt_caller id), (id))
+#define __itt_stack_callee_enter     ITTNOTIFY_VOID(stack_callee_enter)
+#define __itt_stack_callee_enter_ptr ITTNOTIFY_NAME(stack_callee_enter)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_stack_callee_enter(id)
+#define __itt_stack_callee_enter_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_stack_callee_enter_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief This function eliminates the cut point that was set by the latest __itt_stack_callee_enter() call.
+ */
+void ITTAPI __itt_stack_callee_leave(__itt_caller id);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, stack_callee_leave, (__itt_caller id), (id))
+#define __itt_stack_callee_leave     ITTNOTIFY_VOID(stack_callee_leave)
+#define __itt_stack_callee_leave_ptr ITTNOTIFY_NAME(stack_callee_leave)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_stack_callee_leave(id)
+#define __itt_stack_callee_leave_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_stack_callee_leave_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/** @} stitch group */
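
A sketch of how a task scheduler might use the stitching calls above so that stacks of deferred work appear attached to the spawn site; the demo_task type and its callback are hypothetical:

    #include "internal/ittnotify.h"   /* include path is illustrative */

    typedef struct demo_task {
        void (*run)(void *arg);
        void *arg;
        __itt_caller spawn_point;     /* stitch point captured where the task was spawned */
    } demo_task;

    static void demo_spawn(demo_task *task)
    {
        task->spawn_point = __itt_stack_caller_create();   /* remember the spawning stack */
        /* ... enqueue the task for later execution ... */
    }

    static void demo_execute(demo_task *task)
    {
        __itt_stack_callee_enter(task->spawn_point);   /* cut here, stitch to spawn point */
        task->run(task->arg);
        __itt_stack_callee_leave(task->spawn_point);   /* remove the cut point            */
        __itt_stack_caller_destroy(task->spawn_point); /* done with the stitch point      */
    }
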
+
+/* ***************************************************************************************************************************** */
+
+/** @cond exclude_from_documentation */
+typedef enum __itt_error_code {
+    __itt_error_success       = 0, /*!< no error */
+    __itt_error_no_module     = 1, /*!< module can't be loaded */
+    /* %1$s -- library name; win: %2$d -- system error code; unx: %2$s -- system error message. */
+    __itt_error_no_symbol     = 2, /*!< symbol not found */
+    /* %1$s -- library name, %2$s -- symbol name. */
+    __itt_error_unknown_group = 3, /*!< unknown group specified */
+    /* %1$s -- env var name, %2$s -- group name. */
+    __itt_error_cant_read_env = 4, /*!< GetEnvironmentVariable() failed */
+    /* %1$s -- env var name, %2$d -- system error. */
+    __itt_error_env_too_long  = 5, /*!< variable value too long */
+    /* %1$s -- env var name, %2$d -- actual length of the var, %3$d -- max allowed length. */
+    __itt_error_system        = 6  /*!< pthread_mutexattr_init or pthread_mutex_init failed */
+    /* %1$s -- function name, %2$d -- errno. */
+} __itt_error_code;
+
+typedef void (__itt_error_notification_t)(__itt_error_code code, va_list);
+__itt_error_notification_t* __itt_set_error_handler(__itt_error_notification_t*);
+
+const char* ITTAPI __itt_api_version(void);
+/** @endcond */
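
A sketch of installing a custom handler through the hook above; the handler body is illustrative and reads only the library-name argument documented for __itt_error_no_module:

    #include <stdarg.h>
    #include <stdio.h>
    #include "internal/ittnotify.h"   /* include path is illustrative */

    static void demo_itt_error_handler(__itt_error_code code, va_list args)
    {
        if (code == __itt_error_no_module) {
            /* Per the format comment above, the first argument is the library name. */
            const char *library = va_arg(args, const char *);
            fprintf(stderr, "ittnotify: could not load %s\n", library);
        } else {
            fprintf(stderr, "ittnotify: error code %d\n", (int)code);
        }
    }

    static void demo_install_error_handler(void)
    {
        /* The previously installed handler is returned. */
        __itt_error_notification_t *previous = __itt_set_error_handler(demo_itt_error_handler);
        (void)previous;
    }
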
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+#define __itt_error_handler ITT_JOIN(INTEL_ITTNOTIFY_PREFIX, error_handler)
+void __itt_error_handler(__itt_error_code code, va_list args);
+extern const int ITTNOTIFY_NAME(err);
+#define __itt_err ITTNOTIFY_NAME(err)
+ITT_STUB(ITTAPI, const char*, api_version, (void), ())
+#define __itt_api_version     ITTNOTIFY_DATA(api_version)
+#define __itt_api_version_ptr ITTNOTIFY_NAME(api_version)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_api_version()   (const char*)0
+#define __itt_api_version_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_api_version_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/** @cond exclude_from_documentation */
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+/** @endcond */
+
+#endif /* _INTERNAL_ITTNOTIFY_H_ */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/ittnotify.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/ittnotify.h
new file mode 100644 (file)
index 0000000..254f7d3
--- /dev/null
@@ -0,0 +1,1409 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _ITTNOTIFY_H_
+#define _ITTNOTIFY_H_
+/**
+ * @file
+ * @brief Public User API functions and types
+ * @mainpage
+ * Provides the ability to control the collection during runtime. User API calls can be inserted into the user application.
+ * Commands include:
+ *   - Collection control
+ *   - Marking
+ *   - Thread manipulation
+ *   - User-defined synchronization primitives
+ *
+ * The User API provides the ability to control the collection, to set marks at the execution of specific user code, and
+ * to specify custom synchronization primitives implemented without standard system APIs.
+ *
+ * Use case: the user inserts API calls at the desired places in the code. The code is then compiled and
+ * linked with the static part of the User API library. The code can be recompiled with a specific macro defined
+ * to enable the API calls. If this macro is not defined there is no run-time overhead and no need to link
+ * with the static part of the User API library. During runtime the static library loads and initializes the dynamic part.
+ * In case of instrumentation-based collection, only a stub library is loaded; otherwise a proxy library is loaded,
+ * which calls the collector.
+ *
+ * The User API set is native (C/C++) only (no MRTE support). As a mitigation, JNI or a C/C++ function
+ * call from managed code can be used where needed. If the collector causes significant overhead or data storage, then
+ * pausing the analysis should reduce the overhead to minimal levels.
+ */
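
A sketch of the use case described above: the application hides the calls behind its own build-time switch (the macro names MY_APP_USE_ITTNOTIFY and APP_ITT_* are made up), so builds without the switch carry no run-time overhead and need no static User API library:

    #ifdef MY_APP_USE_ITTNOTIFY      /* build-time switch; the macro name is made up */
    #  include "ittnotify.h"
    #  define APP_ITT_PAUSE()  __itt_pause()
    #  define APP_ITT_RESUME() __itt_resume()
    #else                            /* annotations compile away completely          */
    #  define APP_ITT_PAUSE()  ((void)0)
    #  define APP_ITT_RESUME() ((void)0)
    #endif

    void my_app_compute(void)
    {
        APP_ITT_PAUSE();    /* skip collection during uninteresting setup */
        /* ... setup ... */
        APP_ITT_RESUME();   /* collect data for the region of interest    */
        /* ... region of interest ... */
    }
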
+
+/** @cond exclude_from_documentation */
+#ifndef ITT_OS_WIN
+#  define ITT_OS_WIN   1
+#endif /* ITT_OS_WIN */
+
+#ifndef ITT_OS_LINUX
+#  define ITT_OS_LINUX 2
+#endif /* ITT_OS_LINUX */
+
+#ifndef ITT_OS_MAC
+#  define ITT_OS_MAC   3
+#endif /* ITT_OS_MAC */
+
+#ifndef ITT_OS
+#  if defined WIN32 || defined _WIN32
+#    define ITT_OS ITT_OS_WIN
+#  elif defined( __APPLE__ ) && defined( __MACH__ )
+#    define ITT_OS ITT_OS_MAC
+#  else
+#    define ITT_OS ITT_OS_LINUX
+#  endif
+#endif /* ITT_OS */
+
+#ifndef ITT_PLATFORM_WIN
+#  define ITT_PLATFORM_WIN 1
+#endif /* ITT_PLATFORM_WIN */
+
+#ifndef ITT_PLATFORM_POSIX
+#  define ITT_PLATFORM_POSIX 2
+#endif /* ITT_PLATFORM_POSIX */
+
+#ifndef ITT_PLATFORM
+#  if ITT_OS==ITT_OS_WIN
+#    define ITT_PLATFORM ITT_PLATFORM_WIN
+#  else
+#    define ITT_PLATFORM ITT_PLATFORM_POSIX
+#  endif /* _WIN32 */
+#endif /* ITT_PLATFORM */
+
+#include <stddef.h>
+#include <stdarg.h>
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#include <tchar.h>
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+#ifndef CDECL
+#  if ITT_PLATFORM==ITT_PLATFORM_WIN
+#    define CDECL __cdecl
+#  else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#    define CDECL /* nothing */
+#  endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* CDECL */
+
+#ifndef STDCALL
+#  if ITT_PLATFORM==ITT_PLATFORM_WIN
+#    define STDCALL __stdcall
+#  else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#    define STDCALL /* nothing */
+#  endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* STDCALL */
+
+#define ITTAPI    CDECL
+#define LIBITTAPI /* nothing */
+
+#ifdef INTEL_ITTNOTIFY_ENABLE_LEGACY
+#  if ITT_PLATFORM==ITT_PLATFORM_WIN
+#    pragma message("WARNING!!! Deprecated API is used. Please undefine INTEL_ITTNOTIFY_ENABLE_LEGACY macro")
+#  else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+// #warning usage leads to ICC's compilation error
+// #    warning "Deprecated API is used. Please undefine INTEL_ITTNOTIFY_ENABLE_LEGACY macro"
+#  endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#  include "legacy/ittnotify.h"
+#endif /* INTEL_ITTNOTIFY_ENABLE_LEGACY */
+
+#define ITT_JOIN_AUX(p,n) p##n
+#define ITT_JOIN(p,n)     ITT_JOIN_AUX(p,n)
+
+#ifndef INTEL_ITTNOTIFY_PREFIX
+#  define INTEL_ITTNOTIFY_PREFIX __itt_
+#endif /* INTEL_ITTNOTIFY_PREFIX */
+#ifndef INTEL_ITTNOTIFY_POSTFIX
+#  define INTEL_ITTNOTIFY_POSTFIX _ptr_
+#endif /* INTEL_ITTNOTIFY_POSTFIX */
+
+#define ITTNOTIFY_NAME_AUX(n) ITT_JOIN(INTEL_ITTNOTIFY_PREFIX,n)
+#define ITTNOTIFY_NAME(n)     ITTNOTIFY_NAME_AUX(ITT_JOIN(n,INTEL_ITTNOTIFY_POSTFIX))
+
+#define ITTNOTIFY_VOID(n) (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)
+#define ITTNOTIFY_DATA(n) (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)
+
+#ifdef ITT_STUB
+#undef ITT_STUB
+#endif
+#ifdef ITT_STUBV
+#undef ITT_STUBV
+#endif
+#define ITT_STUBV(api,type,name,args,params)                      \
+    typedef type (api* ITT_JOIN(ITTNOTIFY_NAME(name),_t)) args;   \
+    extern ITT_JOIN(ITTNOTIFY_NAME(name),_t) ITTNOTIFY_NAME(name);
+#define ITT_STUB ITT_STUBV
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+/** @endcond */
+
+/**
+ * @defgroup public Public API
+ * @{
+ * @}
+ */
+
+/**
+ * @defgroup control Collection Control
+ * @ingroup public
+ * General behavior: the application continues to run, but no profiling information is being collected
+ *
+ * Pausing occurs not only for the current thread but for the whole process as well as for spawned processes
+ * - Intel(R) Parallel Inspector:
+ *   - Does not analyze or report errors that involve memory access.
+ *   - Other errors are reported as usual. Pausing data collection in
+ *     Intel(R) Parallel Inspector only pauses tracing and analyzing
+ *     memory access. It does not pause tracing or analyzing threading APIs.
+ *   .
+ * - Intel(R) Parallel Amplifier:
+ *   - Does continue to record when new threads are started.
+ *   .
+ * - Other effects:
+ *   - Possible reduction of runtime overhead.
+ *   .
+ * @{
+ */
+/** @brief Pause collection */
+void ITTAPI __itt_pause(void);
+/** @brief Resume collection */
+void ITTAPI __itt_resume(void);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, pause,  (void), ())
+ITT_STUBV(ITTAPI, void, resume, (void), ())
+#define __itt_pause      ITTNOTIFY_VOID(pause)
+#define __itt_pause_ptr  ITTNOTIFY_NAME(pause)
+#define __itt_resume     ITTNOTIFY_VOID(resume)
+#define __itt_resume_ptr ITTNOTIFY_NAME(resume)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_pause()
+#define __itt_pause_ptr  0
+#define __itt_resume()
+#define __itt_resume_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_pause_ptr  0
+#define __itt_resume_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+/** @} control group */
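
A short sketch of the control calls above around a benchmark; warm_up_caches and region_of_interest are placeholders:

    #include "ittnotify.h"

    void warm_up_caches(void);        /* placeholder for uninteresting work      */
    void region_of_interest(void);    /* placeholder for the code to be profiled */

    void run_benchmark(void)
    {
        __itt_pause();                /* pauses collection for the whole process */
        warm_up_caches();
        __itt_resume();               /* collection is active again from here on */
        region_of_interest();
    }
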
+
+/**
+ * @defgroup threads Threads
+ * @ingroup public
+ * Threads name group
+ * @{
+ */
+/**
+ * @brief Sets thread name using char or Unicode string
+ * @param[in] name - name of thread
+ */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+void ITTAPI __itt_thread_set_nameA(const char    *name);
+void ITTAPI __itt_thread_set_nameW(const wchar_t *name);
+#ifdef UNICODE
+#  define __itt_thread_set_name     __itt_thread_set_nameW
+#  define __itt_thread_set_name_ptr __itt_thread_set_nameW_ptr
+#else /* UNICODE */
+#  define __itt_thread_set_name     __itt_thread_set_nameA
+#  define __itt_thread_set_name_ptr __itt_thread_set_nameA_ptr
+#endif /* UNICODE */
+#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+void ITTAPI __itt_thread_set_name(const char *name);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUBV(ITTAPI, void, thread_set_nameA, (const char    *name), (name))
+ITT_STUBV(ITTAPI, void, thread_set_nameW, (const wchar_t *name), (name))
+#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUBV(ITTAPI, void, thread_set_name,  (const char    *name), (name))
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_thread_set_nameA     ITTNOTIFY_VOID(thread_set_nameA)
+#define __itt_thread_set_nameA_ptr ITTNOTIFY_NAME(thread_set_nameA)
+#define __itt_thread_set_nameW     ITTNOTIFY_VOID(thread_set_nameW)
+#define __itt_thread_set_nameW_ptr ITTNOTIFY_NAME(thread_set_nameW)
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_thread_set_name     ITTNOTIFY_VOID(thread_set_name)
+#define __itt_thread_set_name_ptr ITTNOTIFY_NAME(thread_set_name)
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_thread_set_nameA(name)
+#define __itt_thread_set_nameA_ptr 0
+#define __itt_thread_set_nameW(name)
+#define __itt_thread_set_nameW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_thread_set_name(name)
+#define __itt_thread_set_name_ptr 0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_thread_set_nameA_ptr 0
+#define __itt_thread_set_nameW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_thread_set_name_ptr 0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Mark current thread as ignored from this point on, for the duration of its existence.
+ */
+void ITTAPI __itt_thread_ignore(void);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, thread_ignore, (void), ())
+#define __itt_thread_ignore     ITTNOTIFY_VOID(thread_ignore)
+#define __itt_thread_ignore_ptr ITTNOTIFY_NAME(thread_ignore)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_thread_ignore()
+#define __itt_thread_ignore_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_thread_ignore_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+/** @} threads group */
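
A sketch of the thread calls above inside worker entry functions; the function names are made up and the signatures simply match a typical thread start routine (e.g. the one passed to pthread_create):

    #include "ittnotify.h"

    static void *worker_main(void *arg)
    {
        __itt_thread_set_name("worker");   /* label this thread in the analysis tool   */
        /* ... useful work ... */
        return arg;
    }

    static void *housekeeping_main(void *arg)
    {
        __itt_thread_ignore();             /* exclude this helper thread from analysis */
        /* ... periodic housekeeping ... */
        return arg;
    }
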
+
+/**
+ * @defgroup sync Synchronization
+ * @ingroup public
+ * Synchronization group
+ * @{
+ */
+/**
+ * @hideinitializer
+ * @brief possible value of attribute argument for sync object type
+ */
+#define __itt_attr_barrier 1
+
+/**
+ * @hideinitializer
+ * @brief possible value of attribute argument for sync object type
+ */
+#define __itt_attr_mutex   2
+
+/**
+ * @brief Register the creation of a sync object using char or Unicode string
+ * @param[in] addr      - pointer to the sync object. You should use a real pointer to your object
+ *                        to make sure that the values don't clash with other object addresses
+ * @param[in] objtype   - null-terminated object type string. If NULL is passed, the object will
+ *                        be assumed to be of generic "User Synchronization" type
+ * @param[in] objname   - null-terminated object name string. If NULL, no name will be assigned
+ *                        to the object -- you can use the __itt_sync_rename call later to assign
+ *                        the name
+ * @param[in] attribute - one of [#__itt_attr_barrier, #__itt_attr_mutex] values which defines the
+ *                        exact semantics of how prepare/acquired/releasing calls work.
+ */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+void ITTAPI __itt_sync_createA(void *addr, const char    *objtype, const char    *objname, int attribute);
+void ITTAPI __itt_sync_createW(void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute);
+#ifdef UNICODE
+#  define __itt_sync_create     __itt_sync_createW
+#  define __itt_sync_create_ptr __itt_sync_createW_ptr
+#else /* UNICODE */
+#  define __itt_sync_create     __itt_sync_createA
+#  define __itt_sync_create_ptr __itt_sync_createA_ptr
+#endif /* UNICODE */
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+void ITTAPI __itt_sync_create (void *addr, const char *objtype, const char *objname, int attribute);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUBV(ITTAPI, void, sync_createA, (void *addr, const char    *objtype, const char    *objname, int attribute), (addr, objtype, objname, attribute))
+ITT_STUBV(ITTAPI, void, sync_createW, (void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute), (addr, objtype, objname, attribute))
+#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUBV(ITTAPI, void, sync_create,  (void *addr, const char* objtype, const char* objname, int attribute), (addr, objtype, objname, attribute))
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_sync_createA     ITTNOTIFY_VOID(sync_createA)
+#define __itt_sync_createA_ptr ITTNOTIFY_NAME(sync_createA)
+#define __itt_sync_createW     ITTNOTIFY_VOID(sync_createW)
+#define __itt_sync_createW_ptr ITTNOTIFY_NAME(sync_createW)
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_sync_create     ITTNOTIFY_VOID(sync_create)
+#define __itt_sync_create_ptr ITTNOTIFY_NAME(sync_create)
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_sync_createA(addr, objtype, objname, attribute)
+#define __itt_sync_createA_ptr 0
+#define __itt_sync_createW(addr, objtype, objname, attribute)
+#define __itt_sync_createW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_sync_create(addr, objtype, objname, attribute)
+#define __itt_sync_create_ptr 0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_sync_createA_ptr 0
+#define __itt_sync_createW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_sync_create_ptr 0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Assign a name to a sync object using char or Unicode string.
+ *
+ * Sometimes you cannot assign the name to a sync object in the __itt_sync_set_name() call because it
+ * is not yet known there. In this case you should use the rename call, which allows you to assign the
+ * name after the creation has been registered. The renaming can be done multiple times. All waits
+ * after a new name has been assigned will be attributed to the sync object with this name.
+ * @param[in] addr - pointer to the sync object
+ * @param[in] name - null-terminated object name string
+ */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+void ITTAPI __itt_sync_renameA(void *addr, const char    *name);
+void ITTAPI __itt_sync_renameW(void *addr, const wchar_t *name);
+#ifdef UNICODE
+#  define __itt_sync_rename     __itt_sync_renameW
+#  define __itt_sync_rename_ptr __itt_sync_renameW_ptr
+#else /* UNICODE */
+#  define __itt_sync_rename     __itt_sync_renameA
+#  define __itt_sync_rename_ptr __itt_sync_renameA_ptr
+#endif /* UNICODE */
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+void ITTAPI __itt_sync_rename(void *addr, const char *name);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUBV(ITTAPI, void, sync_renameA, (void *addr, const char    *name), (addr, name))
+ITT_STUBV(ITTAPI, void, sync_renameW, (void *addr, const wchar_t *name), (addr, name))
+#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUBV(ITTAPI, void, sync_rename,  (void *addr, const char    *name), (addr, name))
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_sync_renameA     ITTNOTIFY_VOID(sync_renameA)
+#define __itt_sync_renameA_ptr ITTNOTIFY_NAME(sync_renameA)
+#define __itt_sync_renameW     ITTNOTIFY_VOID(sync_renameW)
+#define __itt_sync_renameW_ptr ITTNOTIFY_NAME(sync_renameW)
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_sync_rename     ITTNOTIFY_VOID(sync_rename)
+#define __itt_sync_rename_ptr ITTNOTIFY_NAME(sync_rename)
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_sync_renameA(addr, name)
+#define __itt_sync_renameA_ptr 0
+#define __itt_sync_renameW(addr, name)
+#define __itt_sync_renameW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_sync_rename(addr, name)
+#define __itt_sync_rename_ptr 0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_sync_renameA_ptr 0
+#define __itt_sync_renameW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_sync_rename_ptr 0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Called when a sync object is destroyed (needed to track the lifetime of objects)
+ */
+void ITTAPI __itt_sync_destroy(void *addr);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, sync_destroy, (void *addr), (addr))
+#define __itt_sync_destroy     ITTNOTIFY_VOID(sync_destroy)
+#define __itt_sync_destroy_ptr ITTNOTIFY_NAME(sync_destroy)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_sync_destroy(addr)
+#define __itt_sync_destroy_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_sync_destroy_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/*****************************************************************//**
+ * @name Group of functions used by performance measurement tools
+ *********************************************************************/
+/** @{ */
+/**
+ * @brief Enter spin loop on user-defined sync object
+ */
+void ITTAPI __itt_sync_prepare(void* addr);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, sync_prepare, (void *addr), (addr))
+#define __itt_sync_prepare     ITTNOTIFY_VOID(sync_prepare)
+#define __itt_sync_prepare_ptr ITTNOTIFY_NAME(sync_prepare)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_sync_prepare(addr)
+#define __itt_sync_prepare_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_sync_prepare_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Quit spin loop without acquiring spin object
+ */
+void ITTAPI __itt_sync_cancel(void *addr);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, sync_cancel, (void *addr), (addr))
+#define __itt_sync_cancel     ITTNOTIFY_VOID(sync_cancel)
+#define __itt_sync_cancel_ptr ITTNOTIFY_NAME(sync_cancel)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_sync_cancel(addr)
+#define __itt_sync_cancel_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_sync_cancel_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Successful spin loop completion (sync object acquired)
+ */
+void ITTAPI __itt_sync_acquired(void *addr);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, sync_acquired, (void *addr), (addr))
+#define __itt_sync_acquired     ITTNOTIFY_VOID(sync_acquired)
+#define __itt_sync_acquired_ptr ITTNOTIFY_NAME(sync_acquired)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_sync_acquired(addr)
+#define __itt_sync_acquired_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_sync_acquired_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Start of the sync object releasing code. Called just before the lock release call.
+ */
+void ITTAPI __itt_sync_releasing(void* addr);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, sync_releasing, (void *addr), (addr))
+#define __itt_sync_releasing     ITTNOTIFY_VOID(sync_releasing)
+#define __itt_sync_releasing_ptr ITTNOTIFY_NAME(sync_releasing)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_sync_releasing(addr)
+#define __itt_sync_releasing_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_sync_releasing_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+/** @} */
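
A sketch of instrumenting a user-level spin lock with the sync calls above; the lock uses GCC __sync builtins and the object type/name strings are illustrative:

    #include "ittnotify.h"

    typedef struct { volatile int busy; } demo_spinlock;   /* plain user-level spin lock */

    static void demo_spinlock_init(demo_spinlock *lock)
    {
        lock->busy = 0;
        /* Register the object; its address identifies it in the analysis tool. */
        __itt_sync_create(lock, "demo_spinlock", "queue lock", __itt_attr_mutex);
    }

    static void demo_spinlock_acquire(demo_spinlock *lock)
    {
        __itt_sync_prepare(lock);                          /* entering the spin loop     */
        while (__sync_lock_test_and_set(&lock->busy, 1))
            ;                                              /* spin until acquired        */
        __itt_sync_acquired(lock);                         /* spin finished, lock held   */
    }

    static void demo_spinlock_release(demo_spinlock *lock)
    {
        __itt_sync_releasing(lock);                        /* just before the release    */
        __sync_lock_release(&lock->busy);
    }

    static void demo_spinlock_destroy(demo_spinlock *lock)
    {
        __itt_sync_destroy(lock);                          /* track end of object's life */
    }
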
+
+/**************************************************************//**
+ * @name Group of functions used by correctness checking tools
+ ******************************************************************/
+/** @{ */
+/**
+ * @brief Fast synchronization which does not require spinning.
+ * - This special function is to be used by TBB and OpenMP libraries only when they know
+ *   there is no spin but they need to suppress TC warnings about shared variable modifications.
+ * - It only has corresponding pointers in the static library and does not have a corresponding function
+ *   in the dynamic library.
+ * @see void __itt_sync_prepare(void* addr);
+ */
+void ITTAPI __itt_fsync_prepare(void* addr);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, fsync_prepare, (void *addr), (addr))
+#define __itt_fsync_prepare     ITTNOTIFY_VOID(fsync_prepare)
+#define __itt_fsync_prepare_ptr ITTNOTIFY_NAME(fsync_prepare)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_fsync_prepare(addr)
+#define __itt_fsync_prepare_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_fsync_prepare_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Fast synchronization which does not require spinning.
+ * - This special function is to be used by TBB and OpenMP libraries only when they know
+ *   there is no spin but they need to suppress TC warnings about shared variable modifications.
+ * - It only has corresponding pointers in the static library and does not have a corresponding function
+ *   in the dynamic library.
+ * @see void __itt_sync_cancel(void *addr);
+ */
+void ITTAPI __itt_fsync_cancel(void *addr);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, fsync_cancel, (void *addr), (addr))
+#define __itt_fsync_cancel     ITTNOTIFY_VOID(fsync_cancel)
+#define __itt_fsync_cancel_ptr ITTNOTIFY_NAME(fsync_cancel)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_fsync_cancel(addr)
+#define __itt_fsync_cancel_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_fsync_cancel_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Fast synchronization which does not require spinning.
+ * - This special function is to be used by TBB and OpenMP libraries only when they know
+ *   there is no spin but they need to suppress TC warnings about shared variable modifications.
+ * - It only has corresponding pointers in the static library and does not have a corresponding function
+ *   in the dynamic library.
+ * @see void __itt_sync_acquired(void *addr);
+ */
+void ITTAPI __itt_fsync_acquired(void *addr);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, fsync_acquired, (void *addr), (addr))
+#define __itt_fsync_acquired     ITTNOTIFY_VOID(fsync_acquired)
+#define __itt_fsync_acquired_ptr ITTNOTIFY_NAME(fsync_acquired)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_fsync_acquired(addr)
+#define __itt_fsync_acquired_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_fsync_acquired_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Fast synchronization which does not require spinning.
+ * - This special function is to be used by TBB and OpenMP libraries only when they know
+ *   there is no spin but they need to suppress TC warnings about shared variable modifications.
+ * - It only has corresponding pointers in the static library and does not have a corresponding function
+ *   in the dynamic library.
+ * @see void __itt_sync_releasing(void* addr);
+ */
+void ITTAPI __itt_fsync_releasing(void* addr);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, fsync_releasing, (void *addr), (addr))
+#define __itt_fsync_releasing     ITTNOTIFY_VOID(fsync_releasing)
+#define __itt_fsync_releasing_ptr ITTNOTIFY_NAME(fsync_releasing)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_fsync_releasing(addr)
+#define __itt_fsync_releasing_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_fsync_releasing_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+/** @} */
+/** @} sync group */
+
+/**
+ * @defgroup model Modeling by Advisor
+ * @ingroup public
+ * This is the subset of itt used for modeling by Advisor.
+ * This API is called ONLY using annotate.h, by "Annotation" macros
+ * the user places in their sources during the parallelism modeling steps.
+ *
+ * The requirements, constraints, design and implementation
+ * for this interface are covered in:
+ * Shared%20Documents/Design%20Documents/AdvisorAnnotations.doc
+ *
+ * site_begin/end and task_begin/end take the address of handle variables,
+ * which are writeable by the API.  Handles must be 0 initialized prior
+ * to the first call to begin, or may cause a run-time failure.
+ * The handles are initialized in a multi-thread safe way by the API if
+ * the handle is 0.  The commonly expected idiom is one static handle to
+ * identify a site or task.  If a site or task of the same name has already
+ * been started during this collection, the same handle MAY be returned,
+ * but is not required to be - it is unspecified if data merging is done
+ * based on name.  These routines also take an instance variable.  Like
+ * the lexical instance, these must be 0 initialized.  Unlike the lexical
+ * instance, this is used to track a single dynamic instance.
+ *
+ * API used by the Intel Parallel Advisor to describe potential concurrency
+ * and related activities. User-added source annotations expand to calls
+ * to these procedures to enable modeling of a hypothetical concurrent
+ * execution serially.
+ * @{
+ */
+typedef void* __itt_model_site;             /*!< @brief handle for lexical site     */
+typedef void* __itt_model_site_instance;    /*!< @brief handle for dynamic instance */
+typedef void* __itt_model_task;             /*!< @brief handle for lexical task     */
+typedef void* __itt_model_task_instance;    /*!< @brief handle for dynamic instance */
+
+/**
+ * @enum __itt_model_disable
+ * @brief Enumerator for the disable methods
+ */
+typedef enum {
+    __itt_model_disable_observation,
+    __itt_model_disable_collection
+} __itt_model_disable;
+
+/**
+ * @brief ANNOTATE_SITE_BEGIN/ANNOTATE_SITE_END support.
+ *
+ * site_begin/end model a potential concurrency site.
+ * site instances may be recursively nested with themselves.
+ * site_end exits the most recently started but unended site for the current
+ * thread.  The handle passed to end may be used to validate structure.
+ * Instances of a site encountered on different threads concurrently
+ * are considered completely distinct. If the site name for two different
+ * lexical sites match, it is unspecified whether they are treated as the
+ * same or different for data presentation.
+ */
+void ITTAPI __itt_model_site_begin(__itt_model_site *site, __itt_model_site_instance *instance, const char *name);
+void ITTAPI __itt_model_site_end  (__itt_model_site *site, __itt_model_site_instance *instance);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, model_site_begin,  (__itt_model_site *site, __itt_model_site_instance *instance, const char *name), (site, instance, name))
+ITT_STUBV(ITTAPI, void, model_site_end,    (__itt_model_site *site, __itt_model_site_instance *instance), (site, instance))
+#define __itt_model_site_begin      ITTNOTIFY_VOID(model_site_begin)
+#define __itt_model_site_begin_ptr  ITTNOTIFY_NAME(model_site_begin)
+#define __itt_model_site_end        ITTNOTIFY_VOID(model_site_end)
+#define __itt_model_site_end_ptr    ITTNOTIFY_NAME(model_site_end)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_model_site_begin(site, instance, name)
+#define __itt_model_site_begin_ptr  0
+#define __itt_model_site_end(site, instance)
+#define __itt_model_site_end_ptr    0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_model_site_begin_ptr  0
+#define __itt_model_site_end_ptr    0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief ANNOTATE_TASK_BEGIN/ANNOTATE_TASK_END support
+ *
+ * task_begin/end model a potential task, which is contained within the most
+ * closely enclosing dynamic site.  task_end exits the most recently started
+ * but unended task.  The handle passed to end may be used to validate
+ * structure.  It is unspecified if bad dynamic nesting is detected.  If it
+ * is, it should be encoded in the resulting data collection.  The collector
+ * should not fail due to construct nesting issues, nor attempt to directly
+ * indicate the problem.
+ */
+void ITTAPI __itt_model_task_begin(__itt_model_task *task, __itt_model_task_instance *instance, const char *name);
+void ITTAPI __itt_model_task_end  (__itt_model_task *task, __itt_model_task_instance *instance);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, model_task_begin,  (__itt_model_task *task, __itt_model_task_instance *instance, const char *name), (task, instance, name))
+ITT_STUBV(ITTAPI, void, model_task_end,    (__itt_model_task *task, __itt_model_task_instance *instance), (task, instance))
+#define __itt_model_task_begin      ITTNOTIFY_VOID(model_task_begin)
+#define __itt_model_task_begin_ptr  ITTNOTIFY_NAME(model_task_begin)
+#define __itt_model_task_end        ITTNOTIFY_VOID(model_task_end)
+#define __itt_model_task_end_ptr    ITTNOTIFY_NAME(model_task_end)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_model_task_begin(task, instance, name)
+#define __itt_model_task_begin_ptr  0
+#define __itt_model_task_end(task, instance)
+#define __itt_model_task_end_ptr    0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_model_task_begin_ptr  0
+#define __itt_model_task_end_ptr    0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
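
A minimal usage sketch (illustrative, not part of the committed file): the site/task annotations above bracket a candidate parallel region and the work items inside it. The function and annotation names below are hypothetical; the handles use the opaque model types declared earlier in this header.

#include "ittnotify.h"

void process_items(int n)
{
    static __itt_model_site          site;
    static __itt_model_site_instance site_instance;
    static __itt_model_task          task;
    static __itt_model_task_instance task_instance;
    int i;

    __itt_model_site_begin(&site, &site_instance, "process_items_site");
    for (i = 0; i < n; i++)
    {
        __itt_model_task_begin(&task, &task_instance, "process_one_item");
        /* ... work that is a candidate for running as a parallel task ... */
        __itt_model_task_end(&task, &task_instance);
    }
    __itt_model_site_end(&site, &site_instance);
}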
+
+/**
+ * @brief ANNOTATE_LOCK_ACQUIRE/ANNOTATE_LOCK_RELEASE support
+ *
+ * lock_acquire/release model a potential lock for both lockset and
+ * performance modeling.  Each unique address is modeled as a separate
+ * lock, with invalid addresses being valid lock IDs.  Specifically:
+ * no storage is accessed by the API at the specified address - it is only
+ * used for lock identification.  Lock acquires may be self-nested and are
+ * unlocked by a corresponding number of releases.
+ * (These closely correspond to __itt_sync_acquired/__itt_sync_releasing,
+ * but may not have identical semantics.)
+ */
+void ITTAPI __itt_model_lock_acquire(void *lock);
+void ITTAPI __itt_model_lock_release(void *lock);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, model_lock_acquire, (void *lock), (lock))
+ITT_STUBV(ITTAPI, void, model_lock_release, (void *lock), (lock))
+#define __itt_model_lock_acquire     ITTNOTIFY_VOID(model_lock_acquire)
+#define __itt_model_lock_acquire_ptr ITTNOTIFY_NAME(model_lock_acquire)
+#define __itt_model_lock_release     ITTNOTIFY_VOID(model_lock_release)
+#define __itt_model_lock_release_ptr ITTNOTIFY_NAME(model_lock_release)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_model_lock_acquire(lock)
+#define __itt_model_lock_acquire_ptr 0
+#define __itt_model_lock_release(lock)
+#define __itt_model_lock_release_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_model_lock_acquire_ptr 0
+#define __itt_model_lock_release_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
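
A hedged sketch of how these two calls are typically wrapped around a user-level lock; the lock itself is only hinted at, and any unique address serves as the lock identifier since the API never dereferences it.

#include "ittnotify.h"

static int my_lock_word;   /* storage is never read by the API; its address is the lock id */

void my_lock(void)
{
    /* ... acquire the real lock here ... */
    __itt_model_lock_acquire(&my_lock_word);
}

void my_unlock(void)
{
    __itt_model_lock_release(&my_lock_word);
    /* ... release the real lock here ... */
}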
+
+/**
+ * @brief ANNOTATE_RECORD_ALLOCATION/ANNOTATE_RECORD_DEALLOCATION support
+ *
+ * record_allocation/deallocation describe user-defined memory allocator
+ * behavior, which may be required for correctness modeling to understand
+ * when storage is not expected to be actually reused across threads.
+ */
+void ITTAPI __itt_model_record_allocation  (void *addr, size_t size);
+void ITTAPI __itt_model_record_deallocation(void *addr);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, model_record_allocation,   (void *addr, size_t size), (addr, size))
+ITT_STUBV(ITTAPI, void, model_record_deallocation, (void *addr),              (addr))
+#define __itt_model_record_allocation       ITTNOTIFY_VOID(model_record_allocation)
+#define __itt_model_record_allocation_ptr   ITTNOTIFY_NAME(model_record_allocation)
+#define __itt_model_record_deallocation     ITTNOTIFY_VOID(model_record_deallocation)
+#define __itt_model_record_deallocation_ptr ITTNOTIFY_NAME(model_record_deallocation)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_model_record_allocation(addr, size)
+#define __itt_model_record_allocation_ptr   0
+#define __itt_model_record_deallocation(addr)
+#define __itt_model_record_deallocation_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_model_record_allocation_ptr   0
+#define __itt_model_record_deallocation_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
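
A sketch of how a user-defined allocator might report recycled blocks so that correctness modeling does not treat block reuse as a cross-thread conflict; pool_alloc/pool_free are hypothetical, and malloc/free stand in for the pool logic.

#include <stdlib.h>
#include "ittnotify.h"

void* pool_alloc(size_t size)
{
    void* block = malloc(size);              /* stand-in for taking a block from a pool */
    if (block != NULL)
        __itt_model_record_allocation(block, size);
    return block;
}

void pool_free(void* block)
{
    __itt_model_record_deallocation(block);
    free(block);                             /* stand-in for returning the block to the pool */
}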
+
+/**
+ * @brief ANNOTATE_INDUCTION_USES support
+ *
+ * Note that particular storage is used as an induction variable through the end of the current site
+ */
+void ITTAPI __itt_model_induction_uses(void* addr, size_t size);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, model_induction_uses, (void *addr, size_t size), (addr, size))
+#define __itt_model_induction_uses     ITTNOTIFY_VOID(model_induction_uses)
+#define __itt_model_induction_uses_ptr ITTNOTIFY_NAME(model_induction_uses)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_model_induction_uses(addr, size)
+#define __itt_model_induction_uses_ptr   0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_model_induction_uses_ptr   0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief ANNOTATE_REDUCTION_USES support
+ *
+ * Note particular storage is used for reduction through the end
+ * of the current site
+ */
+void ITTAPI __itt_model_reduction_uses(void* addr, size_t size);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, model_reduction_uses, (void *addr, size_t size), (addr, size))
+#define __itt_model_reduction_uses     ITTNOTIFY_VOID(model_reduction_uses)
+#define __itt_model_reduction_uses_ptr ITTNOTIFY_NAME(model_reduction_uses)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_model_reduction_uses(addr, size)
+#define __itt_model_reduction_uses_ptr   0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_model_reduction_uses_ptr   0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief ANNOTATE_OBSERVE_USES support
+ *
+ * Have correctness modeling record observations about uses of storage
+ * through the end of the current site
+ */
+void ITTAPI __itt_model_observe_uses(void* addr, size_t size);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, model_observe_uses, (void *addr, size_t size), (addr, size))
+#define __itt_model_observe_uses     ITTNOTIFY_VOID(model_observe_uses)
+#define __itt_model_observe_uses_ptr ITTNOTIFY_NAME(model_observe_uses)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_model_observe_uses(addr, size)
+#define __itt_model_observe_uses_ptr   0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_model_observe_uses_ptr   0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief ANNOTATE_CLEAR_USES support
+ *
+ * Clear the special handling of a piece of storage related to induction,
+ * reduction or observe_uses
+ */
+void ITTAPI __itt_model_clear_uses(void* addr);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, model_clear_uses, (void *addr), (addr))
+#define __itt_model_clear_uses     ITTNOTIFY_VOID(model_clear_uses)
+#define __itt_model_clear_uses_ptr ITTNOTIFY_NAME(model_clear_uses)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_model_clear_uses(addr)
+#define __itt_model_clear_uses_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_model_clear_uses_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
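
Taken together, the *_uses annotations above let a loop be modeled as parallelizable even though its induction variable and running sum look like loop-carried dependences. A hedged sketch (function, site name, and loop are illustrative):

#include "ittnotify.h"

double sum_array(const double* a, int n)
{
    static __itt_model_site          site;
    static __itt_model_site_instance instance;
    double sum = 0.0;
    int i;

    __itt_model_site_begin(&site, &instance, "sum_array_site");
    __itt_model_induction_uses(&i,   sizeof(i));
    __itt_model_reduction_uses(&sum, sizeof(sum));
    for (i = 0; i < n; i++)
        sum += a[i];
    __itt_model_clear_uses(&i);              /* end the special handling early (optional) */
    __itt_model_clear_uses(&sum);
    __itt_model_site_end(&site, &instance);
    return sum;
}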
+
+/**
+ * @brief ANNOTATE_DISABLE_*_PUSH/ANNOTATE_DISABLE_*_POP support
+ *
+ * disable_push/disable_pop push and pop disabling based on a parameter.
+ * Disabling observations stops processing of memory references during
+ * correctness modeling, and all annotations that occur in the disabled
+ * region.  This allows description of code that is expected to be handled
+ * specially during conversion to parallelism or that is not recognized
+ * by tools (e.g. some kinds of synchronization operations.)
+ * This mechanism causes all annotations in the disabled region, other
+ * than disable_push and disable_pop, to be ignored.  (For example, this
+ * might validly be used to disable an entire parallel site and the contained
+ * tasks and locking in it for data collection purposes.)
+ * The disable for collection is a more expensive operation, but reduces
+ * collector overhead significantly.  This applies to BOTH correctness data
+ * collection and performance data collection.  For example, a site
+ * containing a task might only enable data collection for the first 10
+ * iterations.  Both performance and correctness data should reflect this,
+ * and the program should run as close to full speed as possible when
+ * collection is disabled.
+ */
+void ITTAPI __itt_model_disable_push(__itt_model_disable x);
+void ITTAPI __itt_model_disable_pop(void);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, model_disable_push, (__itt_model_disable x), (x))
+ITT_STUBV(ITTAPI, void, model_disable_pop,  (void),                  ())
+#define __itt_model_disable_push     ITTNOTIFY_VOID(model_disable_push)
+#define __itt_model_disable_push_ptr ITTNOTIFY_NAME(model_disable_push)
+#define __itt_model_disable_pop      ITTNOTIFY_VOID(model_disable_pop)
+#define __itt_model_disable_pop_ptr  ITTNOTIFY_NAME(model_disable_pop)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_model_disable_push(x)
+#define __itt_model_disable_push_ptr 0
+#define __itt_model_disable_pop()
+#define __itt_model_disable_pop_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_model_disable_push_ptr 0
+#define __itt_model_disable_pop_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+/** @} model group */
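
A sketch of the push/pop pairing for the disable annotations above; the enumerator name is assumed from the __itt_model_disable enum defined earlier in this header and should be checked against it.

#include "ittnotify.h"

void call_custom_sync(void)
{
    /* Assumed enumerator: observation-only disable, per the description above. */
    __itt_model_disable_push(__itt_model_disable_observation);
    /* ... hand-verified synchronization the tool would otherwise flag ... */
    __itt_model_disable_pop();
}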
+
+/**
+ * @defgroup frames Frames
+ * @ingroup public
+ * Frames group
+ * @{
+ */
+/**
+ * @brief opaque structure for frame identification
+ */
+typedef struct __itt_frame_t *__itt_frame;
+
+/**
+ * @brief Create a global frame with given domain
+ */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+__itt_frame ITTAPI __itt_frame_createA(const char    *domain);
+__itt_frame ITTAPI __itt_frame_createW(const wchar_t *domain);
+#ifdef UNICODE
+#  define __itt_frame_create     __itt_frame_createW
+#  define __itt_frame_create_ptr __itt_frame_createW_ptr
+#else /* UNICODE */
+#  define __itt_frame_create     __itt_frame_createA
+#  define __itt_frame_create_ptr __itt_frame_createA_ptr
+#endif /* UNICODE */
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+__itt_frame ITTAPI __itt_frame_create(const char *domain);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUB(ITTAPI, __itt_frame, frame_createA, (const char    *domain), (domain))
+ITT_STUB(ITTAPI, __itt_frame, frame_createW, (const wchar_t *domain), (domain))
+#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUB(ITTAPI, __itt_frame, frame_create,  (const char *domain), (domain))
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_frame_createA     ITTNOTIFY_DATA(frame_createA)
+#define __itt_frame_createA_ptr ITTNOTIFY_NAME(frame_createA)
+#define __itt_frame_createW     ITTNOTIFY_DATA(frame_createW)
+#define __itt_frame_createW_ptr ITTNOTIFY_NAME(frame_createW)
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_frame_create     ITTNOTIFY_DATA(frame_create)
+#define __itt_frame_create_ptr ITTNOTIFY_NAME(frame_create)
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_frame_createA(domain)
+#define __itt_frame_createA_ptr 0
+#define __itt_frame_createW(domain)
+#define __itt_frame_createW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_frame_create(domain)
+#define __itt_frame_create_ptr  0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_frame_createA_ptr 0
+#define __itt_frame_createW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_frame_create_ptr  0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/** @brief Record a frame begin occurrence. */
+void ITTAPI __itt_frame_begin(__itt_frame frame);
+/** @brief Record a frame end occurrence. */
+void ITTAPI __itt_frame_end  (__itt_frame frame);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, frame_begin, (__itt_frame frame), (frame))
+ITT_STUBV(ITTAPI, void, frame_end,   (__itt_frame frame), (frame))
+#define __itt_frame_begin     ITTNOTIFY_VOID(frame_begin)
+#define __itt_frame_begin_ptr ITTNOTIFY_NAME(frame_begin)
+#define __itt_frame_end       ITTNOTIFY_VOID(frame_end)
+#define __itt_frame_end_ptr   ITTNOTIFY_NAME(frame_end)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_frame_begin(frame)
+#define __itt_frame_begin_ptr 0
+#define __itt_frame_end(frame)
+#define __itt_frame_end_ptr   0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_frame_begin_ptr 0
+#define __itt_frame_end_ptr   0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+/** @} frames group */
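
Frames are typically used to delimit repeating units of work such as rendered frames or processed requests. A minimal sketch (domain name and loop are hypothetical; a narrow-string build is assumed, since on Windows __itt_frame_create maps to the A/W variant selected above):

#include "ittnotify.h"

void render_loop(int nframes)
{
    __itt_frame frame = __itt_frame_create("my_renderer");
    int i;

    for (i = 0; i < nframes; i++)
    {
        __itt_frame_begin(frame);
        /* ... produce one frame ... */
        __itt_frame_end(frame);
    }
}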
+
+/**
+ * @defgroup events Events
+ * @ingroup public
+ * Events group
+ * @{
+ */
+/** @brief user event type */
+typedef int __itt_event;
+
+/**
+ * @brief Create an event notification
+ * @note Fails if name is null or namelen is zero, if namelen does not match the length of name, or if the user event feature is not enabled
+ * @return non-zero event identifier upon success and __itt_err otherwise
+ */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+__itt_event LIBITTAPI __itt_event_createA(const char    *name, int namelen);
+__itt_event LIBITTAPI __itt_event_createW(const wchar_t *name, int namelen);
+#ifdef UNICODE
+#  define __itt_event_create     __itt_event_createW
+#  define __itt_event_create_ptr __itt_event_createW_ptr
+#else
+#  define __itt_event_create     __itt_event_createA
+#  define __itt_event_create_ptr __itt_event_createA_ptr
+#endif /* UNICODE */
+#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+__itt_event LIBITTAPI __itt_event_create(const char *name, int namelen);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUB(LIBITTAPI, __itt_event, event_createA, (const char    *name, int namelen), (name, namelen))
+ITT_STUB(LIBITTAPI, __itt_event, event_createW, (const wchar_t *name, int namelen), (name, namelen))
+#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUB(LIBITTAPI, __itt_event, event_create,  (const char *name, int namelen), (name, namelen))
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_event_createA     ITTNOTIFY_DATA(event_createA)
+#define __itt_event_createA_ptr ITTNOTIFY_NAME(event_createA)
+#define __itt_event_createW     ITTNOTIFY_DATA(event_createW)
+#define __itt_event_createW_ptr ITTNOTIFY_NAME(event_createW)
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_event_create      ITTNOTIFY_DATA(event_create)
+#define __itt_event_create_ptr  ITTNOTIFY_NAME(event_create)
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_event_createA(name, namelen) (__itt_event)0
+#define __itt_event_createA_ptr 0
+#define __itt_event_createW(name, namelen) (__itt_event)0
+#define __itt_event_createW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_event_create(name, namelen)  (__itt_event)0
+#define __itt_event_create_ptr  0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_event_createA_ptr 0
+#define __itt_event_createW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_event_create_ptr  0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Record an event occurrence.
+ * @return __itt_err upon failure (invalid event id/user event feature not enabled)
+ */
+int LIBITTAPI __itt_event_start(__itt_event event);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUB(LIBITTAPI, int, event_start, (__itt_event event), (event))
+#define __itt_event_start     ITTNOTIFY_DATA(event_start)
+#define __itt_event_start_ptr ITTNOTIFY_NAME(event_start)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_event_start(event) (int)0
+#define __itt_event_start_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_event_start_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Record an event end occurrence.
+ * @note This call is optional if events do not have durations.
+ * @return __itt_err upon failure (invalid event id/user event feature not enabled)
+ */
+int LIBITTAPI __itt_event_end(__itt_event event);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUB(LIBITTAPI, int, event_end, (__itt_event event), (event))
+#define __itt_event_end     ITTNOTIFY_DATA(event_end)
+#define __itt_event_end_ptr ITTNOTIFY_NAME(event_end)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_event_end(event) (int)0
+#define __itt_event_end_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_event_end_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+/** @} events group */
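
A sketch of the create/start/end sequence for user events: the event is created once and reused, and a zero return from __itt_event_create is treated as "not available". Names are illustrative; as with the other A/W pairs, the narrow-string form is assumed.

#include <string.h>
#include "ittnotify.h"

void checkpoint_phase(void)
{
    static __itt_event ev;                       /* 0 until successfully created */

    if (ev == 0)
        ev = __itt_event_create("checkpoint", (int)strlen("checkpoint"));
    __itt_event_start(ev);
    /* ... write the checkpoint ... */
    __itt_event_end(ev);
}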
+
+/**
+ * @defgroup heap Heap
+ * @ingroup public
+ * Heap group
+ * @{
+ */
+
+typedef void* __itt_heap_function;
+
+/**
+ * @brief Create an identifier for a heap function
+ * @return non-zero identifier or NULL
+ */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+__itt_heap_function ITTAPI __itt_heap_function_createA(const char*    name, const char*    domain);
+__itt_heap_function ITTAPI __itt_heap_function_createW(const wchar_t* name, const wchar_t* domain);
+#ifdef UNICODE
+#  define __itt_heap_function_create     __itt_heap_function_createW
+#  define __itt_heap_function_create_ptr __itt_heap_function_createW_ptr
+#else
+#  define __itt_heap_function_create     __itt_heap_function_createA
+#  define __itt_heap_function_create_ptr __itt_heap_function_createA_ptr
+#endif /* UNICODE */
+#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+__itt_heap_function ITTAPI __itt_heap_function_create(const char* name, const char* domain);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUB(ITTAPI, __itt_heap_function, heap_function_createA, (const char*    name, const char*    domain), (name, domain))
+ITT_STUB(ITTAPI, __itt_heap_function, heap_function_createW, (const wchar_t* name, const wchar_t* domain), (name, domain))
+#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUB(ITTAPI, __itt_heap_function, heap_function_create,  (const char*    name, const char*    domain), (name, domain))
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_heap_function_createA     ITTNOTIFY_DATA(heap_function_createA)
+#define __itt_heap_function_createA_ptr ITTNOTIFY_NAME(heap_function_createA)
+#define __itt_heap_function_createW     ITTNOTIFY_DATA(heap_function_createW)
+#define __itt_heap_function_createW_ptr ITTNOTIFY_NAME(heap_function_createW)
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_heap_function_create      ITTNOTIFY_DATA(heap_function_create)
+#define __itt_heap_function_create_ptr  ITTNOTIFY_NAME(heap_function_create)
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_heap_function_createA(name, domain) (__itt_heap_function)0
+#define __itt_heap_function_createA_ptr 0
+#define __itt_heap_function_createW(name, domain) (__itt_heap_function)0
+#define __itt_heap_function_createW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_heap_function_create(name, domain)  (__itt_heap_function)0
+#define __itt_heap_function_create_ptr  0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_heap_function_createA_ptr 0
+#define __itt_heap_function_createW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_heap_function_create_ptr  0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Record an allocation begin occurrence.
+ */
+void ITTAPI __itt_heap_allocate_begin(__itt_heap_function h, size_t size, int initialized);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, heap_allocate_begin, (__itt_heap_function h, size_t size, int initialized), (h, size, initialized))
+#define __itt_heap_allocate_begin     ITTNOTIFY_VOID(heap_allocate_begin)
+#define __itt_heap_allocate_begin_ptr ITTNOTIFY_NAME(heap_allocate_begin)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_heap_allocate_begin(h, size, initialized)
+#define __itt_heap_allocate_begin_ptr   0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_heap_allocate_begin_ptr   0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Record an allocation end occurrence.
+ */
+void ITTAPI __itt_heap_allocate_end(__itt_heap_function h, void* addr, size_t size, int initialized);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, heap_allocate_end, (__itt_heap_function h, void* addr, size_t size, int initialized), (h, addr, size, initialized))
+#define __itt_heap_allocate_end     ITTNOTIFY_VOID(heap_allocate_end)
+#define __itt_heap_allocate_end_ptr ITTNOTIFY_NAME(heap_allocate_end)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_heap_allocate_end(h, addr, size, initialized)
+#define __itt_heap_allocate_end_ptr   0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_heap_allocate_end_ptr   0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Record a free begin occurrence.
+ */
+void ITTAPI __itt_heap_free_begin(__itt_heap_function h, void* addr);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, heap_free_begin, (__itt_heap_function h, void* addr), (h, addr))
+#define __itt_heap_free_begin     ITTNOTIFY_VOID(heap_free_begin)
+#define __itt_heap_free_begin_ptr ITTNOTIFY_NAME(heap_free_begin)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_heap_free_begin(h, addr)
+#define __itt_heap_free_begin_ptr   0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_heap_free_begin_ptr   0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Record a free end occurrence.
+ */
+void ITTAPI __itt_heap_free_end(__itt_heap_function h, void* addr);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, heap_free_end, (__itt_heap_function h, void* addr), (h, addr))
+#define __itt_heap_free_end     ITTNOTIFY_VOID(heap_free_end)
+#define __itt_heap_free_end_ptr ITTNOTIFY_NAME(heap_free_end)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_heap_free_end(h, addr)
+#define __itt_heap_free_end_ptr   0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_heap_free_end_ptr   0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Record a reallocation begin occurrence.
+ */
+void ITTAPI __itt_heap_reallocate_begin(__itt_heap_function h, void* addr, size_t new_size, int initialized);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, heap_reallocate_begin, (__itt_heap_function h, void* addr, size_t new_size, int initialized), (h, addr, new_size, initialized))
+#define __itt_heap_reallocate_begin     ITTNOTIFY_VOID(heap_reallocate_begin)
+#define __itt_heap_reallocate_begin_ptr ITTNOTIFY_NAME(heap_reallocate_begin)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_heap_reallocate_begin(h, addr, new_size, initialized)
+#define __itt_heap_reallocate_begin_ptr   0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_heap_reallocate_begin_ptr   0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Record a reallocation end occurrence.
+ */
+void ITTAPI __itt_heap_reallocate_end(__itt_heap_function h, void* addr, void* new_addr, size_t new_size, int initialized);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, heap_reallocate_end, (__itt_heap_function h, void* addr, void* new_addr, size_t new_size, int initialized), (h, addr, new_addr, new_size, initialized))
+#define __itt_heap_reallocate_end     ITTNOTIFY_VOID(heap_reallocate_end)
+#define __itt_heap_reallocate_end_ptr ITTNOTIFY_NAME(heap_reallocate_end)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_heap_reallocate_end(h, addr, new_addr, new_size, initialized)
+#define __itt_heap_reallocate_end_ptr   0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_heap_reallocate_end_ptr   0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
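
A sketch of how a custom allocator could be instrumented with the heap hooks above; my_alloc/my_free and the domain name are hypothetical, malloc/free stand in for the real allocator, and the initialized flag is passed as 0 (memory not initialized).

#include <stdlib.h>
#include "ittnotify.h"

static __itt_heap_function alloc_fn;
static __itt_heap_function free_fn;

void my_heap_init(void)
{
    alloc_fn = __itt_heap_function_create("my_alloc", "my_allocator");
    free_fn  = __itt_heap_function_create("my_free",  "my_allocator");
}

void* my_alloc(size_t size)
{
    void* p;
    __itt_heap_allocate_begin(alloc_fn, size, 0);
    p = malloc(size);                                /* real allocation goes here */
    __itt_heap_allocate_end(alloc_fn, p, size, 0);
    return p;
}

void my_free(void* p)
{
    __itt_heap_free_begin(free_fn, p);
    free(p);                                         /* real deallocation goes here */
    __itt_heap_free_end(free_fn, p);
}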
+
+/** @brief internal access begin */
+void ITTAPI __itt_heap_internal_access_begin(void);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, heap_internal_access_begin,  (void), ())
+#define __itt_heap_internal_access_begin      ITTNOTIFY_VOID(heap_internal_access_begin)
+#define __itt_heap_internal_access_begin_ptr  ITTNOTIFY_NAME(heap_internal_access_begin)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_heap_internal_access_begin()
+#define __itt_heap_internal_access_begin_ptr  0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_heap_internal_access_begin_ptr  0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/** @brief internal access end */
+void ITTAPI __itt_heap_internal_access_end(void);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, heap_internal_access_end, (void), ())
+#define __itt_heap_internal_access_end     ITTNOTIFY_VOID(heap_internal_access_end)
+#define __itt_heap_internal_access_end_ptr ITTNOTIFY_NAME(heap_internal_access_end)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_heap_internal_access_end()
+#define __itt_heap_internal_access_end_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_heap_internal_access_end_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+/** @} heap group */
+
+/** @cond exclude_from_documentation */
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+/** @endcond */
+
+#endif /* _ITTNOTIFY_H_ */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/ittnotify_config.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/ittnotify_config.h
new file mode 100644 (file)
index 0000000..f02cc47
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _ITTNOTIFY_CONFIG_H_
+#define _ITTNOTIFY_CONFIG_H_
+
+#ifndef ITT_OS_WIN
+#  define ITT_OS_WIN   1
+#endif /* ITT_OS_WIN */
+
+#ifndef ITT_OS_LINUX
+#  define ITT_OS_LINUX 2
+#endif /* ITT_OS_LINUX */
+
+#ifndef ITT_OS_MAC
+#  define ITT_OS_MAC   3
+#endif /* ITT_OS_MAC */
+
+#ifndef ITT_OS
+#  if defined WIN32 || defined _WIN32
+#    define ITT_OS ITT_OS_WIN
+#  elif defined( __APPLE__ ) && defined( __MACH__ )
+#    define ITT_OS ITT_OS_MAC
+#  else
+#    define ITT_OS ITT_OS_LINUX
+#  endif
+#endif /* ITT_OS */
+
+#ifndef ITT_ARCH_IA32
+#  define ITT_ARCH_IA32  1
+#endif /* ITT_ARCH_IA32 */
+
+#ifndef ITT_ARCH_IA32E
+#  define ITT_ARCH_IA32E 2
+#endif /* ITT_ARCH_IA32E */
+
+#ifndef ITT_ARCH_IA64
+#  define ITT_ARCH_IA64  3
+#endif /* ITT_ARCH_IA64 */
+
+#ifndef ITT_ARCH
+#  if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
+#    define ITT_ARCH ITT_ARCH_IA32E
+#  elif defined _M_IA64 || defined __ia64
+#    define ITT_ARCH ITT_ARCH_IA64
+#  else
+#    define ITT_ARCH ITT_ARCH_IA32
+#  endif
+#endif
+
+#ifndef ITT_PLATFORM_WIN
+#  define ITT_PLATFORM_WIN 1
+#endif /* ITT_PLATFORM_WIN */ 
+
+#ifndef ITT_PLATFORM_POSIX
+#  define ITT_PLATFORM_POSIX 2
+#endif /* ITT_PLATFORM_POSIX */
+
+#ifndef ITT_PLATFORM
+#  if ITT_OS==ITT_OS_WIN
+#    define ITT_PLATFORM ITT_PLATFORM_WIN
+#  else
+#    define ITT_PLATFORM ITT_PLATFORM_POSIX
+#  endif /* _WIN32 */
+#endif /* ITT_PLATFORM */
+
+#ifdef __cplusplus
+#  define ITT_EXTERN_C extern "C"
+#else
+#  define ITT_EXTERN_C /* nothing */
+#endif /* __cplusplus */
+
+#define ITT_TO_STR_AUX(x) #x
+#define ITT_TO_STR(x)     ITT_TO_STR_AUX(x)
+
+#define __ITT_BUILD_ASSERT(expr, suffix) do { static char __itt_build_check_##suffix[(expr) ? 1 : -1]; __itt_build_check_##suffix[0] = 0; } while(0)
+#define _ITT_BUILD_ASSERT(expr, suffix)  __ITT_BUILD_ASSERT((expr), suffix)
+#define ITT_BUILD_ASSERT(expr)           _ITT_BUILD_ASSERT((expr), __LINE__)
+
+#endif /* _ITTNOTIFY_CONFIG_H_ */
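
ITT_BUILD_ASSERT above is a compile-time check: a false expression produces an array of negative size and therefore a compilation error, while a true expression compiles to nothing observable at run time. A usage sketch (statement context is required because of the do/while wrapper):

#include "ittnotify_config.h"

void check_assumptions(void)
{
    ITT_BUILD_ASSERT(sizeof(void*) >= sizeof(int));  /* fails to compile if violated */
}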
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/ittnotify_static.c b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/ittnotify_static.c
new file mode 100644 (file)
index 0000000..85a0a6e
--- /dev/null
@@ -0,0 +1,640 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "ittnotify_config.h"
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#include <windows.h>
+#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
+#include <pthread.h>
+#include <dlfcn.h>
+#include <errno.h>
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "disable_warnings.h"
+
+#define INTEL_NO_MACRO_BODY 
+#include "ittnotify.h"
+#include "legacy/ittnotify.h"
+#include "internal/ittnotify.h"
+#include "prototype/ittnotify.h"
+
+#include "ittnotify_types.h"
+
+#ifndef INTEL_ITTNOTIFY_PREFIX
+#define INTEL_ITTNOTIFY_PREFIX __itt_
+#endif /* INTEL_ITTNOTIFY_PREFIX */
+#ifndef INTEL_ITTNOTIFY_POSTFIX
+#define INTEL_ITTNOTIFY_POSTFIX _ptr_
+#endif /* INTEL_ITTNOTIFY_POSTFIX */
+
+#define _N_(n) ITT_JOIN(INTEL_ITTNOTIFY_PREFIX,n)
+
+#ifndef CDECL
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define CDECL __cdecl
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define CDECL
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* CDECL */
+
+#ifndef STDCALL
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define STDCALL __stdcall
+#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
+#define STDCALL
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* STDCALL */
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+typedef FARPROC   FPTR;
+typedef DWORD     TIDT;
+#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
+typedef void*     FPTR;
+typedef pthread_t TIDT;
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+/* OS communication functions */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+typedef HMODULE lib_t;
+typedef CRITICAL_SECTION mutex_t;
+#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
+typedef void* lib_t;
+typedef pthread_mutex_t mutex_t;
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+static volatile long ittnotify_init = 0;
+static lib_t ittnotify_lib = NULL;
+static __itt_error_notification_t* error_handler = NULL;
+
+#if ITT_OS==ITT_OS_WIN
+static const char* ittnotify_lib_name = "libittnotify.dll";
+#elif ITT_OS==ITT_OS_LINUX
+static const char* ittnotify_lib_name = "libittnotify.so";
+#elif ITT_OS==ITT_OS_MAC
+static const char* ittnotify_lib_name = "libittnotify.dylib";
+#else
+#error Unsupported or unknown OS.
+#endif
+
+#ifndef LIB_VAR_NAME
+#if ITT_ARCH==ITT_ARCH_IA32
+#define LIB_VAR_NAME INTEL_LIBITTNOTIFY32
+#else
+#define LIB_VAR_NAME INTEL_LIBITTNOTIFY64
+#endif
+#endif /* LIB_VAR_NAME */
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_get_proc(lib, name) GetProcAddress(lib, name)
+#define __itt_mutex_init(mutex)   InitializeCriticalSection(mutex)
+#define __itt_mutex_lock(mutex)   EnterCriticalSection(mutex)
+#define __itt_mutex_unlock(mutex) LeaveCriticalSection(mutex)
+#define __itt_load_lib(name)      LoadLibraryA(name)
+#define __itt_unload_lib(handle)  FreeLibrary(handle)
+#define __itt_system_error()      (int)GetLastError()
+#define __itt_fstrcmp(s1, s2)     lstrcmpA(s1, s2)
+#define __itt_fstrlen(s)          lstrlenA(s)
+#define __itt_fstrcpyn(s1, s2, l) lstrcpynA(s1, s2, l)
+#define __itt_thread_id()         GetCurrentThreadId()
+#define __itt_thread_yield()      SwitchToThread()
+#ifndef ITT_SIMPLE_INIT
+static int __itt_interlocked_increment(volatile int* ptr)
+{
+    ITT_BUILD_ASSERT(sizeof(int) == sizeof(long));
+    return InterlockedIncrement((volatile long *)ptr);
+}
+#endif /* ITT_SIMPLE_INIT */
+#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
+#define __itt_get_proc(lib, name) dlsym(lib, name)
+#define __itt_mutex_init(mutex)   \
+    {                                                                                        \
+        pthread_mutexattr_t mutex_attr;                                                      \
+        int error_code = pthread_mutexattr_init(&mutex_attr);                                \
+        if (error_code)                                                                      \
+            __itt_report_error(__itt_error_system, "pthread_mutexattr_init", error_code);    \
+        error_code = pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_RECURSIVE);        \
+        if (error_code)                                                                      \
+            __itt_report_error(__itt_error_system, "pthread_mutexattr_settype", error_code); \
+        error_code = pthread_mutex_init(mutex, &mutex_attr);                                 \
+        if (error_code)                                                                      \
+            __itt_report_error(__itt_error_system, "pthread_mutex_init", error_code);        \
+        error_code = pthread_mutexattr_destroy(&mutex_attr);                                 \
+        if (error_code)                                                                      \
+            __itt_report_error(__itt_error_system, "pthread_mutexattr_destroy", error_code); \
+    }
+#define __itt_mutex_lock(mutex)   pthread_mutex_lock(mutex)
+#define __itt_mutex_unlock(mutex) pthread_mutex_unlock(mutex)
+#define __itt_load_lib(name)      dlopen(name, RTLD_LAZY)
+#define __itt_unload_lib(handle)  dlclose(handle)
+#define __itt_system_error()      errno
+#define __itt_fstrcmp(s1, s2)     strcmp(s1, s2)
+#define __itt_fstrlen(s)          strlen(s)
+#define __itt_fstrcpyn(s1, s2, l) strncpy(s1, s2, l)
+#define __itt_thread_id()         pthread_self()
+#define __itt_thread_yield()      sched_yield()
+#if ITT_ARCH==ITT_ARCH_IA64
+#ifdef __INTEL_COMPILER
+#define __TBB_machine_fetchadd4(addr, val) __fetchadd4_acq((void *)addr, val)
+#else  /* __INTEL_COMPILER */
+// TODO: Add Support for not Intel compilers for IA64
+#endif /* __INTEL_COMPILER */
+#else /* ITT_ARCH!=ITT_ARCH_IA64 */
+#ifndef ITT_SIMPLE_INIT
+static int __TBB_machine_fetchadd4(volatile void* ptr, int addend)
+{
+    int result;
+    __asm__ __volatile__("lock\nxaddl %0,%1"
+                          : "=r"(result),"=m"(*(int *)ptr)
+                          : "0"(addend), "m"(*(int *)ptr)
+                          : "memory");
+    return result;
+}
+#endif // ITT_SIMPLE_INIT
+#endif /* ITT_ARCH==ITT_ARCH_IA64 */
+#ifndef ITT_SIMPLE_INIT
+static int __itt_interlocked_increment(volatile int* ptr)
+{
+    return __TBB_machine_fetchadd4(ptr, 1) + 1;
+}
+#endif /* ITT_SIMPLE_INIT */
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+const int _N_(err) = 0;
+
+typedef int (__itt_init_ittlib_t)(const char*, __itt_group_id);
+
+/* This define is used to control the initialization function name. */
+#ifndef __itt_init_ittlib_name
+static int _N_(init_ittlib)(const char*, __itt_group_id);
+static __itt_init_ittlib_t* __itt_init_ittlib_ptr = _N_(init_ittlib);
+#define __itt_init_ittlib_name __itt_init_ittlib_ptr
+#endif /* __itt_init_ittlib_name */
+
+/* building pointers to imported funcs */
+#undef ITT_STUBV
+#undef ITT_STUB
+#define ITT_STUB(api,type,name,args,params,ptr,group,format)      \
+    static type api ITT_JOIN(_N_(name),_init) args;               \
+    typedef type api name##_t args;                               \
+    extern "C" name##_t* ITTNOTIFY_NAME(name);                    \
+    name##_t* ITTNOTIFY_NAME(name) = ITT_JOIN(_N_(name),_init);   \
+    static type api ITT_JOIN(_N_(name),_init) args                \
+    {                                                             \
+        if (__itt_init_ittlib_name(NULL, __itt_group_none)        \
+            && ITTNOTIFY_NAME(name)                               \
+            && ITTNOTIFY_NAME(name) != ITT_JOIN(_N_(name),_init)) \
+            return ITTNOTIFY_NAME(name) params;                   \
+        else                                                      \
+            return (type)0;                                       \
+    }
+
+#define ITT_STUBV(api,type,name,args,params,ptr,group,format)     \
+    static type api ITT_JOIN(_N_(name),_init) args;               \
+    typedef type api name##_t args;                               \
+    extern "C" name##_t* ITTNOTIFY_NAME(name);                    \
+    name##_t* ITTNOTIFY_NAME(name) = ITT_JOIN(_N_(name),_init);   \
+    static type api ITT_JOIN(_N_(name),_init) args                \
+    {                                                             \
+        if (__itt_init_ittlib_name(NULL, __itt_group_none)        \
+            && ITTNOTIFY_NAME(name)                               \
+            && ITTNOTIFY_NAME(name) != ITT_JOIN(_N_(name),_init)) \
+            ITTNOTIFY_NAME(name) params;                          \
+        else                                                      \
+            return;                                               \
+    }
+
+/* Define types and *_init functions. */
+#include "ittnotify_static.h"
+
+ITT_GROUP_LIST(group_list);
+
+typedef struct __itt_group_alias_
+{
+    const char*    env_var;
+    __itt_group_id groups;
+} __itt_group_alias;
+
+static __itt_group_alias group_alias[] = {
+    { "KMP_FOR_TPROFILE", (__itt_group_id)(__itt_group_control | __itt_group_thread | __itt_group_sync  | __itt_group_mark) },
+    { "KMP_FOR_TCHECK",   (__itt_group_id)(__itt_group_control | __itt_group_thread | __itt_group_fsync | __itt_group_mark) },
+    { NULL,               (__itt_group_none) }
+};
+
+typedef struct __itt_func_map_
+{
+    const char*    name;
+    void**         func_ptr;
+    __itt_group_id group;
+} __itt_func_map;
+
+#define __ptr_(pname,name,group) {ITT_TO_STR(ITT_JOIN(__itt_,pname)), (void**)(void*)&ITTNOTIFY_NAME(name), (__itt_group_id)(group)},
+#undef ITT_STUB
+#undef ITT_STUBV
+#define ITT_STUB(api,type,name,args,params,nameindll,group,format) __ptr_(nameindll,name,group)
+#define ITT_STUBV ITT_STUB
+
+static __itt_func_map func_map[] = {
+#include "ittnotify_static.h"
+    {NULL, NULL, __itt_group_none}
+};
+
+#ifndef ITT_SIMPLE_INIT
+
+#undef ITT_STUBV
+#undef ITT_STUB
+#define ITT_STUBV(api,type,name,args,params,ptr,group,format) \
+ITT_EXTERN_C type api _N_(name) args                          \
+{                                                             \
+    if (ITTNOTIFY_NAME(name))                                 \
+        ITTNOTIFY_NAME(name) params;                          \
+    else                                                      \
+        return;                                               \
+}
+
+#define ITT_STUB(api,type,name,args,params,ptr,group,format) \
+ITT_EXTERN_C type api _N_(name) args                         \
+{                                                            \
+    if (ITTNOTIFY_NAME(name))                                \
+        return ITTNOTIFY_NAME(name) params;                  \
+    else                                                     \
+        return (type)0;                                      \
+}
+
+/* Define ITT functions. */
+#include "ittnotify_static.h"
+
+#endif /* ITT_SIMPLE_INIT */
+
+static const char* __itt_fsplit(const char* s, const char* sep, const char** out, int* len)
+{
+    int i;
+    int j;
+
+    if (!s || !sep || !out || !len)
+        return 0;
+
+    for (i = 0; s[i]; i++)
+    {
+        int b = 0;
+        for (j = 0; sep[j]; j++)
+            if (s[i] == sep[j])
+            {
+                b = 1;
+                break;
+            }
+        if (!b)
+            break;
+    }
+
+    if (!s[i])
+        return 0;
+
+    *len = 0;
+    *out = s + i;
+
+    for (; s[i]; i++, (*len)++)
+    {
+        int b = 0;
+        for (j = 0; sep[j]; j++)
+            if (s[i] == sep[j])
+            {
+                b = 1;
+                break;
+            }
+        if (b)
+            break;
+    }
+
+    for (; s[i]; i++)
+    {
+        int b = 0;
+        for (j = 0; sep[j]; j++)
+            if (s[i] == sep[j])
+            {
+                b = 1;
+                break;
+            }
+        if (!b)
+            break;
+    }
+
+    return s + i;
+}
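
For reference, __itt_fsplit behaves like a re-entrant strtok over a read-only string: it skips leading separators, reports the next token via out/len, and returns a pointer just past that token (or NULL when no token remains). A usage sketch mirroring the group-parsing loop further below; this file already includes <stdio.h>.

static void list_tokens(const char* s)
{
    const char* chunk;
    int len;

    while ((s = __itt_fsplit(s, ",; ", &chunk, &len)) != NULL)
        printf("token: %.*s\n", len, chunk);   /* e.g. "a,b; c" yields a, b, c */
}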
+
+#ifdef ITT_NOTIFY_EXT_REPORT
+ITT_EXTERN_C void _N_(error_handler)(__itt_error_code, va_list args);
+#endif /* ITT_NOTIFY_EXT_REPORT */
+
+static void __itt_report_error(__itt_error_code code, ...)
+{
+    va_list args;
+    va_start( args, code );
+    if (error_handler != NULL)
+        error_handler(code, args);
+#ifdef ITT_NOTIFY_EXT_REPORT
+    _N_(error_handler)(code, args);
+#endif /* ITT_NOTIFY_EXT_REPORT */
+    va_end(args);
+}
+
+static const char* __itt_get_env_var(const char* name)
+{
+#define MAX_ENV_VALUE_SIZE 4086
+    static char  env_buff[MAX_ENV_VALUE_SIZE];
+    static char* env_value = (char*)&env_buff;
+
+    if (name != NULL)
+    {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+        size_t max_len = MAX_ENV_VALUE_SIZE - ((size_t)env_value - (size_t)&env_buff);
+        DWORD rc = GetEnvironmentVariableA(name, env_value, (DWORD)max_len);
+        if (rc >= max_len)
+        {
+            __itt_report_error(__itt_error_env_too_long, name, (size_t)rc - 1, (size_t)(max_len - 1));
+        }
+        else if (rc > 0)
+        {
+            char* ret = env_value;
+            env_value += rc + 1;
+            return ret;
+        }
+        else
+        {
+            /* If the environment variable is empty, GetEnvironmentVariableA() returns zero (the     */
+            /* number of characters, not including the terminating null), and GetLastError() returns */
+            /* ERROR_SUCCESS.                                                                         */
+            DWORD err = GetLastError();
+            if (err == ERROR_SUCCESS)
+                return env_value;
+
+            if (err != ERROR_ENVVAR_NOT_FOUND)
+                __itt_report_error(__itt_error_cant_read_env, name, (int)err);
+        }
+#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
+        char* env = getenv(name);
+        if (env != NULL)
+        {
+            size_t len = strlen(env);
+            size_t max_len = MAX_ENV_VALUE_SIZE - ((size_t)env_value - (size_t)&env_buff);
+            if (len < max_len)
+            {
+                char* ret = env_value;
+                strncpy(env_value, env, len + 1);
+                env_value += len + 1;
+                return ret;
+            } else
+                __itt_report_error(__itt_error_env_too_long, name, (size_t)len, (size_t)(max_len - 1));
+        }
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+    }
+    return NULL;
+}
+
+static const char* __itt_get_lib_name()
+{
+    const char* lib_name = __itt_get_env_var(ITT_TO_STR(LIB_VAR_NAME));
+    return (lib_name == NULL) ? ittnotify_lib_name : lib_name;
+}
+
+#ifndef min
+#define min(a,b) (a) < (b) ? (a) : (b)
+#endif /* min */
+
+static __itt_group_id __itt_get_groups()
+{
+    int i;
+    __itt_group_id res = __itt_group_none;
+
+    const char* var_name  = "INTEL_ITTNOTIFY_GROUPS";
+    const char* group_str = __itt_get_env_var(var_name);
+    if (group_str != NULL)
+    {
+        int len;
+        char gr[255];
+        const char* chunk;
+        while ((group_str = __itt_fsplit(group_str, ",; ", &chunk, &len)) != NULL)
+        {
+            __itt_fstrcpyn(gr, chunk, sizeof(gr));
+
+            gr[min((size_t)len, sizeof(gr) - 1)] = 0;
+
+            for (i = 0; group_list[i].name != NULL; i++)
+            {
+                if (!__itt_fstrcmp(gr, group_list[i].name))
+                {
+                    res = (__itt_group_id)(res | group_list[i].id);
+                    break;
+                }
+            }
+        }
+        /* TODO: !!! Workaround for bug with warning for unknown group !!!
+         * Should be fixed in new initialization scheme.
+         * Now the following groups should be set always.
+         */
+        for (i = 0; group_list[i].id != __itt_group_none; i++)
+            if (group_list[i].id != __itt_group_all && group_list[i].id > __itt_group_splitter)
+                res = (__itt_group_id)(res | group_list[i].id);
+        return res;
+    }
+    else
+    {
+        for (i = 0; group_alias[i].env_var != NULL; i++)
+            if (__itt_get_env_var(group_alias[i].env_var) != NULL)
+                return group_alias[i].groups;
+    }
+
+    return res;
+}
+
+static int __itt_is_legacy_lib(lib_t lib)
+{
+    if (lib == NULL)
+        return 0; // if unknown assume NO
+
+    if (__itt_get_proc(lib, "__itt_api_version"))
+        return 0; // New interface - NO
+    return 1; // It's legacy otherwise
+}
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#pragma warning(push)
+#pragma warning(disable: 4054)
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+/* ITT_EXTERN_C - should be exported after agreement
+static void _N_(fini_ittlib)(void)
+{
+    int i;
+
+    if (ittnotify_init)
+    {
+        // Clear all pointers
+        for (i = 0; func_map[i].name != NULL; i++)
+            *func_map[i].func_ptr = NULL;
+
+        if (ittnotify_lib != NULL)
+            __itt_unload_lib(ittnotify_lib);
+
+        ittnotify_lib  = NULL;
+        ittnotify_init = 0;
+    }
+}
+*/
+
+static int _N_(init_ittlib)(const char* lib_name, __itt_group_id groups)
+{
+    int i, ret = 0;
+    static volatile TIDT current_thread = 0;
+
+    if (!ittnotify_init)
+    {
+#ifndef ITT_SIMPLE_INIT
+        static mutex_t mutex;
+        static volatile int inter_counter = 0;
+        static volatile int mutex_initialized = 0;
+
+        if (!mutex_initialized)
+        {
+            if (__itt_interlocked_increment(&inter_counter) == 1)
+            {
+                __itt_mutex_init(&mutex);
+                mutex_initialized = 1;
+            }
+            else
+                while (!mutex_initialized)
+                    __itt_thread_yield();
+        }
+
+        __itt_mutex_lock(&mutex);
+#endif /* ITT_SIMPLE_INIT */
+
+        if (!ittnotify_init)
+        {
+            if (current_thread == 0)
+            {
+                current_thread = __itt_thread_id();
+                if (groups == __itt_group_none)
+                    groups = __itt_get_groups();
+                if (groups == __itt_group_none)
+                {
+                    // Clear all pointers
+                    for (i = 0; func_map[i].name != NULL; i++ )
+                        *func_map[i].func_ptr = NULL;
+                }
+                else
+                {
+                    __itt_group_id zero_group = __itt_group_none;
+                    if (lib_name == NULL)
+                        lib_name = __itt_get_lib_name();
+                    ittnotify_lib = __itt_load_lib(lib_name);
+                    if (ittnotify_lib != NULL)
+                    {
+                        if (__itt_is_legacy_lib(ittnotify_lib))
+                            groups = __itt_group_legacy;
+
+                        for (i = 0; func_map[i].name != NULL; i++)
+                        {
+                            if (func_map[i].group & groups)
+                            {
+                                *func_map[i].func_ptr = (void*)__itt_get_proc(ittnotify_lib, func_map[i].name);
+                                if (*func_map[i].func_ptr == NULL)
+                                {
+                                    __itt_report_error(__itt_error_no_symbol, lib_name, func_map[i].name );
+                                    zero_group = (__itt_group_id)(zero_group | func_map[i].group);
+                                }
+                            }
+                            else
+                                *func_map[i].func_ptr = NULL;
+                        }
+
+                        if (groups == __itt_group_legacy)
+                        {
+                            // Compatibility with legacy tools
+                            ITTNOTIFY_NAME(sync_prepare)   = ITTNOTIFY_NAME(notify_sync_prepare);
+                            ITTNOTIFY_NAME(sync_cancel)    = ITTNOTIFY_NAME(notify_sync_cancel);
+                            ITTNOTIFY_NAME(sync_acquired)  = ITTNOTIFY_NAME(notify_sync_acquired);
+                            ITTNOTIFY_NAME(sync_releasing) = ITTNOTIFY_NAME(notify_sync_releasing);
+                        }
+                    }
+                    else
+                    {
+                        // Clear all pointers
+                        for (i = 0; func_map[i].name != NULL; i++)
+                            *func_map[i].func_ptr = NULL;
+
+                        __itt_report_error(__itt_error_no_module, lib_name,
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+                            __itt_system_error()
+#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+                            dlerror()
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+                        );
+                    }
+#ifdef ITT_COMPLETE_GROUP
+                    for (i = 0; func_map[i].name != NULL; i++)
+                        if (func_map[i].group & zero_group)
+                            *func_map[i].func_ptr = NULL;
+#endif /* ITT_COMPLETE_GROUP */
+
+                    /* check whether any function pointer was actually resolved */
+                    for (i = 0; func_map[i].name != NULL; i++)
+                    {
+                        if (*func_map[i].func_ptr != NULL)
+                        {
+                            ret = 1;
+                            break;
+                        }
+                    }
+                }
+
+                ittnotify_init = 1;
+                current_thread = 0;
+            }
+        }
+
+#ifndef ITT_SIMPLE_INIT
+        __itt_mutex_unlock(&mutex);
+#endif /* ITT_SIMPLE_INIT */
+    }
+
+    return ret;
+}
+
+ITT_EXTERN_C __itt_error_notification_t* _N_(set_error_handler)(__itt_error_notification_t* handler)
+{
+    __itt_error_notification_t* prev = error_handler;
+    error_handler = handler;
+    return prev;
+}
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#pragma warning(pop)
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
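
A sketch of installing a custom error handler through the exported setter; the public name and the handler signature are inferred from the _N_ prefix macro and from how error_handler is invoked above, so treat both as assumptions.

#include <stdarg.h>
#include <stdio.h>
#include "ittnotify.h"

/* Assumed signature, matching the error_handler(code, args) call site above. */
static void my_itt_error_handler(__itt_error_code code, va_list args)
{
    (void)args;                                  /* argument layout depends on the code */
    fprintf(stderr, "ittnotify error: %d\n", (int)code);
}

void install_itt_error_handler(void)
{
    __itt_set_error_handler(my_itt_error_handler);   /* assumed expansion of _N_(set_error_handler) */
}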
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/ittnotify_static.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/ittnotify_static.h
new file mode 100644 (file)
index 0000000..109674f
--- /dev/null
@@ -0,0 +1,231 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "ittnotify_config.h"
+
+#ifndef ITT_STUB
+#  define ITT_STUB ITT_STUBV
+#endif /* ITT_STUB */
+
+#ifndef ITTAPI
+#  define ITTAPI CDECL
+#endif /* ITTAPI */
+
+#ifndef LIBITTAPI
+#  define LIBITTAPI /* nothing */
+#endif /* LIBITTAPI */
+
+#ifndef ITT_FORMAT_DEFINED
+#  ifndef ITT_FORMAT
+#    define ITT_FORMAT
+#  endif /* ITT_FORMAT */
+#  ifndef ITT_NO_PARAMS
+#    define ITT_NO_PARAMS
+#  endif /* ITT_NO_PARAMS */
+#endif /* ITT_FORMAT_DEFINED */
+
+/*
+ * parameters for macro expected:
+ * ITT_STUB(api, type, func_name, arguments, params, func_name_in_dll, group, printf_fmt)
+ */
+/* public */
+ITT_STUBV(ITTAPI, void, pause,  (void), (ITT_NO_PARAMS), pause,  __itt_group_control | __itt_group_legacy, "no args")
+ITT_STUBV(ITTAPI, void, resume, (void), (ITT_NO_PARAMS), resume, __itt_group_control | __itt_group_legacy, "no args")
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUBV(ITTAPI, void, thread_set_nameA, (const char    *name), (ITT_FORMAT name), thread_set_nameA, __itt_group_thread, "\"%s\"")
+ITT_STUBV(ITTAPI, void, thread_set_nameW, (const wchar_t *name), (ITT_FORMAT name), thread_set_nameW, __itt_group_thread, "\"%S\"")
+#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
+ITT_STUBV(ITTAPI, void, thread_set_name,  (const char    *name), (ITT_FORMAT name), thread_set_name,  __itt_group_thread, "\"%s\"")
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUBV(ITTAPI, void, thread_ignore, (void), (ITT_NO_PARAMS), thread_ignore, __itt_group_thread, "no args")
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUBV(ITTAPI, void, sync_createA, (void *addr, const char    *objtype, const char    *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_createA, __itt_group_sync | __itt_group_fsync, "%p, \"%s\", \"%s\", %x")
+ITT_STUBV(ITTAPI, void, sync_createW, (void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_createW, __itt_group_sync | __itt_group_fsync, "%p, \"%S\", \"%S\", %x")
+ITT_STUBV(ITTAPI, void, sync_renameA, (void *addr, const char    *name), (ITT_FORMAT addr, name), sync_renameA, __itt_group_sync | __itt_group_fsync, "%p, \"%s\"")
+ITT_STUBV(ITTAPI, void, sync_renameW, (void *addr, const wchar_t *name), (ITT_FORMAT addr, name), sync_renameW, __itt_group_sync | __itt_group_fsync, "%p, \"%S\"")
+#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
+ITT_STUBV(ITTAPI, void, sync_create,  (void *addr, const char    *objtype, const char    *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_create,  __itt_group_sync | __itt_group_fsync, "%p, \"%s\", \"%s\", %x")
+ITT_STUBV(ITTAPI, void, sync_rename,  (void *addr, const char    *name), (ITT_FORMAT addr, name), sync_rename,  __itt_group_sync | __itt_group_fsync, "%p, \"%s\"")
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUBV(ITTAPI, void, sync_destroy,    (void *addr), (ITT_FORMAT addr), sync_destroy,   __itt_group_sync | __itt_group_fsync, "%p")
+
+ITT_STUBV(ITTAPI, void, sync_prepare,    (void* addr), (ITT_FORMAT addr), sync_prepare,   __itt_group_sync,  "%p")
+ITT_STUBV(ITTAPI, void, sync_cancel,     (void *addr), (ITT_FORMAT addr), sync_cancel,    __itt_group_sync,  "%p")
+ITT_STUBV(ITTAPI, void, sync_acquired,   (void *addr), (ITT_FORMAT addr), sync_acquired,  __itt_group_sync,  "%p")
+ITT_STUBV(ITTAPI, void, sync_releasing,  (void* addr), (ITT_FORMAT addr), sync_releasing, __itt_group_sync,  "%p")
+
+ITT_STUBV(ITTAPI, void, fsync_prepare,   (void* addr), (ITT_FORMAT addr), sync_prepare,   __itt_group_fsync, "%p")
+ITT_STUBV(ITTAPI, void, fsync_cancel,    (void *addr), (ITT_FORMAT addr), sync_cancel,    __itt_group_fsync, "%p")
+ITT_STUBV(ITTAPI, void, fsync_acquired,  (void *addr), (ITT_FORMAT addr), sync_acquired,  __itt_group_fsync, "%p")
+ITT_STUBV(ITTAPI, void, fsync_releasing, (void* addr), (ITT_FORMAT addr), sync_releasing, __itt_group_fsync, "%p")
+
+ITT_STUBV(ITTAPI, void, model_site_begin,          (__itt_model_site *site, __itt_model_site_instance *instance, const char *name), (ITT_FORMAT site, instance, name), model_site_begin, __itt_group_model, "%p, %p, \"%s\"")
+ITT_STUBV(ITTAPI, void, model_site_end,            (__itt_model_site *site, __itt_model_site_instance *instance),                   (ITT_FORMAT site, instance),       model_site_end,   __itt_group_model, "%p, %p")
+ITT_STUBV(ITTAPI, void, model_task_begin,          (__itt_model_task *task, __itt_model_task_instance *instance, const char *name), (ITT_FORMAT task, instance, name), model_task_begin, __itt_group_model, "%p, %p, \"%s\"")
+ITT_STUBV(ITTAPI, void, model_task_end,            (__itt_model_task *task, __itt_model_task_instance *instance),                   (ITT_FORMAT task, instance),       model_task_end,   __itt_group_model, "%p, %p")
+ITT_STUBV(ITTAPI, void, model_lock_acquire,        (void *lock), (ITT_FORMAT lock), model_lock_acquire, __itt_group_model, "%p")
+ITT_STUBV(ITTAPI, void, model_lock_release,        (void *lock), (ITT_FORMAT lock), model_lock_release, __itt_group_model, "%p")
+ITT_STUBV(ITTAPI, void, model_record_allocation,   (void *addr, size_t size), (ITT_FORMAT addr, size), model_record_allocation,   __itt_group_model, "%p, %d")
+ITT_STUBV(ITTAPI, void, model_record_deallocation, (void *addr),              (ITT_FORMAT addr),       model_record_deallocation, __itt_group_model, "%p")
+ITT_STUBV(ITTAPI, void, model_induction_uses,      (void* addr, size_t size), (ITT_FORMAT addr, size), model_induction_uses,      __itt_group_model, "%p, %d")
+ITT_STUBV(ITTAPI, void, model_reduction_uses,      (void* addr, size_t size), (ITT_FORMAT addr, size), model_reduction_uses,      __itt_group_model, "%p, %d")
+ITT_STUBV(ITTAPI, void, model_observe_uses,        (void* addr, size_t size), (ITT_FORMAT addr, size), model_observe_uses,        __itt_group_model, "%p, %d")
+ITT_STUBV(ITTAPI, void, model_clear_uses,          (void* addr),              (ITT_FORMAT addr),       model_clear_uses,          __itt_group_model, "%p")
+ITT_STUBV(ITTAPI, void, model_disable_push,        (__itt_model_disable x),   (ITT_FORMAT x),          model_disable_push,        __itt_group_model, "%p")
+ITT_STUBV(ITTAPI, void, model_disable_pop,         (void),                    (ITT_NO_PARAMS),         model_disable_pop,         __itt_group_model, "no args")
+
+#ifndef __ITT_INTERNAL_BODY
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUB(ITTAPI, __itt_counter, counter_createA, (const char    *name, const char    *domain), (ITT_FORMAT name, domain), counter_createA, __itt_group_counter, "\"%s\", \"%s\"")
+ITT_STUB(ITTAPI, __itt_counter, counter_createW, (const wchar_t *name, const wchar_t *domain), (ITT_FORMAT name, domain), counter_createW, __itt_group_counter, "\"%S\", \"%S\"")
+#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUB(ITTAPI, __itt_counter, counter_create,  (const char    *name, const char    *domain), (ITT_FORMAT name, domain), counter_create,  __itt_group_counter, "\"%s\", \"%s\"")
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* __ITT_INTERNAL_BODY */
+ITT_STUBV(ITTAPI, void, counter_destroy,   (__itt_counter id),                           (ITT_FORMAT id),        counter_destroy,   __itt_group_counter, "%p")
+ITT_STUBV(ITTAPI, void, counter_inc,       (__itt_counter id),                           (ITT_FORMAT id),        counter_inc,       __itt_group_counter, "%p")
+ITT_STUBV(ITTAPI, void, counter_inc_delta, (__itt_counter id, unsigned long long value), (ITT_FORMAT id, value), counter_inc_delta, __itt_group_counter, "%p, %lu")
+
+#ifndef __ITT_INTERNAL_BODY
+ITT_STUB(ITTAPI, __itt_caller, stack_caller_create,  (void),         (ITT_NO_PARAMS), stack_caller_create,  __itt_group_stitch, "no args")
+#endif /* __ITT_INTERNAL_BODY */
+ITT_STUBV(ITTAPI, void, stack_caller_destroy,     (__itt_caller id), (ITT_FORMAT id), stack_caller_destroy, __itt_group_stitch, "%p")
+ITT_STUBV(ITTAPI, void, stack_callee_enter,       (__itt_caller id), (ITT_FORMAT id), stack_callee_enter,   __itt_group_stitch, "%p")
+ITT_STUBV(ITTAPI, void, stack_callee_leave,       (__itt_caller id), (ITT_FORMAT id), stack_callee_leave,   __itt_group_stitch, "%p")
+
+#ifndef __ITT_INTERNAL_BODY
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUB(ITTAPI, __itt_frame, frame_createA, (const char    *domain), (ITT_FORMAT domain), frame_createA, __itt_group_frame, "\"%s\"")
+ITT_STUB(ITTAPI, __itt_frame, frame_createW, (const wchar_t *domain), (ITT_FORMAT domain), frame_createW, __itt_group_frame, "\"%S\"")
+#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUB(ITTAPI, __itt_frame, frame_create,  (const char    *domain), (ITT_FORMAT domain), frame_create,  __itt_group_frame, "\"%s\"")
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* __ITT_INTERNAL_BODY */
+ITT_STUBV(ITTAPI, void, frame_begin,         (__itt_frame frame),     (ITT_FORMAT frame),  frame_begin,   __itt_group_frame, "%p")
+ITT_STUBV(ITTAPI, void, frame_end,           (__itt_frame frame),     (ITT_FORMAT frame),  frame_end,     __itt_group_frame, "%p")
+
+#ifndef __ITT_INTERNAL_BODY
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUB(LIBITTAPI, __itt_event, event_createA, (const char    *name, int namelen), (ITT_FORMAT name, namelen), event_createA, __itt_group_mark | __itt_group_legacy, "\"%s\", %d")
+ITT_STUB(LIBITTAPI, __itt_event, event_createW, (const wchar_t *name, int namelen), (ITT_FORMAT name, namelen), event_createW, __itt_group_mark | __itt_group_legacy, "\"%S\", %d")
+#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
+ITT_STUB(LIBITTAPI, __itt_event, event_create,  (const char    *name, int namelen), (ITT_FORMAT name, namelen), event_create,  __itt_group_mark | __itt_group_legacy, "\"%s\", %d")
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUB(LIBITTAPI, int,  event_start,          (__itt_event event),                (ITT_FORMAT event),         event_start,   __itt_group_mark | __itt_group_legacy, "%d")
+ITT_STUB(LIBITTAPI, int,  event_end,            (__itt_event event),                (ITT_FORMAT event),         event_end,     __itt_group_mark | __itt_group_legacy, "%d")
+#endif /* __ITT_INTERNAL_BODY */
+
+#ifndef __ITT_INTERNAL_BODY
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUB(ITTAPI, __itt_heap_function, heap_function_createA, (const char    *name, const char    *domain), (ITT_FORMAT name, domain), heap_function_createA, __itt_group_heap, "\"%s\", \"%s\"")
+ITT_STUB(ITTAPI, __itt_heap_function, heap_function_createW, (const wchar_t *name, const wchar_t *domain), (ITT_FORMAT name, domain), heap_function_createW, __itt_group_heap, "\"%S\", \"%S\"")
+#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUB(ITTAPI, __itt_heap_function, heap_function_create,  (const char    *name, const char    *domain), (ITT_FORMAT name, domain), heap_function_create,  __itt_group_heap, "\"%s\", \"%s\"")
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* __ITT_INTERNAL_BODY */
+ITT_STUBV(ITTAPI, void, heap_allocate_begin,   (__itt_heap_function h, size_t size, int initialized),             (ITT_FORMAT h, size, initialized),       heap_allocate_begin, __itt_group_heap, "%p, %lu, %d")
+ITT_STUBV(ITTAPI, void, heap_allocate_end,     (__itt_heap_function h, void* addr, size_t size, int initialized), (ITT_FORMAT h, addr, size, initialized), heap_allocate_end,   __itt_group_heap, "%p, %p, %lu, %d")
+ITT_STUBV(ITTAPI, void, heap_free_begin,       (__itt_heap_function h, void* addr), (ITT_FORMAT h, addr), heap_free_begin, __itt_group_heap, "%p, %p")
+ITT_STUBV(ITTAPI, void, heap_free_end,         (__itt_heap_function h, void* addr), (ITT_FORMAT h, addr), heap_free_end,   __itt_group_heap, "%p, %p")
+ITT_STUBV(ITTAPI, void, heap_reallocate_begin, (__itt_heap_function h, void* addr, size_t new_size, int initialized),                 (ITT_FORMAT h, addr, new_size, initialized),           heap_reallocate_begin, __itt_group_heap, "%p, %p, %lu, %d")
+ITT_STUBV(ITTAPI, void, heap_reallocate_end,   (__itt_heap_function h, void* addr, void* new_addr, size_t new_size, int initialized), (ITT_FORMAT h, addr, new_addr, new_size, initialized), heap_reallocate_end,   __itt_group_heap, "%p, %p, %p, %lu, %d")
+ITT_STUBV(ITTAPI, void, heap_internal_access_begin, (void), (ITT_NO_PARAMS), heap_internal_access_begin, __itt_group_heap, "no args")
+ITT_STUBV(ITTAPI, void, heap_internal_access_end,   (void), (ITT_NO_PARAMS), heap_internal_access_end,   __itt_group_heap, "no args")
+
+/* legacy */
+#ifndef __ITT_INTERNAL_BODY
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUB(LIBITTAPI, int,  thr_name_setA, (const char    *name, int namelen), (ITT_FORMAT name, namelen), thr_name_setA, __itt_group_thread | __itt_group_legacy, "\"%s\", %d")
+ITT_STUB(LIBITTAPI, int,  thr_name_setW, (const wchar_t *name, int namelen), (ITT_FORMAT name, namelen), thr_name_setW, __itt_group_thread | __itt_group_legacy, "\"%S\", %d")
+#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
+ITT_STUB(LIBITTAPI, int,  thr_name_set,  (const char    *name, int namelen), (ITT_FORMAT name, namelen), thr_name_set,  __itt_group_thread | __itt_group_legacy, "\"%s\", %d")
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUBV(LIBITTAPI, void, thr_ignore,   (void),                             (ITT_NO_PARAMS),            thr_ignore,    __itt_group_thread | __itt_group_legacy, "no args")
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUBV(ITTAPI, void, sync_set_nameA, (void *addr, const char    *objtype, const char    *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_set_nameA, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p, \"%s\", \"%s\", %x")
+ITT_STUBV(ITTAPI, void, sync_set_nameW, (void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_set_nameW, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p, \"%S\", \"%S\", %x")
+#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
+ITT_STUBV(ITTAPI, void, sync_set_name,  (void *addr, const char    *objtype, const char    *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_set_name,  __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p, \"%s\", \"%s\", %x")
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUB(LIBITTAPI, int, notify_sync_nameA, (void *p, const char    *objtype, int typelen, const char    *objname, int namelen, int attribute), (ITT_FORMAT p, objtype, typelen, objname, namelen, attribute), notify_sync_nameA, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p, \"%s\", %d, \"%s\", %d, %x")
+ITT_STUB(LIBITTAPI, int, notify_sync_nameW, (void *p, const wchar_t *objtype, int typelen, const wchar_t *objname, int namelen, int attribute), (ITT_FORMAT p, objtype, typelen, objname, namelen, attribute), notify_sync_nameW, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p, \"%S\", %d, \"%S\", %d, %x")
+#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
+ITT_STUB(LIBITTAPI, int, notify_sync_name,  (void *p, const char    *objtype, int typelen, const char    *objname, int namelen, int attribute), (ITT_FORMAT p, objtype, typelen, objname, namelen, attribute), notify_sync_name,  __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p, \"%s\", %d, \"%s\", %d, %x")
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+ITT_STUBV(LIBITTAPI, void, notify_sync_prepare,   (void *p), (ITT_FORMAT p), notify_sync_prepare,   __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p")
+ITT_STUBV(LIBITTAPI, void, notify_sync_cancel,    (void *p), (ITT_FORMAT p), notify_sync_cancel,    __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p")
+ITT_STUBV(LIBITTAPI, void, notify_sync_acquired,  (void *p), (ITT_FORMAT p), notify_sync_acquired,  __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p")
+ITT_STUBV(LIBITTAPI, void, notify_sync_releasing, (void *p), (ITT_FORMAT p), notify_sync_releasing, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p")
+#endif /* __ITT_INTERNAL_BODY */
+
+ITT_STUBV(LIBITTAPI, void, memory_read,   (void *addr, size_t size), (ITT_FORMAT addr, size), memory_read,   __itt_group_legacy, "%p, %lu")
+ITT_STUBV(LIBITTAPI, void, memory_write,  (void *addr, size_t size), (ITT_FORMAT addr, size), memory_write,  __itt_group_legacy, "%p, %lu")
+ITT_STUBV(LIBITTAPI, void, memory_update, (void *addr, size_t size), (ITT_FORMAT addr, size), memory_update, __itt_group_legacy, "%p, %lu")
+
+ITT_STUB(LIBITTAPI, __itt_state_t,     state_get,    (void),                                    (ITT_NO_PARAMS),   state_get,    __itt_group_legacy, "no args")
+ITT_STUB(LIBITTAPI, __itt_state_t,     state_set,    (__itt_state_t s),                         (ITT_FORMAT s),    state_set,    __itt_group_legacy, "%d")
+ITT_STUB(LIBITTAPI, __itt_obj_state_t, obj_mode_set, (__itt_obj_prop_t p, __itt_obj_state_t s), (ITT_FORMAT p, s), obj_mode_set, __itt_group_legacy, "%d, %d")
+ITT_STUB(LIBITTAPI, __itt_thr_state_t, thr_mode_set, (__itt_thr_prop_t p, __itt_thr_state_t s), (ITT_FORMAT p, s), thr_mode_set, __itt_group_legacy, "%d, %d")
+
+/* internal */
+#ifndef __ITT_INTERNAL_BODY
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUB(ITTAPI, __itt_mark_type, mark_createA, (const char    *name), (ITT_FORMAT name), mark_createA, __itt_group_mark, "\"%s\"")
+ITT_STUB(ITTAPI, __itt_mark_type, mark_createW, (const wchar_t *name), (ITT_FORMAT name), mark_createW, __itt_group_mark, "\"%S\"")
+#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
+ITT_STUB(ITTAPI, __itt_mark_type, mark_create,  (const char    *name), (ITT_FORMAT name), mark_create,  __itt_group_mark, "\"%s\"")
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* __ITT_INTERNAL_BODY */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUB(ITTAPI, int,  markA,        (__itt_mark_type mt, const char    *parameter), (ITT_FORMAT mt, parameter), markA, __itt_group_mark, "%d, \"%s\"")
+ITT_STUB(ITTAPI, int,  markW,        (__itt_mark_type mt, const wchar_t *parameter), (ITT_FORMAT mt, parameter), markW, __itt_group_mark, "%d, \"%S\"")
+#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
+ITT_STUB(ITTAPI, int,  mark,         (__itt_mark_type mt, const char    *parameter), (ITT_FORMAT mt, parameter), mark,  __itt_group_mark, "%d, \"%s\"")
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUB(ITTAPI, int,  mark_off, (__itt_mark_type mt), (ITT_FORMAT mt), mark_off, __itt_group_mark, "%d")
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUB(ITTAPI, int,  mark_globalA, (__itt_mark_type mt, const char    *parameter), (ITT_FORMAT mt, parameter), mark_globalA, __itt_group_mark, "%d, \"%s\"")
+ITT_STUB(ITTAPI, int,  mark_globalW, (__itt_mark_type mt, const wchar_t *parameter), (ITT_FORMAT mt, parameter), mark_globalW, __itt_group_mark, "%d, \"%S\"")
+#else  /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
+ITT_STUB(ITTAPI, int,  mark_global,  (__itt_mark_type mt, const char    *parameter), (ITT_FORMAT mt, parameter), mark_global,  __itt_group_mark, "%d, \"%s\"")
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUB(ITTAPI, int,  mark_global_off, (__itt_mark_type mt),                        (ITT_FORMAT mt),            mark_global_off, __itt_group_mark, "%d")
+
+/* prototype */
+/* empty so far */
+
+/* hidden */
+#ifndef __ITT_INTERNAL_BODY
+ITT_STUB(ITTAPI, const char*, api_version, (void), (ITT_NO_PARAMS), api_version, __itt_group_all & ~__itt_group_legacy, "no args")
+#endif /* __ITT_INTERNAL_BODY */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/ittnotify_types.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/ittnotify_types.h
new file mode 100644 (file)
index 0000000..ec0d696
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _ITTNOTIFY_TYPES_H_
+#define _ITTNOTIFY_TYPES_H_
+
+typedef enum __itt_group_id_
+{
+    __itt_group_none    = 0,
+    __itt_group_legacy  = 1<<0,
+    __itt_group_control = 1<<1,
+    __itt_group_thread  = 1<<2,
+    __itt_group_mark    = 1<<3,
+    __itt_group_sync    = 1<<4,
+    __itt_group_fsync   = 1<<5,
+    __itt_group_jit     = 1<<6,
+    __itt_group_model   = 1<<7,
+    __itt_group_splitter= 1<<7,
+//-----------------------------
+    __itt_group_counter = 1<<8,
+    __itt_group_frame   = 1<<9,
+    __itt_group_stitch  = 1<<10,
+    __itt_group_heap    = 1<<11,
+    __itt_group_all     = -1
+} __itt_group_id;
+
+typedef struct __itt_group_list_
+{
+    __itt_group_id id;
+    const char*    name;
+} __itt_group_list;
+
+#define ITT_GROUP_LIST(varname) \
+    static __itt_group_list varname[] = {   \
+        { __itt_group_all,     "all"     }, \
+        { __itt_group_control, "control" }, \
+        { __itt_group_thread,  "thread"  }, \
+        { __itt_group_mark,    "mark"    }, \
+        { __itt_group_sync,    "sync"    }, \
+        { __itt_group_fsync,   "fsync"   }, \
+        { __itt_group_jit,     "jit"     }, \
+        { __itt_group_model,   "model"   }, \
+        { __itt_group_counter, "counter" }, \
+        { __itt_group_frame,   "frame"   }, \
+        { __itt_group_stitch,  "stitch"  }, \
+        { __itt_group_heap,    "heap"    }, \
+        { __itt_group_none,    NULL      }  \
+    }
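+
+/*
+ * Illustrative sketch (not part of the upstream header): the group ids are
+ * single-bit flags, so a set of enabled groups can be kept in one value and
+ * tested with bitwise operators, e.g.
+ *
+ *     unsigned enabled = __itt_group_sync | __itt_group_thread;
+ *     if (enabled & __itt_group_sync)
+ *         ;  // sync-object tracing is enabled
+ */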
+
+#endif /* _ITTNOTIFY_TYPES_H_ */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/legacy/ittnotify.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/legacy/ittnotify.h
new file mode 100644 (file)
index 0000000..dff5bbc
--- /dev/null
@@ -0,0 +1,817 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _LEGACY_ITTNOTIFY_H_
+#define _LEGACY_ITTNOTIFY_H_
+/**
+ * @file
+ * @brief Legacy User API functions and types
+ */
+
+/** @cond exclude_from_documentation */
+#ifndef ITT_OS_WIN
+#  define ITT_OS_WIN   1
+#endif /* ITT_OS_WIN */
+
+#ifndef ITT_OS_LINUX
+#  define ITT_OS_LINUX 2
+#endif /* ITT_OS_LINUX */
+
+#ifndef ITT_OS_MAC
+#  define ITT_OS_MAC   3
+#endif /* ITT_OS_MAC */
+
+#ifndef ITT_OS
+#  if defined WIN32 || defined _WIN32
+#    define ITT_OS ITT_OS_WIN
+#  elif defined( __APPLE__ ) && defined( __MACH__ )
+#    define ITT_OS ITT_OS_MAC
+#  else
+#    define ITT_OS ITT_OS_LINUX
+#  endif
+#endif /* ITT_OS */
+
+#ifndef ITT_PLATFORM_WIN
+#  define ITT_PLATFORM_WIN 1
+#endif /* ITT_PLATFORM_WIN */
+
+#ifndef ITT_PLATFORM_POSIX
+#  define ITT_PLATFORM_POSIX 2
+#endif /* ITT_PLATFORM_POSIX */
+
+#ifndef ITT_PLATFORM
+#  if ITT_OS==ITT_OS_WIN
+#    define ITT_PLATFORM ITT_PLATFORM_WIN
+#  else
+#    define ITT_PLATFORM ITT_PLATFORM_POSIX
+#  endif /* _WIN32 */
+#endif /* ITT_PLATFORM */
+
+#include <stddef.h>
+#include <stdarg.h>
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#include <tchar.h>
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+#ifndef CDECL
+#  if ITT_PLATFORM==ITT_PLATFORM_WIN
+#    define CDECL __cdecl
+#  else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#    define CDECL /* nothing */
+#  endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* CDECL */
+
+#ifndef STDCALL
+#  if ITT_PLATFORM==ITT_PLATFORM_WIN
+#    define STDCALL __stdcall
+#  else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#    define STDCALL /* nothing */
+#  endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* STDCALL */
+
+#define ITTAPI    CDECL
+#define LIBITTAPI /* nothing */
+
+#define ITT_JOIN_AUX(p,n) p##n
+#define ITT_JOIN(p,n)     ITT_JOIN_AUX(p,n)
+
+#ifndef INTEL_ITTNOTIFY_PREFIX
+#  define INTEL_ITTNOTIFY_PREFIX __itt_
+#endif /* INTEL_ITTNOTIFY_PREFIX */
+#ifndef INTEL_ITTNOTIFY_POSTFIX
+#  define INTEL_ITTNOTIFY_POSTFIX _ptr_
+#endif /* INTEL_ITTNOTIFY_POSTFIX */
+
+#define ITTNOTIFY_NAME_AUX(n) ITT_JOIN(INTEL_ITTNOTIFY_PREFIX,n)
+#define ITTNOTIFY_NAME(n)     ITTNOTIFY_NAME_AUX(ITT_JOIN(n,INTEL_ITTNOTIFY_POSTFIX))
+
+#define ITTNOTIFY_VOID(n) (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)
+#define ITTNOTIFY_DATA(n) (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)
+
+#ifdef ITT_STUB
+#undef ITT_STUB
+#endif
+#ifdef ITT_STUBV
+#undef ITT_STUBV
+#endif
+#define ITT_STUBV(api,type,name,args,params)                      \
+    typedef type (api* ITT_JOIN(ITTNOTIFY_NAME(name),_t)) args;   \
+    extern ITT_JOIN(ITTNOTIFY_NAME(name),_t) ITTNOTIFY_NAME(name);
+#define ITT_STUB ITT_STUBV
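+
+/*
+ * Illustrative expansion sketch (not part of the upstream header): with the
+ * default prefix "__itt_" and postfix "_ptr_", a stub declaration such as
+ * ITT_STUBV(ITTAPI, void, pause, (void), ()) (used below for __itt_pause)
+ * expands to roughly
+ *
+ *     typedef void (ITTAPI* __itt_pause_ptr__t)(void);
+ *     extern __itt_pause_ptr__t __itt_pause_ptr_;
+ *
+ * and ITTNOTIFY_VOID(pause) calls through __itt_pause_ptr_ only when that
+ * pointer is non-NULL, so the API is a no-op unless a tool fills it in.
+ */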
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+/** @endcond */
+
+/**
+ * @defgroup legacy Legacy API
+ * @{
+ * @}
+ */
+
+/**
+ * @defgroup legacy_control Collection Control
+ * @ingroup legacy
+ * General behavior: the application continues to run, but no profiling information is collected.
+ *
+ * Pausing applies not only to the current thread but to the whole process, as well as to any spawned processes.
+ * - Intel(R) Parallel Inspector:
+ *   - Does not analyze or report errors that involve memory access.
+ *   - Other errors are reported as usual. Pausing data collection in
+ *     Intel(R) Parallel Inspector only pauses tracing and analyzing
+ *     memory access. It does not pause tracing or analyzing threading APIs.
+ *   .
+ * - Intel(R) Parallel Amplifier:
+ *   - Continues to record data when new threads are started.
+ *   .
+ * - Other effects:
+ *   - Possible reduction of runtime overhead.
+ *   .
+ * @{
+ */
+#ifndef _ITTNOTIFY_H_
+/** @brief Pause collection */
+void ITTAPI __itt_pause(void);
+/** @brief Resume collection */
+void ITTAPI __itt_resume(void);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(ITTAPI, void, pause,   (void), ())
+ITT_STUBV(ITTAPI, void, resume,  (void), ())
+#define __itt_pause      ITTNOTIFY_VOID(pause)
+#define __itt_pause_ptr  ITTNOTIFY_NAME(pause)
+#define __itt_resume     ITTNOTIFY_VOID(resume)
+#define __itt_resume_ptr ITTNOTIFY_NAME(resume)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_pause()
+#define __itt_pause_ptr  0
+#define __itt_resume()
+#define __itt_resume_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_pause_ptr  0
+#define __itt_resume_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+#endif /* _ITTNOTIFY_H_ */
+/** @} legacy_control group */
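+
+/*
+ * Illustrative usage sketch (not part of the upstream header; the helper
+ * functions are hypothetical): collection can be limited to the phase of
+ * interest by pausing around setup work, e.g.
+ *
+ *     __itt_pause();       // stop collecting while inputs are prepared
+ *     prepare_inputs();    // hypothetical setup code
+ *     __itt_resume();      // collect data for the measured phase
+ *     run_solver();        // hypothetical measured phase
+ */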
+
+/**
+ * @defgroup legacy_threads Threads
+ * @ingroup legacy
+ * Threads group
+ * @warning Legacy API
+ * @{
+ */
+/**
+ * @deprecated Legacy API
+ * @brief Set the name to be associated with the thread in the analysis GUI.
+ * @return __itt_err upon failure (name or namelen is null, or name and namelen do not match)
+ */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+int LIBITTAPI __itt_thr_name_setA(const char    *name, int namelen);
+int LIBITTAPI __itt_thr_name_setW(const wchar_t *name, int namelen);
+#ifdef UNICODE
+#  define __itt_thr_name_set     __itt_thr_name_setW
+#  define __itt_thr_name_set_ptr __itt_thr_name_setW_ptr
+#else
+#  define __itt_thr_name_set     __itt_thr_name_setA
+#  define __itt_thr_name_set_ptr __itt_thr_name_setA_ptr
+#endif /* UNICODE */
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+int LIBITTAPI __itt_thr_name_set(const char *name, int namelen);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUB(LIBITTAPI, int, thr_name_setA, (const char    *name, int namelen), (name, namelen))
+ITT_STUB(LIBITTAPI, int, thr_name_setW, (const wchar_t *name, int namelen), (name, namelen))
+#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUB(LIBITTAPI, int, thr_name_set,  (const char    *name, int namelen), (name, namelen))
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_thr_name_setA     ITTNOTIFY_DATA(thr_name_setA)
+#define __itt_thr_name_setA_ptr ITTNOTIFY_NAME(thr_name_setA)
+#define __itt_thr_name_setW     ITTNOTIFY_DATA(thr_name_setW)
+#define __itt_thr_name_setW_ptr ITTNOTIFY_NAME(thr_name_setW)
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_thr_name_set     ITTNOTIFY_DATA(thr_name_set)
+#define __itt_thr_name_set_ptr ITTNOTIFY_NAME(thr_name_set)
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_thr_name_setA(name, namelen)
+#define __itt_thr_name_setA_ptr 0
+#define __itt_thr_name_setW(name, namelen)
+#define __itt_thr_name_setW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_thr_name_set(name, namelen)
+#define __itt_thr_name_set_ptr 0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_thr_name_setA_ptr 0
+#define __itt_thr_name_setW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_thr_name_set_ptr 0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
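+
+/*
+ * Illustrative usage sketch (not part of the upstream header): a worker
+ * thread can label itself for the analysis GUI shortly after it starts
+ * (namelen is assumed to be the name length without the terminator; under
+ * UNICODE on Windows the W variant with a wide string would be used), e.g.
+ *
+ *     static const char worker_name[] = "io worker";
+ *     __itt_thr_name_set(worker_name, (int)(sizeof(worker_name) - 1));
+ */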
+
+/**
+ * @deprecated Legacy API
+ * @brief Mark current thread as ignored from this point on, for the duration of its existence.
+ */
+void LIBITTAPI __itt_thr_ignore(void);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(LIBITTAPI, void, thr_ignore, (void), ())
+#define __itt_thr_ignore     ITTNOTIFY_VOID(thr_ignore)
+#define __itt_thr_ignore_ptr ITTNOTIFY_NAME(thr_ignore)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_thr_ignore()
+#define __itt_thr_ignore_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_thr_ignore_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+/** @} legacy_threads group */
+
+/**
+ * @defgroup legacy_sync Synchronization
+ * @ingroup legacy
+ * Synchronization group
+ * @warning Legacy API
+ * @{
+ */
+/**
+ * @hideinitializer
+ * @brief possible value of attribute argument for sync object type
+ */
+#define __itt_attr_barrier 1
+
+/**
+ * @hideinitializer
+ * @brief possible value of attribute argument for sync object type
+ */
+#define __itt_attr_mutex   2
+
+/**
+ * @deprecated Legacy API
+ * @brief Assign a name to a sync object using char or Unicode string
+ * @param[in] addr    - pointer to the sync object. You should use a real pointer to your object
+ *                      to make sure that the values don't clash with other object addresses
+ * @param[in] objtype - null-terminated object type string. If NULL is passed, the object will
+ *                      be assumed to be of generic "User Synchronization" type
+ * @param[in] objname - null-terminated object name string. If NULL, no name will be assigned
+ *                      to the object -- you can use the __itt_sync_rename call later to assign
+ *                      the name
+ * @param[in] attribute - one of [#__itt_attr_barrier, #__itt_attr_mutex] values which defines the
+ *                      exact semantics of how prepare/acquired/releasing calls work.
+ */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+void ITTAPI __itt_sync_set_nameA(void *addr, const char    *objtype, const char    *objname, int attribute);
+void ITTAPI __itt_sync_set_nameW(void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute);
+#ifdef UNICODE
+#  define __itt_sync_set_name     __itt_sync_set_nameW
+#  define __itt_sync_set_name_ptr __itt_sync_set_nameW_ptr
+#else /* UNICODE */
+#  define __itt_sync_set_name     __itt_sync_set_nameA
+#  define __itt_sync_set_name_ptr __itt_sync_set_nameA_ptr
+#endif /* UNICODE */
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+void ITTAPI __itt_sync_set_name(void *addr, const char* objtype, const char* objname, int attribute);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUBV(ITTAPI, void, sync_set_nameA, (void *addr, const char    *objtype, const char    *objname, int attribute), (addr, objtype, objname, attribute))
+ITT_STUBV(ITTAPI, void, sync_set_nameW, (void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute), (addr, objtype, objname, attribute))
+#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUBV(ITTAPI, void, sync_set_name,  (void *addr, const char    *objtype, const char    *objname, int attribute), (addr, objtype, objname, attribute))
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_sync_set_nameA     ITTNOTIFY_VOID(sync_set_nameA)
+#define __itt_sync_set_nameA_ptr ITTNOTIFY_NAME(sync_set_nameA)
+#define __itt_sync_set_nameW     ITTNOTIFY_VOID(sync_set_nameW)
+#define __itt_sync_set_nameW_ptr ITTNOTIFY_NAME(sync_set_nameW)
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_sync_set_name     ITTNOTIFY_VOID(sync_set_name)
+#define __itt_sync_set_name_ptr ITTNOTIFY_NAME(sync_set_name)
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_sync_set_nameA(addr, objtype, objname, attribute)
+#define __itt_sync_set_nameA_ptr 0
+#define __itt_sync_set_nameW(addr, objtype, objname, attribute)
+#define __itt_sync_set_nameW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_sync_set_name(addr, objtype, objname, attribute)
+#define __itt_sync_set_name_ptr 0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_sync_set_nameA_ptr 0
+#define __itt_sync_set_nameW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_sync_set_name_ptr 0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
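+
+/*
+ * Illustrative usage sketch (not part of the upstream header; my_spinlock is
+ * a hypothetical user type, and under UNICODE on Windows the W variant with
+ * wide strings would be used): a hand-written lock is typically described to
+ * the tool once, right after it is initialized, e.g.
+ *
+ *     struct my_spinlock { volatile long flag; } queue_guard = { 0 };
+ *     __itt_sync_set_name(&queue_guard, "my_spinlock", "queue guard", __itt_attr_mutex);
+ */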
+
+/**
+ * @deprecated Legacy API
+ * @brief Assign a name and type to a sync object using char or Unicode string
+ * @param[in] addr -      pointer to the sync object. You should use a real pointer to your object
+ *                        to make sure that the values don't clash with other object addresses
+ * @param[in] objtype -   null-terminated object type string. If NULL is passed, the object will
+ *                        be assumed to be of generic "User Synchronization" type
+ * @param[in] objname -   null-terminated object name string. If NULL, no name will be assigned
+ *                        to the object -- you can use the __itt_sync_rename call later to assign
+ *                        the name
+ * @param[in] typelen, namelen - the length of the string for the corresponding objtype and objname parameter
+ * @param[in] attribute - one of [#__itt_attr_barrier, #__itt_attr_mutex] values which defines the
+ *                        exact semantics of how prepare/acquired/releasing calls work.
+ * @return __itt_err upon failure (name or namelen is null, or name and namelen do not match)
+ */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+int LIBITTAPI __itt_notify_sync_nameA(void *addr, const char    *objtype, int typelen, const char    *objname, int namelen, int attribute);
+int LIBITTAPI __itt_notify_sync_nameW(void *addr, const wchar_t *objtype, int typelen, const wchar_t *objname, int namelen, int attribute);
+#ifdef UNICODE
+#  define __itt_notify_sync_name __itt_notify_sync_nameW
+#else
+#  define __itt_notify_sync_name __itt_notify_sync_nameA
+#endif
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+int LIBITTAPI __itt_notify_sync_name(void *addr, const char *objtype, int typelen, const char *objname, int namelen, int attribute);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUB(LIBITTAPI, int, notify_sync_nameA, (void *addr, const char    *objtype, int typelen, const char    *objname, int namelen, int attribute), (addr, objtype, typelen, objname, namelen, attribute))
+ITT_STUB(LIBITTAPI, int, notify_sync_nameW, (void *addr, const wchar_t *objtype, int typelen, const wchar_t *objname, int namelen, int attribute), (addr, objtype, typelen, objname, namelen, attribute))
+#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUB(LIBITTAPI, int, notify_sync_name,  (void *addr, const char    *objtype, int typelen, const char    *objname, int namelen, int attribute), (addr, objtype, typelen, objname, namelen, attribute))
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_notify_sync_nameA     ITTNOTIFY_DATA(notify_sync_nameA)
+#define __itt_notify_sync_nameA_ptr ITTNOTIFY_NAME(notify_sync_nameA)
+#define __itt_notify_sync_nameW     ITTNOTIFY_DATA(notify_sync_nameW)
+#define __itt_notify_sync_nameW_ptr ITTNOTIFY_NAME(notify_sync_nameW)
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_notify_sync_name     ITTNOTIFY_DATA(notify_sync_name)
+#define __itt_notify_sync_name_ptr ITTNOTIFY_NAME(notify_sync_name)
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_notify_sync_nameA(addr, objtype, typelen, objname, namelen, attribute)
+#define __itt_notify_sync_nameA_ptr 0
+#define __itt_notify_sync_nameW(addr, objtype, typelen, objname, namelen, attribute)
+#define __itt_notify_sync_nameW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_notify_sync_name(addr, objtype, typelen, objname, namelen, attribute)
+#define __itt_notify_sync_name_ptr 0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_notify_sync_nameA_ptr 0
+#define __itt_notify_sync_nameW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_notify_sync_name_ptr 0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @deprecated Legacy API
+ * @brief Enter spin loop on user-defined sync object
+ */
+void LIBITTAPI __itt_notify_sync_prepare(void* addr);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(LIBITTAPI, void, notify_sync_prepare, (void *addr), (addr))
+#define __itt_notify_sync_prepare     ITTNOTIFY_VOID(notify_sync_prepare)
+#define __itt_notify_sync_prepare_ptr ITTNOTIFY_NAME(notify_sync_prepare)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_notify_sync_prepare(addr)
+#define __itt_notify_sync_prepare_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_notify_sync_prepare_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @deprecated Legacy API
+ * @brief Quit spin loop without acquiring spin object
+ */
+void LIBITTAPI __itt_notify_sync_cancel(void *addr);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(LIBITTAPI, void, notify_sync_cancel, (void *addr), (addr))
+#define __itt_notify_sync_cancel     ITTNOTIFY_VOID(notify_sync_cancel)
+#define __itt_notify_sync_cancel_ptr ITTNOTIFY_NAME(notify_sync_cancel)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_notify_sync_cancel(addr)
+#define __itt_notify_sync_cancel_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_notify_sync_cancel_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @deprecated Legacy API
+ * @brief Successful spin loop completion (sync object acquired)
+ */
+void LIBITTAPI __itt_notify_sync_acquired(void *addr);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(LIBITTAPI, void, notify_sync_acquired, (void *addr), (addr))
+#define __itt_notify_sync_acquired     ITTNOTIFY_VOID(notify_sync_acquired)
+#define __itt_notify_sync_acquired_ptr ITTNOTIFY_NAME(notify_sync_acquired)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_notify_sync_acquired(addr)
+#define __itt_notify_sync_acquired_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_notify_sync_acquired_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @deprecated Legacy API
+ * @brief Start of the sync object releasing code. Called just before the lock release call.
+ */
+void LIBITTAPI __itt_notify_sync_releasing(void* addr);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(LIBITTAPI, void, notify_sync_releasing, (void *addr), (addr))
+#define __itt_notify_sync_releasing     ITTNOTIFY_VOID(notify_sync_releasing)
+#define __itt_notify_sync_releasing_ptr ITTNOTIFY_NAME(notify_sync_releasing)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_notify_sync_releasing(addr)
+#define __itt_notify_sync_releasing_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_notify_sync_releasing_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
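+
+/*
+ * Illustrative usage sketch (not part of the upstream header; lock,
+ * spin_try_lock and spin_unlock are hypothetical): the notify calls bracket
+ * a contended acquisition like this:
+ *
+ *     __itt_notify_sync_prepare(&lock);
+ *     while (!spin_try_lock(&lock))
+ *         ;                                   // spin
+ *     __itt_notify_sync_acquired(&lock);
+ *     // ... critical section ...
+ *     __itt_notify_sync_releasing(&lock);
+ *     spin_unlock(&lock);
+ */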
+/** @} legacy_sync group */
+
+#ifndef _ITTNOTIFY_H_
+/**
+ * @defgroup legacy_events Events
+ * @ingroup legacy
+ * Events group
+ * @{
+ */
+
+/** @brief user event type */
+typedef int __itt_event;
+
+/**
+ * @brief Create an event notification
+ * @note Fails if name or namelen is null, if name and namelen do not match, or if the user event feature is not enabled.
+ * @return non-zero event identifier upon success and __itt_err otherwise
+ */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+__itt_event LIBITTAPI __itt_event_createA(const char    *name, int namelen);
+__itt_event LIBITTAPI __itt_event_createW(const wchar_t *name, int namelen);
+#ifdef UNICODE
+#  define __itt_event_create     __itt_event_createW
+#  define __itt_event_create_ptr __itt_event_createW_ptr
+#else
+#  define __itt_event_create     __itt_event_createA
+#  define __itt_event_create_ptr __itt_event_createA_ptr
+#endif /* UNICODE */
+#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+__itt_event LIBITTAPI __itt_event_create(const char *name, int namelen);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ITT_STUB(LIBITTAPI, __itt_event, event_createA, (const char    *name, int namelen), (name, namelen))
+ITT_STUB(LIBITTAPI, __itt_event, event_createW, (const wchar_t *name, int namelen), (name, namelen))
+#else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ITT_STUB(LIBITTAPI, __itt_event, event_create,  (const char *name, int namelen), (name, namelen))
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_event_createA     ITTNOTIFY_DATA(event_createA)
+#define __itt_event_createA_ptr ITTNOTIFY_NAME(event_createA)
+#define __itt_event_createW     ITTNOTIFY_DATA(event_createW)
+#define __itt_event_createW_ptr ITTNOTIFY_NAME(event_createW)
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_event_create      ITTNOTIFY_DATA(event_create)
+#define __itt_event_create_ptr  ITTNOTIFY_NAME(event_create)
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_event_createA(name, namelen) (__itt_event)0
+#define __itt_event_createA_ptr 0
+#define __itt_event_createW(name, namelen) (__itt_event)0
+#define __itt_event_createW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_event_create(name, namelen)  (__itt_event)0
+#define __itt_event_create_ptr  0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_event_createA_ptr 0
+#define __itt_event_createW_ptr 0
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define __itt_event_create_ptr  0
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Record an event occurrence.
+ * @return __itt_err upon failure (invalid event id/user event feature not enabled)
+ */
+int LIBITTAPI __itt_event_start(__itt_event event);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUB(LIBITTAPI, int, event_start, (__itt_event event), (event))
+#define __itt_event_start     ITTNOTIFY_DATA(event_start)
+#define __itt_event_start_ptr ITTNOTIFY_NAME(event_start)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_event_start(event) (int)0
+#define __itt_event_start_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_event_start_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @brief Record an event end occurrence.
+ * @note Calling this is optional if events do not have durations.
+ * @return __itt_err upon failure (invalid event id/user event feature not enabled)
+ */
+int LIBITTAPI __itt_event_end(__itt_event event);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUB(LIBITTAPI, int, event_end, (__itt_event event), (event))
+#define __itt_event_end     ITTNOTIFY_DATA(event_end)
+#define __itt_event_end_ptr ITTNOTIFY_NAME(event_end)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_event_end(event) (int)0
+#define __itt_event_end_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_event_end_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
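+
+/*
+ * Illustrative usage sketch (not part of the upstream header; render_frame
+ * is hypothetical, and under UNICODE on Windows the W variant with a wide
+ * string would be used): an event is created once and then marked per
+ * occurrence, with start/end giving it a duration, e.g.
+ *
+ *     static __itt_event frame_event;
+ *     if (frame_event == 0)
+ *         frame_event = __itt_event_create("frame", 5);
+ *     __itt_event_start(frame_event);
+ *     render_frame();
+ *     __itt_event_end(frame_event);
+ */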
+/** @} legacy_events group */
+#endif /* _ITTNOTIFY_H_ */
+
+/**
+ * @defgroup legacy_memory Memory Accesses
+ * @ingroup legacy
+ * @{
+ */
+
+/**
+ * @deprecated Legacy API
+ * @brief Inform the tool of memory accesses on reading
+ */
+void LIBITTAPI __itt_memory_read(void *addr, size_t size);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(LIBITTAPI, void, memory_read, (void *addr, size_t size), (addr, size))
+#define __itt_memory_read     ITTNOTIFY_VOID(memory_read)
+#define __itt_memory_read_ptr ITTNOTIFY_NAME(memory_read)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_memory_read(addr, size)
+#define __itt_memory_read_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_memory_read_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @deprecated Legacy API
+ * @brief Inform the tool of memory accesses on writing
+ */
+void LIBITTAPI __itt_memory_write(void *addr, size_t size);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(LIBITTAPI, void, memory_write, (void *addr, size_t size), (addr, size))
+#define __itt_memory_write     ITTNOTIFY_VOID(memory_write)
+#define __itt_memory_write_ptr ITTNOTIFY_NAME(memory_write)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_memory_write(addr, size)
+#define __itt_memory_write_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_memory_write_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @deprecated Legacy API
+ * @brief Inform the tool of memory accesses on updating
+ */
+void LIBITTAPI __itt_memory_update(void *address, size_t size);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUBV(LIBITTAPI, void, memory_update, (void *addr, size_t size), (addr, size))
+#define __itt_memory_update     ITTNOTIFY_VOID(memory_update)
+#define __itt_memory_update_ptr ITTNOTIFY_NAME(memory_update)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_memory_update(addr, size)
+#define __itt_memory_update_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_memory_update_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
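+
+/*
+ * Illustrative usage sketch (not part of the upstream header; src, dst and n
+ * are hypothetical): accesses the tool cannot observe directly (for example
+ * a bulk copy) can be reported explicitly, e.g.
+ *
+ *     __itt_memory_read(src, n);     // n bytes about to be read from src
+ *     memcpy(dst, src, n);
+ *     __itt_memory_write(dst, n);    // n bytes written to dst
+ */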
+/** @} legacy_memory group */
+
+/**
+ * @defgroup legacy_state Thread and Object States
+ * @ingroup legacy
+ * @{
+ */
+
+/** @brief state type */
+typedef int __itt_state_t;
+
+/** @cond exclude_from_documentation */
+typedef enum __itt_obj_state {
+    __itt_obj_state_err = 0,
+    __itt_obj_state_clr = 1,
+    __itt_obj_state_set = 2,
+    __itt_obj_state_use = 3
+} __itt_obj_state_t;
+
+typedef enum __itt_thr_state {
+    __itt_thr_state_err = 0,
+    __itt_thr_state_clr = 1,
+    __itt_thr_state_set = 2
+} __itt_thr_state_t;
+
+typedef enum __itt_obj_prop {
+    __itt_obj_prop_watch    = 1,
+    __itt_obj_prop_ignore   = 2,
+    __itt_obj_prop_sharable = 3
+} __itt_obj_prop_t;
+
+typedef enum __itt_thr_prop {
+    __itt_thr_prop_quiet = 1
+} __itt_thr_prop_t;
+/** @endcond */
+
+/**
+ * @deprecated Legacy API
+ * @brief managing thread and object states
+ */
+__itt_state_t LIBITTAPI __itt_state_get(void);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUB(ITTAPI, __itt_state_t, state_get, (void), ())
+#define __itt_state_get     ITTNOTIFY_DATA(state_get)
+#define __itt_state_get_ptr ITTNOTIFY_NAME(state_get)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_state_get(void) (__itt_state_t)0
+#define __itt_state_get_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_state_get_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @deprecated Legacy API
+ * @brief managing thread and object states
+ */
+__itt_state_t LIBITTAPI __itt_state_set(__itt_state_t s);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUB(ITTAPI, __itt_state_t, state_set, (__itt_state_t s), (s))
+#define __itt_state_set     ITTNOTIFY_DATA(state_set)
+#define __itt_state_set_ptr ITTNOTIFY_NAME(state_set)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_state_set(s) (__itt_state_t)0
+#define __itt_state_set_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_state_set_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+
+/**
+ * @deprecated Legacy API
+ * @brief managing thread and object modes
+ */
+__itt_thr_state_t LIBITTAPI __itt_thr_mode_set(__itt_thr_prop_t p, __itt_thr_state_t s);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUB(ITTAPI, __itt_thr_state_t, thr_mode_set, (__itt_thr_prop_t p, __itt_thr_state_t s), (p, s))
+#define __itt_thr_mode_set     ITTNOTIFY_DATA(thr_mode_set)
+#define __itt_thr_mode_set_ptr ITTNOTIFY_NAME(thr_mode_set)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_thr_mode_set(p, s) (__itt_thr_state_t)0
+#define __itt_thr_mode_set_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_thr_mode_set_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
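+
+/*
+ * Illustrative usage sketch (not part of the upstream header; the exact
+ * tool-side effect is tool-defined): a thread's "quiet" property can be
+ * switched on, e.g.
+ *
+ *     __itt_thr_mode_set(__itt_thr_prop_quiet, __itt_thr_state_set);
+ */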
+
+/**
+ * @deprecated Legacy API
+ * @brief managing thread and object modes
+ */
+__itt_obj_state_t LIBITTAPI __itt_obj_mode_set(__itt_obj_prop_t p, __itt_obj_state_t s);
+
+/** @cond exclude_from_documentation */
+#ifndef INTEL_NO_MACRO_BODY
+#ifndef INTEL_NO_ITTNOTIFY_API
+ITT_STUB(ITTAPI, __itt_obj_state_t, obj_mode_set, (__itt_obj_prop_t p, __itt_obj_state_t s), (p, s))
+#define __itt_obj_mode_set     ITTNOTIFY_DATA(obj_mode_set)
+#define __itt_obj_mode_set_ptr ITTNOTIFY_NAME(obj_mode_set)
+#else  /* INTEL_NO_ITTNOTIFY_API */
+#define __itt_obj_mode_set(p, s) (__itt_obj_state_t)0
+#define __itt_obj_mode_set_ptr 0
+#endif /* INTEL_NO_ITTNOTIFY_API */
+#else  /* INTEL_NO_MACRO_BODY */
+#define __itt_obj_mode_set_ptr 0
+#endif /* INTEL_NO_MACRO_BODY */
+/** @endcond */
+/** @} legacy_state group */
+
+/** @cond exclude_from_documentation */
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+/** @endcond */
+
+#endif /* _LEGACY_ITTNOTIFY_H_ */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/prototype/ittnotify.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/tools_api/prototype/ittnotify.h
new file mode 100644 (file)
index 0000000..89fb5cf
--- /dev/null
@@ -0,0 +1,148 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _PROTOTYPE_ITTNOTIFY_H_
+#define _PROTOTYPE_ITTNOTIFY_H_
+/**
+ * @file
+ * @brief Prototype User API functions and types
+ */
+
+/** @cond exclude_from_documentation */
+#ifndef ITT_OS_WIN
+#  define ITT_OS_WIN   1
+#endif /* ITT_OS_WIN */
+
+#ifndef ITT_OS_LINUX
+#  define ITT_OS_LINUX 2
+#endif /* ITT_OS_LINUX */
+
+#ifndef ITT_OS_MAC
+#  define ITT_OS_MAC   3
+#endif /* ITT_OS_MAC */
+
+#ifndef ITT_OS
+#  if defined WIN32 || defined _WIN32
+#    define ITT_OS ITT_OS_WIN
+#  elif defined( __APPLE__ ) && defined( __MACH__ )
+#    define ITT_OS ITT_OS_MAC
+#  else
+#    define ITT_OS ITT_OS_LINUX
+#  endif
+#endif /* ITT_OS */
+
+#ifndef ITT_PLATFORM_WIN
+#  define ITT_PLATFORM_WIN 1
+#endif /* ITT_PLATFORM_WIN */
+
+#ifndef ITT_PLATFORM_POSIX
+#  define ITT_PLATFORM_POSIX 2
+#endif /* ITT_PLATFORM_POSIX */
+
+#ifndef ITT_PLATFORM
+#  if ITT_OS==ITT_OS_WIN
+#    define ITT_PLATFORM ITT_PLATFORM_WIN
+#  else
+#    define ITT_PLATFORM ITT_PLATFORM_POSIX
+#  endif /* _WIN32 */
+#endif /* ITT_PLATFORM */
+
+#include <stddef.h>
+#include <stdarg.h>
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#include <tchar.h>
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+#ifndef CDECL
+#  if ITT_PLATFORM==ITT_PLATFORM_WIN
+#    define CDECL __cdecl
+#  else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#    define CDECL /* nothing */
+#  endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* CDECL */
+
+#ifndef STDCALL
+#  if ITT_PLATFORM==ITT_PLATFORM_WIN
+#    define STDCALL __stdcall
+#  else  /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#    define STDCALL /* nothing */
+#  endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* STDCALL */
+
+#define ITTAPI_CALL    CDECL
+#define LIBITTAPI_CALL /* nothing */
+
+#define ITT_JOIN_AUX(p,n) p##n
+#define ITT_JOIN(p,n)     ITT_JOIN_AUX(p,n)
+
+#ifndef INTEL_ITTNOTIFY_PREFIX
+#  define INTEL_ITTNOTIFY_PREFIX __itt_
+#endif /* INTEL_ITTNOTIFY_PREFIX */
+#ifndef INTEL_ITTNOTIFY_POSTFIX
+#  define INTEL_ITTNOTIFY_POSTFIX _ptr_
+#endif /* INTEL_ITTNOTIFY_POSTFIX */
+
+#define ITTNOTIFY_NAME_AUX(n) ITT_JOIN(INTEL_ITTNOTIFY_PREFIX,n)
+#define ITTNOTIFY_NAME(n)     ITTNOTIFY_NAME_AUX(ITT_JOIN(n,INTEL_ITTNOTIFY_POSTFIX))
+
+#define ITTNOTIFY_VOID(n) (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n)
+#define ITTNOTIFY_DATA(n) (!ITTNOTIFY_NAME(n)) ?       0 : ITTNOTIFY_NAME(n)
+
+#ifdef ITT_STUB
+#undef ITT_STUB
+#endif
+#ifdef ITT_STUBV
+#undef ITT_STUBV
+#endif
+#define ITT_STUBV(api,type,name,args,params)                      \
+    typedef type (api* ITT_JOIN(ITTNOTIFY_NAME(name),_t)) args;   \
+    extern ITT_JOIN(ITTNOTIFY_NAME(name),_t) ITTNOTIFY_NAME(name);
+#define ITT_STUB ITT_STUBV
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+/** @endcond */
+
+/**
+ * @defgroup prototype Prototype API
+ * @{
+ * @}
+ */
+
+/****************************************************************************
+ * ??? group
+ ****************************************************************************/
+
+/** @cond exclude_from_documentation */
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+/** @endcond */
+
+#endif /* _PROTOTYPE_ITTNOTIFY_H_ */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/win32-tbb-export.def b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/win32-tbb-export.def
new file mode 100644 (file)
index 0000000..8a8ead2
--- /dev/null
@@ -0,0 +1,297 @@
+; Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+;
+; This file is part of Threading Building Blocks.
+;
+; Threading Building Blocks is free software; you can redistribute it
+; and/or modify it under the terms of the GNU General Public License
+; version 2 as published by the Free Software Foundation.
+;
+; Threading Building Blocks is distributed in the hope that it will be
+; useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+; of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+; GNU General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with Threading Building Blocks; if not, write to the Free Software
+; Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+;
+; As a special exception, you may use this file as part of a free software
+; library without restriction.  Specifically, if other files instantiate
+; templates or use macros or inline functions from this file, or you compile
+; this file and link it with other files to produce an executable, this
+; file does not by itself cause the resulting executable to be covered by
+; the GNU General Public License.  This exception does not however
+; invalidate any other reasons why the executable file might be covered by
+; the GNU General Public License.
+
+#include "tbb/tbb_config.h"
+
+EXPORTS
+
+; Assembly-language support that is called directly by clients
+;__TBB_machine_cmpswp1
+;__TBB_machine_cmpswp2
+;__TBB_machine_cmpswp4
+__TBB_machine_cmpswp8
+;__TBB_machine_fetchadd1
+;__TBB_machine_fetchadd2
+;__TBB_machine_fetchadd4
+__TBB_machine_fetchadd8
+;__TBB_machine_fetchstore1
+;__TBB_machine_fetchstore2
+;__TBB_machine_fetchstore4
+__TBB_machine_fetchstore8
+__TBB_machine_store8
+__TBB_machine_load8
+__TBB_machine_trylockbyte
+
+; cache_aligned_allocator.cpp
+?NFS_Allocate@internal@tbb@@YAPAXIIPAX@Z
+?NFS_GetLineSize@internal@tbb@@YAIXZ
+?NFS_Free@internal@tbb@@YAXPAX@Z
+?allocate_via_handler_v3@internal@tbb@@YAPAXI@Z
+?deallocate_via_handler_v3@internal@tbb@@YAXPAX@Z
+?is_malloc_used_v3@internal@tbb@@YA_NXZ
+
+; task.cpp v3
+?allocate@allocate_additional_child_of_proxy@internal@tbb@@QBEAAVtask@3@I@Z
+?allocate@allocate_child_proxy@internal@tbb@@QBEAAVtask@3@I@Z
+?allocate@allocate_continuation_proxy@internal@tbb@@QBEAAVtask@3@I@Z
+?allocate@allocate_root_proxy@internal@tbb@@SAAAVtask@3@I@Z
+?destroy@task_base@internal@interface5@tbb@@SAXAAVtask@4@@Z
+?free@allocate_additional_child_of_proxy@internal@tbb@@QBEXAAVtask@3@@Z
+?free@allocate_child_proxy@internal@tbb@@QBEXAAVtask@3@@Z
+?free@allocate_continuation_proxy@internal@tbb@@QBEXAAVtask@3@@Z
+?free@allocate_root_proxy@internal@tbb@@SAXAAVtask@3@@Z
+?internal_set_ref_count@task@tbb@@AAEXH@Z
+?internal_decrement_ref_count@task@tbb@@AAEHXZ
+?is_owned_by_current_thread@task@tbb@@QBE_NXZ
+?note_affinity@task@tbb@@UAEXG@Z
+?resize@affinity_partitioner_base_v3@internal@tbb@@AAEXI@Z
+?self@task@tbb@@SAAAV12@XZ
+?spawn_and_wait_for_all@task@tbb@@QAEXAAVtask_list@2@@Z
+?default_num_threads@task_scheduler_init@tbb@@SAHXZ
+?initialize@task_scheduler_init@tbb@@QAEXHI@Z
+?initialize@task_scheduler_init@tbb@@QAEXH@Z
+?terminate@task_scheduler_init@tbb@@QAEXXZ
+?observe@task_scheduler_observer_v3@internal@tbb@@QAEX_N@Z
+
+#if !TBB_NO_LEGACY
+; task_v2.cpp
+?destroy@task@tbb@@QAEXAAV12@@Z
+#endif
+
+; exception handling support
+#if __TBB_TASK_GROUP_CONTEXT
+?allocate@allocate_root_with_context_proxy@internal@tbb@@QBEAAVtask@3@I@Z
+?free@allocate_root_with_context_proxy@internal@tbb@@QBEXAAVtask@3@@Z
+?is_group_execution_cancelled@task_group_context@tbb@@QBE_NXZ
+?cancel_group_execution@task_group_context@tbb@@QAE_NXZ
+?reset@task_group_context@tbb@@QAEXXZ
+?init@task_group_context@tbb@@IAEXXZ
+?register_pending_exception@task_group_context@tbb@@QAEXXZ
+??1task_group_context@tbb@@QAE@XZ
+?name@captured_exception@tbb@@UBEPBDXZ
+?what@captured_exception@tbb@@UBEPBDXZ
+??1captured_exception@tbb@@UAE@XZ
+?move@captured_exception@tbb@@UAEPAV12@XZ
+?destroy@captured_exception@tbb@@UAEXXZ
+?set@captured_exception@tbb@@QAEXPBD0@Z
+?clear@captured_exception@tbb@@QAEXXZ
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+; Symbols for exceptions thrown from TBB
+?throw_bad_last_alloc_exception_v4@internal@tbb@@YAXXZ
+?throw_exception_v4@internal@tbb@@YAXW4exception_id@12@@Z
+?what@bad_last_alloc@tbb@@UBEPBDXZ
+?what@missing_wait@tbb@@UBEPBDXZ
+?what@invalid_multiple_scheduling@tbb@@UBEPBDXZ
+?what@improper_lock@tbb@@UBEPBDXZ
+
+; tbb_misc.cpp
+?assertion_failure@tbb@@YAXPBDH00@Z
+?get_initial_auto_partitioner_divisor@internal@tbb@@YAIXZ
+?handle_perror@internal@tbb@@YAXHPBD@Z
+?set_assertion_handler@tbb@@YAP6AXPBDH00@ZP6AX0H00@Z@Z
+?runtime_warning@internal@tbb@@YAXPBDZZ
+TBB_runtime_interface_version
+
+; itt_notify.cpp
+?itt_load_pointer_with_acquire_v3@internal@tbb@@YAPAXPBX@Z
+?itt_store_pointer_with_release_v3@internal@tbb@@YAXPAX0@Z
+?itt_set_sync_name_v3@internal@tbb@@YAXPAXPB_W@Z
+?itt_load_pointer_v3@internal@tbb@@YAPAXPBX@Z
+
+; pipeline.cpp
+??0pipeline@tbb@@QAE@XZ
+??1filter@tbb@@UAE@XZ
+??1pipeline@tbb@@UAE@XZ
+??_7pipeline@tbb@@6B@
+?add_filter@pipeline@tbb@@QAEXAAVfilter@2@@Z
+?clear@pipeline@tbb@@QAEXXZ
+?inject_token@pipeline@tbb@@AAEXAAVtask@2@@Z
+?run@pipeline@tbb@@QAEXI@Z
+#if __TBB_TASK_GROUP_CONTEXT
+?run@pipeline@tbb@@QAEXIAAVtask_group_context@2@@Z
+#endif
+?process_item@thread_bound_filter@tbb@@QAE?AW4result_type@12@XZ
+?try_process_item@thread_bound_filter@tbb@@QAE?AW4result_type@12@XZ
+
+; queuing_rw_mutex.cpp
+?internal_construct@queuing_rw_mutex@tbb@@QAEXXZ
+?acquire@scoped_lock@queuing_rw_mutex@tbb@@QAEXAAV23@_N@Z
+?downgrade_to_reader@scoped_lock@queuing_rw_mutex@tbb@@QAE_NXZ
+?release@scoped_lock@queuing_rw_mutex@tbb@@QAEXXZ
+?upgrade_to_writer@scoped_lock@queuing_rw_mutex@tbb@@QAE_NXZ
+?try_acquire@scoped_lock@queuing_rw_mutex@tbb@@QAE_NAAV23@_N@Z
+
+; reader_writer_lock.cpp
+?try_lock_read@reader_writer_lock@interface5@tbb@@QAE_NXZ
+?try_lock@reader_writer_lock@interface5@tbb@@QAE_NXZ
+?unlock@reader_writer_lock@interface5@tbb@@QAEXXZ
+?lock_read@reader_writer_lock@interface5@tbb@@QAEXXZ
+?lock@reader_writer_lock@interface5@tbb@@QAEXXZ
+?internal_construct@reader_writer_lock@interface5@tbb@@AAEXXZ
+?internal_destroy@reader_writer_lock@interface5@tbb@@AAEXXZ
+?internal_construct@scoped_lock@reader_writer_lock@interface5@tbb@@AAEXAAV234@@Z
+?internal_destroy@scoped_lock@reader_writer_lock@interface5@tbb@@AAEXXZ
+?internal_construct@scoped_lock_read@reader_writer_lock@interface5@tbb@@AAEXAAV234@@Z
+?internal_destroy@scoped_lock_read@reader_writer_lock@interface5@tbb@@AAEXXZ
+
+#if !TBB_NO_LEGACY
+; spin_rw_mutex.cpp v2
+?internal_acquire_reader@spin_rw_mutex@tbb@@CAXPAV12@@Z
+?internal_acquire_writer@spin_rw_mutex@tbb@@CA_NPAV12@@Z
+?internal_downgrade@spin_rw_mutex@tbb@@CAXPAV12@@Z
+?internal_itt_releasing@spin_rw_mutex@tbb@@CAXPAV12@@Z
+?internal_release_reader@spin_rw_mutex@tbb@@CAXPAV12@@Z
+?internal_release_writer@spin_rw_mutex@tbb@@CAXPAV12@@Z
+?internal_upgrade@spin_rw_mutex@tbb@@CA_NPAV12@@Z
+?internal_try_acquire_writer@spin_rw_mutex@tbb@@CA_NPAV12@@Z
+?internal_try_acquire_reader@spin_rw_mutex@tbb@@CA_NPAV12@@Z
+#endif
+
+; spin_rw_mutex v3
+?internal_construct@spin_rw_mutex_v3@tbb@@AAEXXZ
+?internal_upgrade@spin_rw_mutex_v3@tbb@@AAE_NXZ
+?internal_downgrade@spin_rw_mutex_v3@tbb@@AAEXXZ
+?internal_acquire_reader@spin_rw_mutex_v3@tbb@@AAEXXZ
+?internal_acquire_writer@spin_rw_mutex_v3@tbb@@AAE_NXZ
+?internal_release_reader@spin_rw_mutex_v3@tbb@@AAEXXZ
+?internal_release_writer@spin_rw_mutex_v3@tbb@@AAEXXZ
+?internal_try_acquire_reader@spin_rw_mutex_v3@tbb@@AAE_NXZ
+?internal_try_acquire_writer@spin_rw_mutex_v3@tbb@@AAE_NXZ
+
+; spin_mutex.cpp
+?internal_construct@spin_mutex@tbb@@QAEXXZ
+?internal_acquire@scoped_lock@spin_mutex@tbb@@AAEXAAV23@@Z
+?internal_release@scoped_lock@spin_mutex@tbb@@AAEXXZ
+?internal_try_acquire@scoped_lock@spin_mutex@tbb@@AAE_NAAV23@@Z
+
+; mutex.cpp
+?internal_acquire@scoped_lock@mutex@tbb@@AAEXAAV23@@Z
+?internal_release@scoped_lock@mutex@tbb@@AAEXXZ
+?internal_try_acquire@scoped_lock@mutex@tbb@@AAE_NAAV23@@Z
+?internal_construct@mutex@tbb@@AAEXXZ
+?internal_destroy@mutex@tbb@@AAEXXZ
+
+; recursive_mutex.cpp
+?internal_acquire@scoped_lock@recursive_mutex@tbb@@AAEXAAV23@@Z
+?internal_release@scoped_lock@recursive_mutex@tbb@@AAEXXZ
+?internal_try_acquire@scoped_lock@recursive_mutex@tbb@@AAE_NAAV23@@Z
+?internal_construct@recursive_mutex@tbb@@AAEXXZ
+?internal_destroy@recursive_mutex@tbb@@AAEXXZ
+
+; queuing_mutex.cpp
+?internal_construct@queuing_mutex@tbb@@QAEXXZ
+?acquire@scoped_lock@queuing_mutex@tbb@@QAEXAAV23@@Z
+?release@scoped_lock@queuing_mutex@tbb@@QAEXXZ
+?try_acquire@scoped_lock@queuing_mutex@tbb@@QAE_NAAV23@@Z
+
+; critical_section.cpp
+?internal_construct@critical_section_v4@internal@tbb@@QAEXXZ
+
+#if !TBB_NO_LEGACY
+; concurrent_hash_map.cpp
+?internal_grow_predicate@hash_map_segment_base@internal@tbb@@QBE_NXZ
+
+; concurrent_queue.cpp v2
+?advance@concurrent_queue_iterator_base@internal@tbb@@IAEXXZ
+?assign@concurrent_queue_iterator_base@internal@tbb@@IAEXABV123@@Z
+?internal_size@concurrent_queue_base@internal@tbb@@IBEHXZ
+??0concurrent_queue_base@internal@tbb@@IAE@I@Z
+??0concurrent_queue_iterator_base@internal@tbb@@IAE@ABVconcurrent_queue_base@12@@Z
+??1concurrent_queue_base@internal@tbb@@MAE@XZ
+??1concurrent_queue_iterator_base@internal@tbb@@IAE@XZ
+?internal_pop@concurrent_queue_base@internal@tbb@@IAEXPAX@Z
+?internal_pop_if_present@concurrent_queue_base@internal@tbb@@IAE_NPAX@Z
+?internal_push@concurrent_queue_base@internal@tbb@@IAEXPBX@Z
+?internal_push_if_not_full@concurrent_queue_base@internal@tbb@@IAE_NPBX@Z
+?internal_set_capacity@concurrent_queue_base@internal@tbb@@IAEXHI@Z
+#endif
+
+; concurrent_queue v3
+??1concurrent_queue_iterator_base_v3@internal@tbb@@IAE@XZ
+??0concurrent_queue_iterator_base_v3@internal@tbb@@IAE@ABVconcurrent_queue_base_v3@12@@Z
+??0concurrent_queue_iterator_base_v3@internal@tbb@@IAE@ABVconcurrent_queue_base_v3@12@I@Z
+?advance@concurrent_queue_iterator_base_v3@internal@tbb@@IAEXXZ
+?assign@concurrent_queue_iterator_base_v3@internal@tbb@@IAEXABV123@@Z
+??0concurrent_queue_base_v3@internal@tbb@@IAE@I@Z
+??1concurrent_queue_base_v3@internal@tbb@@MAE@XZ
+?internal_pop@concurrent_queue_base_v3@internal@tbb@@IAEXPAX@Z
+?internal_pop_if_present@concurrent_queue_base_v3@internal@tbb@@IAE_NPAX@Z
+?internal_push@concurrent_queue_base_v3@internal@tbb@@IAEXPBX@Z
+?internal_push_if_not_full@concurrent_queue_base_v3@internal@tbb@@IAE_NPBX@Z
+?internal_size@concurrent_queue_base_v3@internal@tbb@@IBEHXZ
+?internal_empty@concurrent_queue_base_v3@internal@tbb@@IBE_NXZ
+?internal_set_capacity@concurrent_queue_base_v3@internal@tbb@@IAEXHI@Z
+?internal_finish_clear@concurrent_queue_base_v3@internal@tbb@@IAEXXZ
+?internal_throw_exception@concurrent_queue_base_v3@internal@tbb@@IBEXXZ
+?assign@concurrent_queue_base_v3@internal@tbb@@IAEXABV123@@Z
+
+#if !TBB_NO_LEGACY
+; concurrent_vector.cpp v2
+?internal_assign@concurrent_vector_base@internal@tbb@@IAEXABV123@IP6AXPAXI@ZP6AX1PBXI@Z4@Z
+?internal_capacity@concurrent_vector_base@internal@tbb@@IBEIXZ
+?internal_clear@concurrent_vector_base@internal@tbb@@IAEXP6AXPAXI@Z_N@Z
+?internal_copy@concurrent_vector_base@internal@tbb@@IAEXABV123@IP6AXPAXPBXI@Z@Z
+?internal_grow_by@concurrent_vector_base@internal@tbb@@IAEIIIP6AXPAXI@Z@Z
+?internal_grow_to_at_least@concurrent_vector_base@internal@tbb@@IAEXIIP6AXPAXI@Z@Z
+?internal_push_back@concurrent_vector_base@internal@tbb@@IAEPAXIAAI@Z
+?internal_reserve@concurrent_vector_base@internal@tbb@@IAEXIII@Z
+#endif
+
+; concurrent_vector v3
+??1concurrent_vector_base_v3@internal@tbb@@IAE@XZ
+?internal_assign@concurrent_vector_base_v3@internal@tbb@@IAEXABV123@IP6AXPAXI@ZP6AX1PBXI@Z4@Z
+?internal_capacity@concurrent_vector_base_v3@internal@tbb@@IBEIXZ
+?internal_clear@concurrent_vector_base_v3@internal@tbb@@IAEIP6AXPAXI@Z@Z
+?internal_copy@concurrent_vector_base_v3@internal@tbb@@IAEXABV123@IP6AXPAXPBXI@Z@Z
+?internal_grow_by@concurrent_vector_base_v3@internal@tbb@@IAEIIIP6AXPAXPBXI@Z1@Z
+?internal_grow_to_at_least@concurrent_vector_base_v3@internal@tbb@@IAEXIIP6AXPAXPBXI@Z1@Z
+?internal_push_back@concurrent_vector_base_v3@internal@tbb@@IAEPAXIAAI@Z
+?internal_reserve@concurrent_vector_base_v3@internal@tbb@@IAEXIII@Z
+?internal_compact@concurrent_vector_base_v3@internal@tbb@@IAEPAXIPAXP6AX0I@ZP6AX0PBXI@Z@Z
+?internal_swap@concurrent_vector_base_v3@internal@tbb@@IAEXAAV123@@Z
+?internal_throw_exception@concurrent_vector_base_v3@internal@tbb@@IBEXI@Z
+?internal_resize@concurrent_vector_base_v3@internal@tbb@@IAEXIIIPBXP6AXPAXI@ZP6AX10I@Z@Z
+?internal_grow_to_at_least_with_result@concurrent_vector_base_v3@internal@tbb@@IAEIIIP6AXPAXPBXI@Z1@Z
+
+; tbb_thread
+?join@tbb_thread_v3@internal@tbb@@QAEXXZ
+?detach@tbb_thread_v3@internal@tbb@@QAEXXZ
+?internal_start@tbb_thread_v3@internal@tbb@@AAEXP6GIPAX@Z0@Z
+?allocate_closure_v3@internal@tbb@@YAPAXI@Z
+?free_closure_v3@internal@tbb@@YAXPAX@Z
+?hardware_concurrency@tbb_thread_v3@internal@tbb@@SAIXZ
+?thread_yield_v3@internal@tbb@@YAXXZ
+?thread_sleep_v3@internal@tbb@@YAXABVinterval_t@tick_count@2@@Z
+?move_v3@internal@tbb@@YAXAAVtbb_thread_v3@12@0@Z
+?thread_get_id_v3@internal@tbb@@YA?AVid@tbb_thread_v3@12@XZ
+
+; condition_variable
+?internal_initialize_condition_variable@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z
+?internal_condition_variable_wait@internal@interface5@tbb@@YA_NAATcondvar_impl_t@123@PAVmutex@3@PBVinterval_t@tick_count@3@@Z
+?internal_condition_variable_notify_one@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z
+?internal_condition_variable_notify_all@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z
+?internal_destroy_condition_variable@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z
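
The #include of tbb/tbb_config.h and the #if guards imply that this .def file is passed through the C preprocessor before the linker sees it, so only the symbols enabled for the current configuration are exported. For orientation, a minimal client sketch (illustration only, not part of this commit): out-of-line calls made by such a program against the win32 tbb.dll resolve to decorated entries in the EXPORTS list above, for example the task_scheduler_init ones.

    #include "tbb/task_scheduler_init.h"
    #include "tbb/spin_mutex.h"

    static tbb::spin_mutex counter_mutex;
    static long counter = 0;

    int main() {
        // the constructor resolves to one of the ?initialize@task_scheduler_init@... entries
        tbb::task_scheduler_init init;
        {
            tbb::spin_mutex::scoped_lock lock(counter_mutex);  // block-scoped locking
            ++counter;                                         // protected update
        }
        return 0;  // the destructor releases the scheduler (?terminate@task_scheduler_init@...)
    }
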
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/win64-gcc-tbb-export.def b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/win64-gcc-tbb-export.def
new file mode 100644 (file)
index 0000000..d95078d
--- /dev/null
@@ -0,0 +1,365 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+
+#include "tbb/tbb_config.h"
+
+{
+global:
+
+/* cache_aligned_allocator.cpp */
+_ZN3tbb8internal12NFS_AllocateEyyPv; // MODIFIED LINUX ENTRY
+_ZN3tbb8internal15NFS_GetLineSizeEv;
+_ZN3tbb8internal8NFS_FreeEPv;
+_ZN3tbb8internal23allocate_via_handler_v3Ey; // MODIFIED LINUX ENTRY
+_ZN3tbb8internal25deallocate_via_handler_v3EPv;
+_ZN3tbb8internal17is_malloc_used_v3Ev;
+
+/* task.cpp v3 */
+_ZN3tbb4task13note_affinityEt;
+_ZN3tbb4task22internal_set_ref_countEi;
+_ZN3tbb4task28internal_decrement_ref_countEv;
+_ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE;
+_ZN3tbb4task4selfEv;
+_ZN3tbb10interface58internal9task_base7destroyERNS_4taskE;
+_ZNK3tbb4task26is_owned_by_current_threadEv;
+_ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE;
+_ZN3tbb8internal19allocate_root_proxy8allocateEy; // MODIFIED LINUX ENTRY
+_ZN3tbb8internal28affinity_partitioner_base_v36resizeEj;
+_ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE;
+_ZNK3tbb8internal20allocate_child_proxy8allocateEy; // MODIFIED LINUX ENTRY
+_ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE;
+_ZNK3tbb8internal27allocate_continuation_proxy8allocateEy; // MODIFIED LINUX ENTRY
+_ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE;
+_ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEy; // MODIFIED LINUX ENTRY
+_ZTIN3tbb4taskE;
+_ZTSN3tbb4taskE;
+_ZTVN3tbb4taskE;
+_ZN3tbb19task_scheduler_init19default_num_threadsEv;
+_ZN3tbb19task_scheduler_init10initializeEiy;  // MODIFIED LINUX ENTRY
+_ZN3tbb19task_scheduler_init10initializeEi;
+_ZN3tbb19task_scheduler_init9terminateEv;
+_ZN3tbb8internal26task_scheduler_observer_v37observeEb;
+_ZN3tbb10empty_task7executeEv;
+_ZN3tbb10empty_taskD0Ev;
+_ZN3tbb10empty_taskD1Ev;
+_ZTIN3tbb10empty_taskE;
+_ZTSN3tbb10empty_taskE;
+_ZTVN3tbb10empty_taskE;
+
+#if !TBB_NO_LEGACY
+/* task_v2.cpp */
+_ZN3tbb4task7destroyERS0_;
+#endif /* !TBB_NO_LEGACY */
+
+/* Exception handling in task scheduler */
+#if __TBB_TASK_GROUP_CONTEXT
+_ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEy; // MODIFIED LINUX ENTRY
+_ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE;
+_ZNK3tbb18task_group_context28is_group_execution_cancelledEv;
+_ZN3tbb18task_group_context22cancel_group_executionEv;
+_ZN3tbb18task_group_context26register_pending_exceptionEv;
+_ZN3tbb18task_group_context5resetEv;
+_ZN3tbb18task_group_context4initEv;
+_ZN3tbb18task_group_contextD1Ev;
+_ZN3tbb18task_group_contextD2Ev;
+_ZNK3tbb18captured_exception4nameEv;
+_ZNK3tbb18captured_exception4whatEv;
+_ZN3tbb18captured_exception10throw_selfEv;
+_ZN3tbb18captured_exception3setEPKcS2_;
+_ZN3tbb18captured_exception4moveEv;
+_ZN3tbb18captured_exception5clearEv;
+_ZN3tbb18captured_exception7destroyEv;
+_ZN3tbb18captured_exception8allocateEPKcS2_;
+_ZN3tbb18captured_exceptionD0Ev;
+_ZN3tbb18captured_exceptionD1Ev;
+_ZTIN3tbb18captured_exceptionE;
+_ZTSN3tbb18captured_exceptionE;
+_ZTVN3tbb18captured_exceptionE;
+_ZN3tbb13tbb_exceptionD2Ev;
+_ZTIN3tbb13tbb_exceptionE;
+_ZTSN3tbb13tbb_exceptionE;
+_ZTVN3tbb13tbb_exceptionE;
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+/* Symbols for exceptions thrown from TBB */
+_ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev;
+_ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE;
+_ZN3tbb14bad_last_allocD0Ev;
+_ZN3tbb14bad_last_allocD1Ev;
+_ZNK3tbb14bad_last_alloc4whatEv;
+_ZTIN3tbb14bad_last_allocE;
+_ZTSN3tbb14bad_last_allocE;
+_ZTVN3tbb14bad_last_allocE;
+_ZN3tbb12missing_waitD0Ev;
+_ZN3tbb12missing_waitD1Ev;
+_ZNK3tbb12missing_wait4whatEv;
+_ZTIN3tbb12missing_waitE;
+_ZTSN3tbb12missing_waitE;
+_ZTVN3tbb12missing_waitE;
+_ZN3tbb27invalid_multiple_schedulingD0Ev;
+_ZN3tbb27invalid_multiple_schedulingD1Ev;
+_ZNK3tbb27invalid_multiple_scheduling4whatEv;
+_ZTIN3tbb27invalid_multiple_schedulingE;
+_ZTSN3tbb27invalid_multiple_schedulingE;
+_ZTVN3tbb27invalid_multiple_schedulingE;
+_ZN3tbb13improper_lockD0Ev;
+_ZN3tbb13improper_lockD1Ev;
+_ZNK3tbb13improper_lock4whatEv;
+_ZTIN3tbb13improper_lockE;
+_ZTSN3tbb13improper_lockE;
+_ZTVN3tbb13improper_lockE;
+
+/* tbb_misc.cpp */
+_ZN3tbb17assertion_failureEPKciS1_S1_;
+_ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E;
+_ZN3tbb8internal36get_initial_auto_partitioner_divisorEv;
+_ZN3tbb8internal13handle_perrorEiPKc;
+_ZN3tbb8internal15runtime_warningEPKcz;
+TBB_runtime_interface_version;
+
+/* itt_notify.cpp */
+_ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv;
+_ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_;
+_ZN3tbb8internal20itt_set_sync_name_v3EPvPKc; // MODIFIED LINUX ENTRY
+_ZN3tbb8internal19itt_load_pointer_v3EPKv;
+
+/* pipeline.cpp */
+_ZTIN3tbb6filterE;
+_ZTSN3tbb6filterE;
+_ZTVN3tbb6filterE;
+_ZN3tbb6filterD2Ev;
+_ZN3tbb8pipeline10add_filterERNS_6filterE;
+_ZN3tbb8pipeline12inject_tokenERNS_4taskE;
+_ZN3tbb8pipeline13remove_filterERNS_6filterE;
+_ZN3tbb8pipeline3runEy; // MODIFIED LINUX ENTRY
+#if __TBB_TASK_GROUP_CONTEXT
+_ZN3tbb8pipeline3runEyRNS_18task_group_contextE; // MODIFIED LINUX ENTRY
+#endif
+_ZN3tbb8pipeline5clearEv;
+_ZN3tbb19thread_bound_filter12process_itemEv;
+_ZN3tbb19thread_bound_filter16try_process_itemEv;
+_ZTIN3tbb8pipelineE;
+_ZTSN3tbb8pipelineE;
+_ZTVN3tbb8pipelineE;
+_ZN3tbb8pipelineC1Ev;
+_ZN3tbb8pipelineC2Ev;
+_ZN3tbb8pipelineD0Ev;
+_ZN3tbb8pipelineD1Ev;
+_ZN3tbb8pipelineD2Ev;
+
+/* queuing_rw_mutex.cpp */
+_ZN3tbb16queuing_rw_mutex18internal_constructEv;
+_ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv;
+_ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv;
+_ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b;
+_ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv;
+_ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b;
+
+/* reader_writer_lock.cpp */
+_ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv;
+_ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_;
+_ZN3tbb10interface518reader_writer_lock13try_lock_readEv;
+_ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv;
+_ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_;
+_ZN3tbb10interface518reader_writer_lock16internal_destroyEv;
+_ZN3tbb10interface518reader_writer_lock18internal_constructEv;
+_ZN3tbb10interface518reader_writer_lock4lockEv;
+_ZN3tbb10interface518reader_writer_lock6unlockEv;
+_ZN3tbb10interface518reader_writer_lock8try_lockEv;
+_ZN3tbb10interface518reader_writer_lock9lock_readEv;
+
+#if !TBB_NO_LEGACY
+/* spin_rw_mutex.cpp v2 */
+_ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_;
+_ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_;
+_ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_;
+_ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_;
+_ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_;
+_ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_;
+_ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_;
+_ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_;
+_ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_;
+#endif
+
+/* spin_rw_mutex v3 */
+_ZN3tbb16spin_rw_mutex_v318internal_constructEv;
+_ZN3tbb16spin_rw_mutex_v316internal_upgradeEv;
+_ZN3tbb16spin_rw_mutex_v318internal_downgradeEv;
+_ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv;
+_ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv;
+_ZN3tbb16spin_rw_mutex_v323internal_release_readerEv;
+_ZN3tbb16spin_rw_mutex_v323internal_release_writerEv;
+_ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv;
+_ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv;
+
+/* spin_mutex.cpp */
+_ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_;
+_ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv;
+_ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_;
+_ZN3tbb10spin_mutex18internal_constructEv;
+
+/* mutex.cpp */
+_ZN3tbb5mutex11scoped_lock16internal_acquireERS0_;
+_ZN3tbb5mutex11scoped_lock16internal_releaseEv;
+_ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_;
+_ZN3tbb5mutex16internal_destroyEv;
+_ZN3tbb5mutex18internal_constructEv;
+
+/* recursive_mutex.cpp */
+_ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_;
+_ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv;
+_ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_;
+_ZN3tbb15recursive_mutex16internal_destroyEv;
+_ZN3tbb15recursive_mutex18internal_constructEv;
+
+/* QueuingMutex.cpp */
+_ZN3tbb13queuing_mutex18internal_constructEv;
+_ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_;
+_ZN3tbb13queuing_mutex11scoped_lock7releaseEv;
+_ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_;
+
+/* critical_section.cpp */
+_ZN3tbb8internal19critical_section_v418internal_constructEv;
+
+#if !TBB_NO_LEGACY
+/* concurrent_hash_map */
+_ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv;
+
+/* concurrent_queue.cpp v2 */
+_ZN3tbb8internal21concurrent_queue_base12internal_popEPv;
+_ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv;
+_ZN3tbb8internal21concurrent_queue_base21internal_set_capacityExy; // MODIFIED LINUX ENTRY
+_ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv;
+_ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv;
+_ZN3tbb8internal21concurrent_queue_baseC2Ey; // MODIFIED LINUX ENTRY
+_ZN3tbb8internal21concurrent_queue_baseD2Ev;
+_ZTIN3tbb8internal21concurrent_queue_baseE;
+_ZTSN3tbb8internal21concurrent_queue_baseE;
+_ZTVN3tbb8internal21concurrent_queue_baseE;
+_ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_;
+_ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv;
+_ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE;
+_ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev;
+_ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv;
+#endif
+
+/* concurrent_queue v3 */
+/* constructors */
+_ZN3tbb8internal24concurrent_queue_base_v3C2Ey; // MODIFIED LINUX ENTRY
+_ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E;
+_ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Ey; // MODIFIED LINUX ENTRY
+/* destructors */
+_ZN3tbb8internal24concurrent_queue_base_v3D2Ev;
+_ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev;
+/* typeinfo */
+_ZTIN3tbb8internal24concurrent_queue_base_v3E;
+_ZTSN3tbb8internal24concurrent_queue_base_v3E;
+/* vtable */
+_ZTVN3tbb8internal24concurrent_queue_base_v3E;
+/* methods */
+_ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_;
+_ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv;
+_ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv;
+_ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv;
+_ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv;
+_ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv;
+_ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv;
+_ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityExy; // MODIFIED LINUX ENTRY
+_ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv;
+_ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv;
+_ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv;
+_ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_;
+
+#if !TBB_NO_LEGACY
+/* concurrent_vector.cpp v2 */
+_ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_yPFvPvPKvyE; // MODIFIED LINUX ENTRY
+_ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvyEb; // MODIFIED LINUX ENTRY
+_ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_yPFvPvyEPFvS4_PKvyESA_; // MODIFIED LINUX ENTRY
+_ZN3tbb8internal22concurrent_vector_base16internal_grow_byEyyPFvPvyE; // MODIFIED LINUX ENTRY
+_ZN3tbb8internal22concurrent_vector_base16internal_reserveEyyy; // MODIFIED LINUX ENTRY
+_ZN3tbb8internal22concurrent_vector_base18internal_push_backEyRy; // MODIFIED LINUX ENTRY
+_ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEyyPFvPvyE; // MODIFIED LINUX ENTRY
+_ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv;
+#endif
+
+/* concurrent_vector v3 */
+_ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_yPFvPvPKvyE; // MODIFIED LINUX ENTRY
+_ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvyE; // MODIFIED LINUX ENTRY
+_ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_yPFvPvyEPFvS4_PKvyESA_; // MODIFIED LINUX ENTRY
+_ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEyyPFvPvPKvyES4_; // MODIFIED LINUX ENTRY
+_ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEyyy; // MODIFIED LINUX ENTRY
+_ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEyRy; // MODIFIED LINUX ENTRY
+_ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEyyPFvPvPKvyES4_; // MODIFIED LINUX ENTRY
+_ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv;
+_ZN3tbb8internal25concurrent_vector_base_v316internal_compactEyPvPFvS2_yEPFvS2_PKvyE; // MODIFIED LINUX ENTRY
+_ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_;
+_ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEy; // MODIFIED LINUX ENTRY
+_ZN3tbb8internal25concurrent_vector_base_v3D2Ev;
+_ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEyyyPKvPFvPvyEPFvS4_S3_yE; // MODIFIED LINUX ENTRY
+_ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEyyPFvPvPKvyES4_; // MODIFIED LINUX ENTRY
+
+/* tbb_thread */
+_ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv;
+_ZN3tbb8internal13tbb_thread_v36detachEv;
+_ZN3tbb8internal16thread_get_id_v3Ev;
+_ZN3tbb8internal15free_closure_v3EPv;
+_ZN3tbb8internal13tbb_thread_v34joinEv;
+_ZN3tbb8internal13tbb_thread_v314internal_startEPFjPvES2_;  // MODIFIED LINUX ENTRY
+_ZN3tbb8internal19allocate_closure_v3Ey;  // MODIFIED LINUX ENTRY
+_ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_;
+_ZN3tbb8internal15thread_yield_v3Ev;
+_ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE;
+
+/* condition_variable */
+_ZN3tbb10interface58internal32internal_condition_variable_waitERNS1_14condvar_impl_tEPNS_5mutexEPKNS_10tick_count10interval_tE;
+_ZN3tbb10interface58internal35internal_destroy_condition_variableERNS1_14condvar_impl_tE;
+_ZN3tbb10interface58internal38internal_condition_variable_notify_allERNS1_14condvar_impl_tE;
+_ZN3tbb10interface58internal38internal_condition_variable_notify_oneERNS1_14condvar_impl_tE;
+_ZN3tbb10interface58internal38internal_initialize_condition_variableERNS1_14condvar_impl_tE;
+
+local:
+
+/* TBB symbols */
+*3tbb*;
+*__TBB*;
+
+/* Intel Compiler (libirc) symbols */
+__intel_*;
+_intel_*;
+get_msg_buf;
+get_text_buf;
+message_catalog;
+print_buf;
+irc__get_msg;
+irc__print;
+
+};
+
+
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/win64-tbb-export.def b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/win64-tbb-export.def
new file mode 100644 (file)
index 0000000..1ca8ed7
--- /dev/null
@@ -0,0 +1,293 @@
+; Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+;
+; This file is part of Threading Building Blocks.
+;
+; Threading Building Blocks is free software; you can redistribute it
+; and/or modify it under the terms of the GNU General Public License
+; version 2 as published by the Free Software Foundation.
+;
+; Threading Building Blocks is distributed in the hope that it will be
+; useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+; of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+; GNU General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with Threading Building Blocks; if not, write to the Free Software
+; Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+;
+; As a special exception, you may use this file as part of a free software
+; library without restriction.  Specifically, if other files instantiate
+; templates or use macros or inline functions from this file, or you compile
+; this file and link it with other files to produce an executable, this
+; file does not by itself cause the resulting executable to be covered by
+; the GNU General Public License.  This exception does not however
+; invalidate any other reasons why the executable file might be covered by
+; the GNU General Public License.
+
+; This file is organized with a section for each .cpp file.
+; Each of these sections is in alphabetical order.
+
+#include "tbb/tbb_config.h"
+
+EXPORTS
+
+; Assembly-language support that is called directly by clients
+__TBB_machine_cmpswp1
+__TBB_machine_fetchadd1
+__TBB_machine_fetchstore1
+__TBB_machine_cmpswp2
+__TBB_machine_fetchadd2
+__TBB_machine_fetchstore2
+__TBB_machine_pause
+
+; cache_aligned_allocator.cpp
+?NFS_Allocate@internal@tbb@@YAPEAX_K0PEAX@Z
+?NFS_GetLineSize@internal@tbb@@YA_KXZ
+?NFS_Free@internal@tbb@@YAXPEAX@Z
+?allocate_via_handler_v3@internal@tbb@@YAPEAX_K@Z
+?deallocate_via_handler_v3@internal@tbb@@YAXPEAX@Z
+?is_malloc_used_v3@internal@tbb@@YA_NXZ
+
+
+; task.cpp v3
+?resize@affinity_partitioner_base_v3@internal@tbb@@AEAAXI@Z
+?allocate@allocate_additional_child_of_proxy@internal@tbb@@QEBAAEAVtask@3@_K@Z
+?allocate@allocate_child_proxy@internal@tbb@@QEBAAEAVtask@3@_K@Z
+?allocate@allocate_continuation_proxy@internal@tbb@@QEBAAEAVtask@3@_K@Z
+?allocate@allocate_root_proxy@internal@tbb@@SAAEAVtask@3@_K@Z
+?destroy@task_base@internal@interface5@tbb@@SAXAEAVtask@4@@Z
+?free@allocate_additional_child_of_proxy@internal@tbb@@QEBAXAEAVtask@3@@Z
+?free@allocate_child_proxy@internal@tbb@@QEBAXAEAVtask@3@@Z
+?free@allocate_continuation_proxy@internal@tbb@@QEBAXAEAVtask@3@@Z
+?free@allocate_root_proxy@internal@tbb@@SAXAEAVtask@3@@Z
+?internal_set_ref_count@task@tbb@@AEAAXH@Z
+?internal_decrement_ref_count@task@tbb@@AEAA_JXZ
+?is_owned_by_current_thread@task@tbb@@QEBA_NXZ
+?note_affinity@task@tbb@@UEAAXG@Z
+?self@task@tbb@@SAAEAV12@XZ
+?spawn_and_wait_for_all@task@tbb@@QEAAXAEAVtask_list@2@@Z
+?default_num_threads@task_scheduler_init@tbb@@SAHXZ
+?initialize@task_scheduler_init@tbb@@QEAAXH_K@Z
+?initialize@task_scheduler_init@tbb@@QEAAXH@Z
+?terminate@task_scheduler_init@tbb@@QEAAXXZ
+?observe@task_scheduler_observer_v3@internal@tbb@@QEAAX_N@Z
+
+#if !TBB_NO_LEGACY
+; task_v2.cpp
+?destroy@task@tbb@@QEAAXAEAV12@@Z
+#endif
+
+; Exception handling in task scheduler
+#if __TBB_TASK_GROUP_CONTEXT
+?allocate@allocate_root_with_context_proxy@internal@tbb@@QEBAAEAVtask@3@_K@Z
+?free@allocate_root_with_context_proxy@internal@tbb@@QEBAXAEAVtask@3@@Z
+?is_group_execution_cancelled@task_group_context@tbb@@QEBA_NXZ
+?cancel_group_execution@task_group_context@tbb@@QEAA_NXZ
+?reset@task_group_context@tbb@@QEAAXXZ
+?init@task_group_context@tbb@@IEAAXXZ
+?register_pending_exception@task_group_context@tbb@@QEAAXXZ
+??1task_group_context@tbb@@QEAA@XZ
+?name@captured_exception@tbb@@UEBAPEBDXZ
+?what@captured_exception@tbb@@UEBAPEBDXZ
+??1captured_exception@tbb@@UEAA@XZ
+?move@captured_exception@tbb@@UEAAPEAV12@XZ
+?destroy@captured_exception@tbb@@UEAAXXZ
+?set@captured_exception@tbb@@QEAAXPEBD0@Z
+?clear@captured_exception@tbb@@QEAAXXZ
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+; Symbols for exceptions thrown from TBB
+?throw_bad_last_alloc_exception_v4@internal@tbb@@YAXXZ
+?throw_exception_v4@internal@tbb@@YAXW4exception_id@12@@Z
+?what@bad_last_alloc@tbb@@UEBAPEBDXZ
+?what@missing_wait@tbb@@UEBAPEBDXZ
+?what@invalid_multiple_scheduling@tbb@@UEBAPEBDXZ
+?what@improper_lock@tbb@@UEBAPEBDXZ
+
+; tbb_misc.cpp
+?assertion_failure@tbb@@YAXPEBDH00@Z
+?get_initial_auto_partitioner_divisor@internal@tbb@@YA_KXZ
+?handle_perror@internal@tbb@@YAXHPEBD@Z
+?set_assertion_handler@tbb@@YAP6AXPEBDH00@ZP6AX0H00@Z@Z
+?runtime_warning@internal@tbb@@YAXPEBDZZ
+TBB_runtime_interface_version
+
+; itt_notify.cpp
+?itt_load_pointer_with_acquire_v3@internal@tbb@@YAPEAXPEBX@Z
+?itt_store_pointer_with_release_v3@internal@tbb@@YAXPEAX0@Z
+?itt_load_pointer_v3@internal@tbb@@YAPEAXPEBX@Z
+?itt_set_sync_name_v3@internal@tbb@@YAXPEAXPEB_W@Z
+
+; pipeline.cpp
+??_7pipeline@tbb@@6B@
+??0pipeline@tbb@@QEAA@XZ
+??1filter@tbb@@UEAA@XZ
+??1pipeline@tbb@@UEAA@XZ
+?add_filter@pipeline@tbb@@QEAAXAEAVfilter@2@@Z
+?clear@pipeline@tbb@@QEAAXXZ
+?inject_token@pipeline@tbb@@AEAAXAEAVtask@2@@Z
+?run@pipeline@tbb@@QEAAX_K@Z
+#if __TBB_TASK_GROUP_CONTEXT
+?run@pipeline@tbb@@QEAAX_KAEAVtask_group_context@2@@Z
+#endif
+?process_item@thread_bound_filter@tbb@@QEAA?AW4result_type@12@XZ
+?try_process_item@thread_bound_filter@tbb@@QEAA?AW4result_type@12@XZ
+
+; queuing_rw_mutex.cpp
+?internal_construct@queuing_rw_mutex@tbb@@QEAAXXZ
+?acquire@scoped_lock@queuing_rw_mutex@tbb@@QEAAXAEAV23@_N@Z
+?downgrade_to_reader@scoped_lock@queuing_rw_mutex@tbb@@QEAA_NXZ
+?release@scoped_lock@queuing_rw_mutex@tbb@@QEAAXXZ
+?upgrade_to_writer@scoped_lock@queuing_rw_mutex@tbb@@QEAA_NXZ
+?try_acquire@scoped_lock@queuing_rw_mutex@tbb@@QEAA_NAEAV23@_N@Z
+
+; reader_writer_lock.cpp
+?try_lock_read@reader_writer_lock@interface5@tbb@@QEAA_NXZ
+?try_lock@reader_writer_lock@interface5@tbb@@QEAA_NXZ
+?unlock@reader_writer_lock@interface5@tbb@@QEAAXXZ
+?lock_read@reader_writer_lock@interface5@tbb@@QEAAXXZ
+?lock@reader_writer_lock@interface5@tbb@@QEAAXXZ
+?internal_construct@reader_writer_lock@interface5@tbb@@AEAAXXZ
+?internal_destroy@reader_writer_lock@interface5@tbb@@AEAAXXZ
+?internal_construct@scoped_lock@reader_writer_lock@interface5@tbb@@AEAAXAEAV234@@Z
+?internal_destroy@scoped_lock@reader_writer_lock@interface5@tbb@@AEAAXXZ
+?internal_construct@scoped_lock_read@reader_writer_lock@interface5@tbb@@AEAAXAEAV234@@Z
+?internal_destroy@scoped_lock_read@reader_writer_lock@interface5@tbb@@AEAAXXZ
+
+#if !TBB_NO_LEGACY
+; spin_rw_mutex.cpp v2
+?internal_itt_releasing@spin_rw_mutex@tbb@@CAXPEAV12@@Z
+?internal_acquire_writer@spin_rw_mutex@tbb@@CA_NPEAV12@@Z
+?internal_acquire_reader@spin_rw_mutex@tbb@@CAXPEAV12@@Z
+?internal_downgrade@spin_rw_mutex@tbb@@CAXPEAV12@@Z
+?internal_upgrade@spin_rw_mutex@tbb@@CA_NPEAV12@@Z
+?internal_release_reader@spin_rw_mutex@tbb@@CAXPEAV12@@Z
+?internal_release_writer@spin_rw_mutex@tbb@@CAXPEAV12@@Z
+?internal_try_acquire_writer@spin_rw_mutex@tbb@@CA_NPEAV12@@Z
+?internal_try_acquire_reader@spin_rw_mutex@tbb@@CA_NPEAV12@@Z
+#endif
+
+; spin_rw_mutex v3
+?internal_construct@spin_rw_mutex_v3@tbb@@AEAAXXZ
+?internal_upgrade@spin_rw_mutex_v3@tbb@@AEAA_NXZ
+?internal_downgrade@spin_rw_mutex_v3@tbb@@AEAAXXZ
+?internal_acquire_reader@spin_rw_mutex_v3@tbb@@AEAAXXZ
+?internal_acquire_writer@spin_rw_mutex_v3@tbb@@AEAA_NXZ
+?internal_release_reader@spin_rw_mutex_v3@tbb@@AEAAXXZ
+?internal_release_writer@spin_rw_mutex_v3@tbb@@AEAAXXZ
+?internal_try_acquire_reader@spin_rw_mutex_v3@tbb@@AEAA_NXZ
+?internal_try_acquire_writer@spin_rw_mutex_v3@tbb@@AEAA_NXZ
+
+; spin_mutex.cpp
+?internal_construct@spin_mutex@tbb@@QEAAXXZ
+?internal_acquire@scoped_lock@spin_mutex@tbb@@AEAAXAEAV23@@Z
+?internal_release@scoped_lock@spin_mutex@tbb@@AEAAXXZ
+?internal_try_acquire@scoped_lock@spin_mutex@tbb@@AEAA_NAEAV23@@Z
+
+; mutex.cpp
+?internal_acquire@scoped_lock@mutex@tbb@@AEAAXAEAV23@@Z
+?internal_release@scoped_lock@mutex@tbb@@AEAAXXZ
+?internal_try_acquire@scoped_lock@mutex@tbb@@AEAA_NAEAV23@@Z
+?internal_construct@mutex@tbb@@AEAAXXZ
+?internal_destroy@mutex@tbb@@AEAAXXZ
+
+; recursive_mutex.cpp
+?internal_construct@recursive_mutex@tbb@@AEAAXXZ
+?internal_destroy@recursive_mutex@tbb@@AEAAXXZ
+?internal_acquire@scoped_lock@recursive_mutex@tbb@@AEAAXAEAV23@@Z
+?internal_try_acquire@scoped_lock@recursive_mutex@tbb@@AEAA_NAEAV23@@Z
+?internal_release@scoped_lock@recursive_mutex@tbb@@AEAAXXZ
+
+; queuing_mutex.cpp
+?internal_construct@queuing_mutex@tbb@@QEAAXXZ
+?acquire@scoped_lock@queuing_mutex@tbb@@QEAAXAEAV23@@Z
+?release@scoped_lock@queuing_mutex@tbb@@QEAAXXZ
+?try_acquire@scoped_lock@queuing_mutex@tbb@@QEAA_NAEAV23@@Z
+
+;critical_section.cpp
+?internal_construct@critical_section_v4@internal@tbb@@QEAAXXZ
+
+#if !TBB_NO_LEGACY
+; concurrent_hash_map.cpp
+?internal_grow_predicate@hash_map_segment_base@internal@tbb@@QEBA_NXZ
+
+; concurrent_queue.cpp v2
+??0concurrent_queue_base@internal@tbb@@IEAA@_K@Z
+??0concurrent_queue_iterator_base@internal@tbb@@IEAA@AEBVconcurrent_queue_base@12@@Z
+??1concurrent_queue_base@internal@tbb@@MEAA@XZ
+??1concurrent_queue_iterator_base@internal@tbb@@IEAA@XZ
+?advance@concurrent_queue_iterator_base@internal@tbb@@IEAAXXZ
+?assign@concurrent_queue_iterator_base@internal@tbb@@IEAAXAEBV123@@Z
+?internal_pop@concurrent_queue_base@internal@tbb@@IEAAXPEAX@Z
+?internal_pop_if_present@concurrent_queue_base@internal@tbb@@IEAA_NPEAX@Z
+?internal_push@concurrent_queue_base@internal@tbb@@IEAAXPEBX@Z
+?internal_push_if_not_full@concurrent_queue_base@internal@tbb@@IEAA_NPEBX@Z
+?internal_set_capacity@concurrent_queue_base@internal@tbb@@IEAAX_J_K@Z
+?internal_size@concurrent_queue_base@internal@tbb@@IEBA_JXZ
+#endif
+
+; concurrent_queue v3
+??0concurrent_queue_iterator_base_v3@internal@tbb@@IEAA@AEBVconcurrent_queue_base_v3@12@@Z
+??0concurrent_queue_iterator_base_v3@internal@tbb@@IEAA@AEBVconcurrent_queue_base_v3@12@_K@Z
+??1concurrent_queue_iterator_base_v3@internal@tbb@@IEAA@XZ
+?assign@concurrent_queue_iterator_base_v3@internal@tbb@@IEAAXAEBV123@@Z
+?advance@concurrent_queue_iterator_base_v3@internal@tbb@@IEAAXXZ
+??0concurrent_queue_base_v3@internal@tbb@@IEAA@_K@Z
+??1concurrent_queue_base_v3@internal@tbb@@MEAA@XZ
+?internal_push@concurrent_queue_base_v3@internal@tbb@@IEAAXPEBX@Z
+?internal_push_if_not_full@concurrent_queue_base_v3@internal@tbb@@IEAA_NPEBX@Z
+?internal_pop@concurrent_queue_base_v3@internal@tbb@@IEAAXPEAX@Z
+?internal_pop_if_present@concurrent_queue_base_v3@internal@tbb@@IEAA_NPEAX@Z
+?internal_size@concurrent_queue_base_v3@internal@tbb@@IEBA_JXZ
+?internal_empty@concurrent_queue_base_v3@internal@tbb@@IEBA_NXZ
+?internal_finish_clear@concurrent_queue_base_v3@internal@tbb@@IEAAXXZ
+?internal_set_capacity@concurrent_queue_base_v3@internal@tbb@@IEAAX_J_K@Z
+?internal_throw_exception@concurrent_queue_base_v3@internal@tbb@@IEBAXXZ
+?assign@concurrent_queue_base_v3@internal@tbb@@IEAAXAEBV123@@Z
+
+#if !TBB_NO_LEGACY
+; concurrent_vector.cpp v2
+?internal_assign@concurrent_vector_base@internal@tbb@@IEAAXAEBV123@_KP6AXPEAX1@ZP6AX2PEBX1@Z5@Z
+?internal_capacity@concurrent_vector_base@internal@tbb@@IEBA_KXZ
+?internal_clear@concurrent_vector_base@internal@tbb@@IEAAXP6AXPEAX_K@Z_N@Z
+?internal_copy@concurrent_vector_base@internal@tbb@@IEAAXAEBV123@_KP6AXPEAXPEBX1@Z@Z
+?internal_grow_by@concurrent_vector_base@internal@tbb@@IEAA_K_K0P6AXPEAX0@Z@Z
+?internal_grow_to_at_least@concurrent_vector_base@internal@tbb@@IEAAX_K0P6AXPEAX0@Z@Z
+?internal_push_back@concurrent_vector_base@internal@tbb@@IEAAPEAX_KAEA_K@Z
+?internal_reserve@concurrent_vector_base@internal@tbb@@IEAAX_K00@Z
+#endif
+
+; concurrent_vector v3
+??1concurrent_vector_base_v3@internal@tbb@@IEAA@XZ
+?internal_assign@concurrent_vector_base_v3@internal@tbb@@IEAAXAEBV123@_KP6AXPEAX1@ZP6AX2PEBX1@Z5@Z
+?internal_capacity@concurrent_vector_base_v3@internal@tbb@@IEBA_KXZ
+?internal_clear@concurrent_vector_base_v3@internal@tbb@@IEAA_KP6AXPEAX_K@Z@Z
+?internal_copy@concurrent_vector_base_v3@internal@tbb@@IEAAXAEBV123@_KP6AXPEAXPEBX1@Z@Z
+?internal_grow_by@concurrent_vector_base_v3@internal@tbb@@IEAA_K_K0P6AXPEAXPEBX0@Z2@Z
+?internal_grow_to_at_least@concurrent_vector_base_v3@internal@tbb@@IEAAX_K0P6AXPEAXPEBX0@Z2@Z
+?internal_push_back@concurrent_vector_base_v3@internal@tbb@@IEAAPEAX_KAEA_K@Z
+?internal_reserve@concurrent_vector_base_v3@internal@tbb@@IEAAX_K00@Z
+?internal_compact@concurrent_vector_base_v3@internal@tbb@@IEAAPEAX_KPEAXP6AX10@ZP6AX1PEBX0@Z@Z
+?internal_swap@concurrent_vector_base_v3@internal@tbb@@IEAAXAEAV123@@Z
+?internal_throw_exception@concurrent_vector_base_v3@internal@tbb@@IEBAX_K@Z
+?internal_resize@concurrent_vector_base_v3@internal@tbb@@IEAAX_K00PEBXP6AXPEAX0@ZP6AX210@Z@Z
+?internal_grow_to_at_least_with_result@concurrent_vector_base_v3@internal@tbb@@IEAA_K_K0P6AXPEAXPEBX0@Z2@Z
+
+; tbb_thread
+?allocate_closure_v3@internal@tbb@@YAPEAX_K@Z
+?detach@tbb_thread_v3@internal@tbb@@QEAAXXZ
+?free_closure_v3@internal@tbb@@YAXPEAX@Z
+?hardware_concurrency@tbb_thread_v3@internal@tbb@@SAIXZ
+?internal_start@tbb_thread_v3@internal@tbb@@AEAAXP6AIPEAX@Z0@Z
+?join@tbb_thread_v3@internal@tbb@@QEAAXXZ
+?move_v3@internal@tbb@@YAXAEAVtbb_thread_v3@12@0@Z
+?thread_get_id_v3@internal@tbb@@YA?AVid@tbb_thread_v3@12@XZ
+?thread_sleep_v3@internal@tbb@@YAXAEBVinterval_t@tick_count@2@@Z
+?thread_yield_v3@internal@tbb@@YAXXZ
+
+; condition_variable
+?internal_initialize_condition_variable@internal@interface5@tbb@@YAXAEATcondvar_impl_t@123@@Z
+?internal_condition_variable_wait@internal@interface5@tbb@@YA_NAEATcondvar_impl_t@123@PEAVmutex@3@PEBVinterval_t@tick_count@3@@Z
+?internal_condition_variable_notify_one@internal@interface5@tbb@@YAXAEATcondvar_impl_t@123@@Z
+?internal_condition_variable_notify_all@internal@interface5@tbb@@YAXAEATcondvar_impl_t@123@@Z
+?internal_destroy_condition_variable@internal@interface5@tbb@@YAXAEATcondvar_impl_t@123@@Z
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbb/xbox360-tbb-export.def b/deal.II/contrib/tbb/tbb30_104oss/src/tbb/xbox360-tbb-export.def
new file mode 100644 (file)
index 0000000..ac5f14e
--- /dev/null
@@ -0,0 +1,234 @@
+; Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+;
+; This file is part of Threading Building Blocks.
+;
+; Threading Building Blocks is free software; you can redistribute it
+; and/or modify it under the terms of the GNU General Public License
+; version 2 as published by the Free Software Foundation.
+;
+; Threading Building Blocks is distributed in the hope that it will be
+; useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+; of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+; GNU General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with Threading Building Blocks; if not, write to the Free Software
+; Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+;
+; As a special exception, you may use this file as part of a free software
+; library without restriction.  Specifically, if other files instantiate
+; templates or use macros or inline functions from this file, or you compile
+; this file and link it with other files to produce an executable, this
+; file does not by itself cause the resulting executable to be covered by
+; the GNU General Public License.  This exception does not however
+; invalidate any other reasons why the executable file might be covered by
+; the GNU General Public License.
+
+EXPORTS
+
+; Assembly-language support that is called directly by clients
+;__TBB_machine_cmpswp1
+;__TBB_machine_cmpswp2
+;__TBB_machine_cmpswp4
+;__TBB_machine_cmpswp8
+;__TBB_machine_fetchadd1
+;__TBB_machine_fetchadd2
+;__TBB_machine_fetchadd4
+;__TBB_machine_fetchadd8
+;__TBB_machine_fetchstore1
+;__TBB_machine_fetchstore2
+;__TBB_machine_fetchstore4
+;__TBB_machine_fetchstore8
+;__TBB_machine_store8
+;__TBB_machine_load8
+;__TBB_machine_trylockbyte
+
+; cache_aligned_allocator.cpp
+?NFS_Allocate@internal@tbb@@YAPAXIIPAX@Z @1
+?NFS_GetLineSize@internal@tbb@@YAIXZ @2
+?NFS_Free@internal@tbb@@YAXPAX@Z @3 
+?allocate_via_handler_v3@internal@tbb@@YAPAXI@Z @4
+?deallocate_via_handler_v3@internal@tbb@@YAXPAX@Z @5
+?is_malloc_used_v3@internal@tbb@@YA_NXZ @6
+
+; task.cpp v3
+?allocate@allocate_additional_child_of_proxy@internal@tbb@@QBAAAVtask@3@I@Z @7
+?allocate@allocate_child_proxy@internal@tbb@@QBAAAVtask@3@I@Z @8
+?allocate@allocate_continuation_proxy@internal@tbb@@QBAAAVtask@3@I@Z @9
+?allocate@allocate_root_proxy@internal@tbb@@SAAAVtask@3@I@Z @10
+?destroy@task@tbb@@QAAXAAV12@@Z @11
+?free@allocate_additional_child_of_proxy@internal@tbb@@QBAXAAVtask@3@@Z @12
+?free@allocate_child_proxy@internal@tbb@@QBAXAAVtask@3@@Z @13
+?free@allocate_continuation_proxy@internal@tbb@@QBAXAAVtask@3@@Z @14
+?free@allocate_root_proxy@internal@tbb@@SAXAAVtask@3@@Z @15
+?internal_set_ref_count@task@tbb@@AAAXH@Z @16
+?is_owned_by_current_thread@task@tbb@@QBA_NXZ @17
+?note_affinity@task@tbb@@UAAXG@Z @18
+?resize@affinity_partitioner_base_v3@internal@tbb@@AAAXI@Z @19
+?self@task@tbb@@SAAAV12@XZ @20
+?spawn_and_wait_for_all@task@tbb@@QAAXAAVtask_list@2@@Z @21
+?default_num_threads@task_scheduler_init@tbb@@SAHXZ @22
+?initialize@task_scheduler_init@tbb@@QAAXHI@Z @23
+?initialize@task_scheduler_init@tbb@@QAAXH@Z @24
+?terminate@task_scheduler_init@tbb@@QAAXXZ @25
+?observe@task_scheduler_observer_v3@internal@tbb@@QAAX_N@Z @26
+
+; exception handling support
+?allocate@allocate_root_with_context_proxy@internal@tbb@@QBAAAVtask@3@I@Z @27
+?free@allocate_root_with_context_proxy@internal@tbb@@QBAXAAVtask@3@@Z @28
+?is_group_execution_cancelled@task_group_context@tbb@@QBA_NXZ @29
+?cancel_group_execution@task_group_context@tbb@@QAA_NXZ @30
+?reset@task_group_context@tbb@@QAAXXZ @31
+?init@task_group_context@tbb@@IAAXXZ @32
+??1task_group_context@tbb@@QAA@XZ @33
+?name@captured_exception@tbb@@UBAPBDXZ @34
+?what@captured_exception@tbb@@UBAPBDXZ @35   
+??1captured_exception@tbb@@UAA@XZ @36
+
+; tbb_misc.cpp
+?assertion_failure@tbb@@YAXPBDH00@Z @37
+?get_initial_auto_partitioner_divisor@internal@tbb@@YAIXZ @38
+?handle_perror@internal@tbb@@YAXHPBD@Z @39
+?set_assertion_handler@tbb@@YAP6AXPBDH00@ZP6AX0H00@Z@Z @40
+?runtime_warning@internal@tbb@@YAXPBDZZ @41
+
+; itt_notify.cpp
+?itt_load_pointer_with_acquire_v3@internal@tbb@@YAPAXPBX@Z @42
+?itt_store_pointer_with_release_v3@internal@tbb@@YAXPAX0@Z @43
+
+; pipeline.cpp
+??0pipeline@tbb@@QAA@XZ @44
+??1filter@tbb@@UAA@XZ @45
+??1pipeline@tbb@@UAA@XZ @46   
+??_7pipeline@tbb@@6B@ @47
+?add_filter@pipeline@tbb@@QAAXAAVfilter@2@@Z @48
+?clear@pipeline@tbb@@QAAXXZ @49
+?inject_token@pipeline@tbb@@AAAXAAVtask@2@@Z @50
+?run@pipeline@tbb@@QAAXI@Z @51
+
+; queuing_rw_mutex.cpp
+?acquire@scoped_lock@queuing_rw_mutex@tbb@@QAAXAAV23@_N@Z @52
+?downgrade_to_reader@scoped_lock@queuing_rw_mutex@tbb@@QAA_NXZ @53
+?release@scoped_lock@queuing_rw_mutex@tbb@@QAAXXZ @54
+?upgrade_to_writer@scoped_lock@queuing_rw_mutex@tbb@@QAA_NXZ @55
+?try_acquire@scoped_lock@queuing_rw_mutex@tbb@@QAA_NAAV23@_N@Z @56
+
+#if !TBB_NO_LEGACY
+; spin_rw_mutex.cpp v2
+?internal_acquire_reader@spin_rw_mutex@tbb@@CAXPAV12@@Z @57
+?internal_acquire_writer@spin_rw_mutex@tbb@@CA_NPAV12@@Z @58
+?internal_downgrade@spin_rw_mutex@tbb@@CAXPAV12@@Z @59
+?internal_itt_releasing@spin_rw_mutex@tbb@@CAXPAV12@@Z @60
+?internal_release_reader@spin_rw_mutex@tbb@@CAXPAV12@@Z @61   
+?internal_release_writer@spin_rw_mutex@tbb@@CAXPAV12@@Z @62
+?internal_upgrade@spin_rw_mutex@tbb@@CA_NPAV12@@Z @63
+?internal_try_acquire_writer@spin_rw_mutex@tbb@@CA_NPAV12@@Z @64
+?internal_try_acquire_reader@spin_rw_mutex@tbb@@CA_NPAV12@@Z @65
+#endif
+
+; spin_rw_mutex v3
+?internal_upgrade@spin_rw_mutex_v3@tbb@@AAA_NXZ @66
+?internal_downgrade@spin_rw_mutex_v3@tbb@@AAAXXZ @67
+?internal_acquire_reader@spin_rw_mutex_v3@tbb@@AAAXXZ @68
+?internal_acquire_writer@spin_rw_mutex_v3@tbb@@AAA_NXZ @69
+?internal_release_reader@spin_rw_mutex_v3@tbb@@AAAXXZ @70
+?internal_release_writer@spin_rw_mutex_v3@tbb@@AAAXXZ @71
+?internal_try_acquire_reader@spin_rw_mutex_v3@tbb@@AAA_NXZ @72
+?internal_try_acquire_writer@spin_rw_mutex_v3@tbb@@AAA_NXZ @73
+
+; spin_mutex.cpp
+?internal_acquire@scoped_lock@spin_mutex@tbb@@AAAXAAV23@@Z @74
+?internal_release@scoped_lock@spin_mutex@tbb@@AAAXXZ @75
+?internal_try_acquire@scoped_lock@spin_mutex@tbb@@AAA_NAAV23@@Z @76
+
+; mutex.cpp
+?internal_acquire@scoped_lock@mutex@tbb@@AAAXAAV23@@Z @77
+?internal_release@scoped_lock@mutex@tbb@@AAAXXZ @78
+?internal_try_acquire@scoped_lock@mutex@tbb@@AAA_NAAV23@@Z @79
+?internal_construct@mutex@tbb@@AAAXXZ @80
+?internal_destroy@mutex@tbb@@AAAXXZ @81
+
+; recursive_mutex.cpp
+?internal_acquire@scoped_lock@recursive_mutex@tbb@@AAAXAAV23@@Z @82 
+?internal_release@scoped_lock@recursive_mutex@tbb@@AAAXXZ @83
+?internal_try_acquire@scoped_lock@recursive_mutex@tbb@@AAA_NAAV23@@Z @84
+?internal_construct@recursive_mutex@tbb@@AAAXXZ @85
+?internal_destroy@recursive_mutex@tbb@@AAAXXZ @86
+
+; queuing_mutex.cpp
+?acquire@scoped_lock@queuing_mutex@tbb@@QAAXAAV23@@Z @87
+?release@scoped_lock@queuing_mutex@tbb@@QAAXXZ @88
+?try_acquire@scoped_lock@queuing_mutex@tbb@@QAA_NAAV23@@Z @89
+
+; concurrent_hash_map.cpp
+?internal_grow_predicate@hash_map_segment_base@internal@tbb@@QBA_NXZ @90
+
+#if !TBB_NO_LEGACY
+; concurrent_queue.cpp v2
+?advance@concurrent_queue_iterator_base@internal@tbb@@IAAXXZ @91
+?assign@concurrent_queue_iterator_base@internal@tbb@@IAAXABV123@@Z @92
+?internal_size@concurrent_queue_base@internal@tbb@@IBAHXZ @93
+??0concurrent_queue_base@internal@tbb@@IAA@I@Z @94
+??0concurrent_queue_iterator_base@internal@tbb@@IAA@ABVconcurrent_queue_base@12@@Z @95
+??1concurrent_queue_base@internal@tbb@@MAA@XZ @96
+??1concurrent_queue_iterator_base@internal@tbb@@IAA@XZ @97
+?internal_pop@concurrent_queue_base@internal@tbb@@IAAXPAX@Z @98
+?internal_pop_if_present@concurrent_queue_base@internal@tbb@@IAA_NPAX@Z @99
+?internal_push@concurrent_queue_base@internal@tbb@@IAAXPBX@Z @100
+?internal_push_if_not_full@concurrent_queue_base@internal@tbb@@IAA_NPBX@Z @101
+?internal_set_capacity@concurrent_queue_base@internal@tbb@@IAAXHI@Z @102
+#endif
+
+; concurrent_queue v3
+??1concurrent_queue_iterator_base_v3@internal@tbb@@IAA@XZ @103
+??0concurrent_queue_iterator_base_v3@internal@tbb@@IAA@ABVconcurrent_queue_base_v3@12@@Z @104
+?advance@concurrent_queue_iterator_base_v3@internal@tbb@@IAAXXZ @105
+?assign@concurrent_queue_iterator_base_v3@internal@tbb@@IAAXABV123@@Z @106
+??0concurrent_queue_base_v3@internal@tbb@@IAA@I@Z @107
+??1concurrent_queue_base_v3@internal@tbb@@MAA@XZ @108
+?internal_pop@concurrent_queue_base_v3@internal@tbb@@IAAXPAX@Z @109
+?internal_pop_if_present@concurrent_queue_base_v3@internal@tbb@@IAA_NPAX@Z @110
+?internal_push@concurrent_queue_base_v3@internal@tbb@@IAAXPBX@Z @111
+?internal_push_if_not_full@concurrent_queue_base_v3@internal@tbb@@IAA_NPBX@Z @112
+?internal_size@concurrent_queue_base_v3@internal@tbb@@IBAHXZ @113
+?internal_set_capacity@concurrent_queue_base_v3@internal@tbb@@IAAXHI@Z @114
+?internal_finish_clear@concurrent_queue_base_v3@internal@tbb@@IAAXXZ @115
+?internal_throw_exception@concurrent_queue_base_v3@internal@tbb@@IBAXXZ @116
+
+#if !TBB_NO_LEGACY
+; concurrent_vector.cpp v2
+?internal_assign@concurrent_vector_base@internal@tbb@@IAAXABV123@IP6AXPAXI@ZP6AX1PBXI@Z4@Z @117
+?internal_capacity@concurrent_vector_base@internal@tbb@@IBAIXZ @118
+?internal_clear@concurrent_vector_base@internal@tbb@@IAAXP6AXPAXI@Z_N@Z @119
+?internal_copy@concurrent_vector_base@internal@tbb@@IAAXABV123@IP6AXPAXPBXI@Z@Z @120
+?internal_grow_by@concurrent_vector_base@internal@tbb@@IAAIIIP6AXPAXI@Z@Z @121
+?internal_grow_to_at_least@concurrent_vector_base@internal@tbb@@IAAXIIP6AXPAXI@Z@Z @122
+?internal_push_back@concurrent_vector_base@internal@tbb@@IAAPAXIAAI@Z @123
+?internal_reserve@concurrent_vector_base@internal@tbb@@IAAXIII@Z @124
+#endif
+
+; concurrent_vector v3
+??1concurrent_vector_base_v3@internal@tbb@@IAA@XZ @125
+?internal_assign@concurrent_vector_base_v3@internal@tbb@@IAAXABV123@IP6AXPAXI@ZP6AX1PBXI@Z4@Z @126
+?internal_capacity@concurrent_vector_base_v3@internal@tbb@@IBAIXZ @127
+?internal_clear@concurrent_vector_base_v3@internal@tbb@@IAAIP6AXPAXI@Z@Z @128
+?internal_copy@concurrent_vector_base_v3@internal@tbb@@IAAXABV123@IP6AXPAXPBXI@Z@Z @129
+?internal_grow_by@concurrent_vector_base_v3@internal@tbb@@IAAIIIP6AXPAXPBXI@Z1@Z @130
+?internal_grow_to_at_least@concurrent_vector_base_v3@internal@tbb@@IAAXIIP6AXPAXPBXI@Z1@Z @131
+?internal_push_back@concurrent_vector_base_v3@internal@tbb@@IAAPAXIAAI@Z @132
+?internal_reserve@concurrent_vector_base_v3@internal@tbb@@IAAXIII@Z @133
+?internal_compact@concurrent_vector_base_v3@internal@tbb@@IAAPAXIPAXP6AX0I@ZP6AX0PBXI@Z@Z @134
+?internal_swap@concurrent_vector_base_v3@internal@tbb@@IAAXAAV123@@Z @135
+?internal_throw_exception@concurrent_vector_base_v3@internal@tbb@@IBAXI@Z @136
+
+; tbb_thread
+?join@tbb_thread_v3@internal@tbb@@QAAXXZ @137
+?detach@tbb_thread_v3@internal@tbb@@QAAXXZ @138
+?internal_start@tbb_thread_v3@internal@tbb@@AAAXP6AIPAX@Z0@Z @139
+?allocate_closure_v3@internal@tbb@@YAPAXI@Z @140
+?free_closure_v3@internal@tbb@@YAXPAX@Z @141
+?hardware_concurrency@tbb_thread_v3@internal@tbb@@SAIXZ @142
+?thread_yield_v3@internal@tbb@@YAXXZ @143
+?thread_sleep_v3@internal@tbb@@YAXABVinterval_t@tick_count@2@@Z @144
+?move_v3@internal@tbb@@YAXAAVtbb_thread_v3@12@0@Z @145
+?thread_get_id_v3@internal@tbb@@YA?AVid@tbb_thread_v3@12@XZ @146
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/Customize.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/Customize.h
new file mode 100644 (file)
index 0000000..2ad17a9
--- /dev/null
@@ -0,0 +1,128 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _TBB_malloc_Customize_H_
+#define _TBB_malloc_Customize_H_
+
+/* Thread shutdown notification callback */
+/* redefine the name of the callback to meet TBB requirements
+   for externally visible names of service functions */
+#define mallocThreadShutdownNotification __TBB_mallocThreadShutdownNotification
+#define mallocProcessShutdownNotification __TBB_mallocProcessShutdownNotification
+
+extern "C" void mallocThreadShutdownNotification(void *);
+extern "C" void mallocProcessShutdownNotification(void);
+
+// customizing MALLOC_ASSERT macro
+#include "tbb/tbb_stddef.h"
+#define MALLOC_ASSERT(assertion, message) __TBB_ASSERT(assertion, message)
+
+#ifndef MALLOC_DEBUG
+#define MALLOC_DEBUG TBB_USE_DEBUG
+#endif
+
+#include "tbb/tbb_machine.h"
+
+#if DO_ITT_NOTIFY
+#include "tbb/itt_notify.h"
+#define MALLOC_ITT_SYNC_PREPARE(pointer) ITT_NOTIFY(sync_prepare, (pointer))
+#define MALLOC_ITT_SYNC_ACQUIRED(pointer) ITT_NOTIFY(sync_acquired, (pointer))
+#define MALLOC_ITT_SYNC_RELEASING(pointer) ITT_NOTIFY(sync_releasing, (pointer))
+#define MALLOC_ITT_SYNC_CANCEL(pointer) ITT_NOTIFY(sync_cancel, (pointer))
+#else
+#define MALLOC_ITT_SYNC_PREPARE(pointer) ((void)0)
+#define MALLOC_ITT_SYNC_ACQUIRED(pointer) ((void)0)
+#define MALLOC_ITT_SYNC_RELEASING(pointer) ((void)0)
+#define MALLOC_ITT_SYNC_CANCEL(pointer) ((void)0)
+#endif
+
+//! Stripped down version of spin_mutex.
+/** Instances of MallocMutex must be declared in memory that is zero-initialized.
+    There are no constructors.  This is a feature that lets it be
+    used in situations where the mutex might be used while file-scope constructors
+    are running.
+
+    There are no methods "acquire" or "release".  The scoped_lock must be used
+    in a strict block-scoped locking pattern.  Omitting these methods permitted
+    further simplification. */
+class MallocMutex {
+    unsigned char value;
+
+    //! Deny assignment
+    void operator=( MallocMutex& MallocMutex );
+public:
+    class scoped_lock {
+        const unsigned char value;
+        MallocMutex& mutex;
+    public:
+        scoped_lock( MallocMutex& m ) : value( __TBB_LockByte(m.value)), mutex(m) {}
+        ~scoped_lock() { __TBB_store_with_release(mutex.value, value); }
+    };
+    friend class scoped_lock;
+};
+
+inline intptr_t AtomicIncrement( volatile intptr_t& counter ) {
+    return __TBB_FetchAndAddW( &counter, 1 )+1;
+}
+
+inline uintptr_t AtomicAdd( volatile uintptr_t& counter, uintptr_t value ) {
+    return __TBB_FetchAndAddW( &counter, value );
+}
+
+inline intptr_t AtomicCompareExchange( volatile intptr_t& location, intptr_t new_value, intptr_t comparand) {
+    return __TBB_CompareAndSwapW( &location, new_value, comparand );
+}
+
+inline intptr_t FencedLoad( const volatile intptr_t &location ) {
+    return __TBB_load_with_acquire(location);
+}
+
+inline void FencedStore( volatile intptr_t &location, intptr_t value ) {
+    __TBB_store_with_release(location, value);
+}
+
+#define USE_DEFAULT_MEMORY_MAPPING 1
+
+// To support malloc replacement with LD_PRELOAD
+#include "proxy.h"
+
+#if MALLOC_LD_PRELOAD
+#define malloc_proxy __TBB_malloc_proxy
+extern "C" void * __TBB_malloc_proxy(size_t)  __attribute__ ((weak));
+#else
+const bool malloc_proxy = false;
+#endif
+
+namespace rml {
+namespace internal {
+    void init_tbbmalloc();
+} } // namespaces
+
+#define MALLOC_EXTRA_INITIALIZATION rml::internal::init_tbbmalloc()
+
+#endif /* _TBB_malloc_Customize_H_ */
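
MallocMutex above relies on zero-initialized storage and exposes only a scoped_lock, so every caller follows a strict block-scoped acquire/release pattern. A minimal standalone sketch of that pattern, with std::atomic<unsigned char> standing in for the __TBB_LockByte / __TBB_store_with_release machine primitives (an illustration, not the TBB code itself):

#include <atomic>

// Byte-wide spin lock usable only through a scoped lock (sketch of the pattern).
struct TinyMutex {
    std::atomic<unsigned char> value{0};               // 0 == unlocked; the zero state is valid

    class scoped_lock {
        TinyMutex& m;
    public:
        explicit scoped_lock(TinyMutex& mutex) : m(mutex) {
            // acquire: spin until the byte flips from 0 to 1
            while (m.value.exchange(1, std::memory_order_acquire)) {}
        }
        ~scoped_lock() { m.value.store(0, std::memory_order_release); }  // release
    };
};

static TinyMutex poolLock;                             // file-scope, like the allocator's mutexes

void touchSharedPool() {
    TinyMutex::scoped_lock lock(poolLock);             // acquired on entry to the block
    // ... mutate shared allocator state here ...
}                                                      // released when the block ends
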
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/LifoList.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/LifoList.h
new file mode 100644 (file)
index 0000000..e45934c
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _itt_common_malloc_LifoList_H_
+#define _itt_common_malloc_LifoList_H_
+
+#include "TypeDefinitions.h"
+#include <string.h> // for memset()
+
+//! Checking the synchronization method
+/** FINE_GRAIN_LOCKS is the only variant for now; should be defined for LifoList */
+#ifndef FINE_GRAIN_LOCKS
+#define FINE_GRAIN_LOCKS
+#endif
+
+namespace rml {
+
+namespace internal {
+
+class LifoList {
+public:
+    inline LifoList();
+    inline void push(void** ptr);
+    inline void* pop(void);
+    inline void pushList(void **head, void **tail);
+
+private:
+    void * top;
+#ifdef FINE_GRAIN_LOCKS
+    MallocMutex lock;
+#endif /* FINE_GRAIN_LOCKS     */
+};
+
+#ifdef FINE_GRAIN_LOCKS
+/* LifoList assumes zero initialization so a vector of it can be created
+ * by just allocating some space with no call to constructor.
+ * On Linux, it seems to be necessary to avoid linking with C++ libraries.
+ *
+ * By usage convention there is no race on the initialization. */
+LifoList::LifoList( ) : top(NULL)
+{
+    // MallocMutex assumes zero initialization
+    memset(&lock, 0, sizeof(MallocMutex));
+}
+
+void LifoList::push( void **ptr )
+{   
+    MallocMutex::scoped_lock scoped_cs(lock);
+    *ptr = top;
+    top = ptr;
+}
+
+void LifoList::pushList( void **head, void **tail )
+{   
+    MallocMutex::scoped_lock scoped_cs(lock);
+    *tail = top;
+    top = head;
+}
+
+void * LifoList::pop( )
+{   
+    void **result=NULL;
+    if (!top) goto done;
+    {
+        MallocMutex::scoped_lock scoped_cs(lock);
+        if (!top) goto done;
+        result = (void **) top;
+        top = *result;
+    } 
+    *result = NULL;
+done:
+    return result;
+}
+
+#endif /* FINE_GRAIN_LOCKS     */
+
+} // namespace internal
+} // namespace rml
+
+#endif /* _itt_common_malloc_LifoList_H_ */
+
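LifoList keeps its links inside the items themselves: push(ptr) writes the old top into *ptr, so a free block whose first word is unused can be chained with no extra allocation. A standalone sketch of that intrusive convention (std::mutex stands in for MallocMutex purely to keep the example self-contained):

#include <cassert>
#include <mutex>

// Intrusive LIFO: the first word of each pushed item doubles as the 'next' link.
class IntrusiveLifo {
    void*      top = nullptr;
    std::mutex lock;
public:
    void push(void** item) {
        std::lock_guard<std::mutex> guard(lock);
        *item = top;                       // old top stored inside the item itself
        top   = item;
    }
    void* pop() {
        std::lock_guard<std::mutex> guard(lock);
        if (!top) return nullptr;
        void** result = static_cast<void**>(top);
        top = *result;                     // unlink
        *result = nullptr;
        return result;
    }
};

int main() {
    IntrusiveLifo list;
    void* blockA[4] = {};                  // stands in for a free 16KB block
    void* blockB[4] = {};
    list.push(blockA);
    list.push(blockB);
    assert(list.pop() == blockB);          // LIFO order
    assert(list.pop() == blockA);
    assert(list.pop() == nullptr);
    return 0;
}
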
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/MapMemory.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/MapMemory.h
new file mode 100644 (file)
index 0000000..26d85f2
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _itt_shared_malloc_MapMemory_H
+#define _itt_shared_malloc_MapMemory_H
+
+#if __linux__ || __APPLE__ || __sun || __FreeBSD__
+
+#if __sun && !defined(_XPG4_2)
+ // To have void* as mmap's 1st argument
+ #define _XPG4_2 1
+ #define XPG4_WAS_DEFINED 1
+#endif
+
+#include <sys/mman.h>
+
+#if XPG4_WAS_DEFINED
+ #undef _XPG4_2
+ #undef XPG4_WAS_DEFINED
+#endif
+
+#define MEMORY_MAPPING_USES_MALLOC 0
+void* MapMemory (size_t bytes)
+{
+    void* result = 0;
+#ifndef MAP_ANONYMOUS
+// Mac OS* X defines MAP_ANON, which is deprecated in Linux.
+#define MAP_ANONYMOUS MAP_ANON
+#endif /* MAP_ANONYMOUS */
+    result = mmap(result, bytes, (PROT_READ | PROT_WRITE), MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+    return result==MAP_FAILED? 0: result;
+}
+
+int UnmapMemory(void *area, size_t bytes)
+{
+    return munmap(area, bytes);
+}
+
+#elif (_WIN32 || _WIN64) && !_XBOX
+#include <windows.h>
+
+#define MEMORY_MAPPING_USES_MALLOC 0
+void* MapMemory (size_t bytes)
+{
+    /* Is VirtualAlloc thread safe? */
+    return VirtualAlloc(NULL, bytes, (MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN), PAGE_READWRITE);
+}
+
+int UnmapMemory(void *area, size_t bytes)
+{
+    BOOL result = VirtualFree(area, 0, MEM_RELEASE);
+    return !result;
+}
+
+#else
+#include <stdlib.h>
+
+#define MEMORY_MAPPING_USES_MALLOC 1
+void* MapMemory (size_t bytes)
+{
+    return malloc( bytes );
+}
+
+int UnmapMemory(void *area, size_t bytes)
+{
+    free( area );
+    return 0;
+}
+
+#endif /* OS dependent */
+
+#if MALLOC_CHECK_RECURSION && MEMORY_MAPPING_USES_MALLOC
+#error Impossible to protect against malloc recursion when memory mapping uses malloc.
+#endif
+
+#endif /* _itt_shared_malloc_MapMemory_H */
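
On the Unix branch above, MapMemory reduces to an anonymous private mmap and UnmapMemory to munmap. A self-contained POSIX sketch of the same two calls; the 1 MiB request size mirrors what the backend asks for, and the error handling is illustrative only:

#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON             // older BSD / Mac OS X spelling
#endif

int main() {
    const std::size_t bytes = 1 << 20;     // 1 MiB slab, the size the backend requests
    void* slab = mmap(nullptr, bytes, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (slab == MAP_FAILED) {              // MapMemory above returns 0 in this case
        std::perror("mmap");
        return 1;
    }
    // ... carve the slab into 16KB blocks here ...
    if (munmap(slab, bytes) != 0) {
        std::perror("munmap");
        return 1;
    }
    return 0;
}
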
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/Statistics.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/Statistics.h
new file mode 100644 (file)
index 0000000..9b59d0e
--- /dev/null
@@ -0,0 +1,137 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#define MAX_THREADS 1024
+#define NUM_OF_BINS 30
+#define ThreadCommonCounters NUM_OF_BINS
+
+enum counter_type {
+    allocBlockNew = 0,
+    allocBlockPublic,
+    allocBumpPtrUsed,
+    allocFreeListUsed,
+    allocPrivatized,
+    examineEmptyEnough,
+    examineNotEmpty,
+    freeRestoreBumpPtr,
+    freeByOtherThread,
+    freeToActiveBlock,
+    freeToInactiveBlock,
+    freeBlockPublic,
+    freeBlockBack,
+    MaxCounters
+};
+enum common_counter_type {
+    allocNewLargeObj = 0,
+    allocCachedLargeObj,
+    cacheLargeObj,
+    freeLargeObj,
+    lockPublicFreeList,
+    freeToOtherThread
+};
+
+#if COLLECT_STATISTICS
+/* Statistics reporting callback registered via a static object dtor
+   on Posix or DLL_PROCESS_DETACH on Windows.
+ */
+
+static bool reportAllocationStatistics;
+
+struct bin_counters {
+    int counter[MaxCounters];
+};
+
+static bin_counters statistic[MAX_THREADS][NUM_OF_BINS+1]; //zero-initialized;
+
+static inline int STAT_increment(int thread, int bin, int ctr)
+{
+    return reportAllocationStatistics && thread < MAX_THREADS ? ++(statistic[thread][bin].counter[ctr]) : 0;
+}
+
+static inline void initStatisticsCollection() {
+#if defined(MALLOCENV_COLLECT_STATISTICS)
+    if (NULL != getenv(MALLOCENV_COLLECT_STATISTICS))
+        reportAllocationStatistics = true;
+#endif
+}
+
+#else
+#define STAT_increment(a,b,c) ((void)0)
+#endif /* COLLECT_STATISTICS */
+
+static inline void STAT_print(int thread)
+{
+#if COLLECT_STATISTICS
+    if (!reportAllocationStatistics)
+        return;
+
+    char filename[100];
+#if USE_PTHREAD
+    sprintf(filename, "stat_ScalableMalloc_proc%04d_thr%04d.log", getpid(), thread);
+#else
+    sprintf(filename, "stat_ScalableMalloc_thr%04d.log", thread);
+#endif
+    FILE* outfile = fopen(filename, "w");
+    for(int i=0; i<NUM_OF_BINS; ++i)
+    {
+        bin_counters& ctrs = statistic[thread][i];
+        fprintf(outfile, "Thr%04d Bin%02d", thread, i);
+        fprintf(outfile, ": allocNewBlocks %5d", ctrs.counter[allocBlockNew]);
+        fprintf(outfile, ", allocPublicBlocks %5d", ctrs.counter[allocBlockPublic]);
+        fprintf(outfile, ", restoreBumpPtr %5d", ctrs.counter[freeRestoreBumpPtr]);
+        fprintf(outfile, ", privatizeCalled %10d", ctrs.counter[allocPrivatized]);
+        fprintf(outfile, ", emptyEnough %10d", ctrs.counter[examineEmptyEnough]);
+        fprintf(outfile, ", notEmptyEnough %10d", ctrs.counter[examineNotEmpty]);
+        fprintf(outfile, ", freeBlocksPublic %5d", ctrs.counter[freeBlockPublic]);
+        fprintf(outfile, ", freeBlocksBack %5d", ctrs.counter[freeBlockBack]);
+        fprintf(outfile, "\n");
+    }
+    for(int i=0; i<NUM_OF_BINS; ++i)
+    {
+        bin_counters& ctrs = statistic[thread][i];
+        fprintf(outfile, "Thr%04d Bin%02d", thread, i);
+        fprintf(outfile, ": allocBumpPtr %10d", ctrs.counter[allocBumpPtrUsed]);
+        fprintf(outfile, ", allocFreeList %10d", ctrs.counter[allocFreeListUsed]);
+        fprintf(outfile, ", freeToActiveBlk %10d", ctrs.counter[freeToActiveBlock]);
+        fprintf(outfile, ", freeToInactive  %10d", ctrs.counter[freeToInactiveBlock]);
+        fprintf(outfile, ", freedByOther %10d", ctrs.counter[freeByOtherThread]);
+        fprintf(outfile, "\n");
+    }
+    bin_counters& ctrs = statistic[thread][ThreadCommonCounters];
+    fprintf(outfile, "Thr%04d common counters", thread);
+    fprintf(outfile, ": allocNewLargeObject %5d", ctrs.counter[allocNewLargeObj]);
+    fprintf(outfile, ": allocCachedLargeObject %5d", ctrs.counter[allocCachedLargeObj]);
+    fprintf(outfile, ", cacheLargeObject %5d", ctrs.counter[cacheLargeObj]);
+    fprintf(outfile, ", freeLargeObject %5d", ctrs.counter[freeLargeObj]);
+    fprintf(outfile, ", lockPublicFreeList %5d", ctrs.counter[lockPublicFreeList]);
+    fprintf(outfile, ", freeToOtherThread %10d", ctrs.counter[freeToOtherThread]);
+    fprintf(outfile, "\n");
+
+    fclose(outfile);
+#endif
+}
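
The point of STAT_increment is that instrumented hot paths cost nothing unless COLLECT_STATISTICS is defined at build time. A cut-down sketch of the same on/off pattern, using a single counter index instead of the thread/bin/counter triple above:

#include <cstdio>

#ifndef COLLECT_STATISTICS
#define COLLECT_STATISTICS 0
#endif

#if COLLECT_STATISTICS
static int counters[8];                    // zero-initialized, like 'statistic' above
#define STAT_increment(ctr) (++counters[ctr])
#else
#define STAT_increment(ctr) ((void)0)      // compiles away entirely
#endif

static void* allocateFromBin(int bin) {
    STAT_increment(bin);                   // no cost when statistics are disabled
    (void)bin;                             // silence the unused warning in that case
    return nullptr;                        // real allocation elided
}

int main() {
    allocateFromBin(3);
#if COLLECT_STATISTICS
    std::printf("bin 3 allocations: %d\n", counters[3]);
#endif
    return 0;
}
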
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/TypeDefinitions.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/TypeDefinitions.h
new file mode 100644 (file)
index 0000000..4442d96
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _itt_shared_malloc_TypeDefinitions_H_
+#define _itt_shared_malloc_TypeDefinitions_H_
+
+// Define preprocessor symbols used to determine architecture
+#if _WIN32||_WIN64
+#   if defined(_M_AMD64)||defined(__MINGW64__) // the latter for MinGW support
+#       define __ARCH_x86_64 1
+#   elif defined(_M_IA64)
+#       define __ARCH_ipf 1
+#   elif defined(_M_IX86)||defined(__i386__) // the latter for MinGW support
+#       define __ARCH_x86_32 1
+#   else
+#       error Unknown processor architecture for Windows
+#   endif
+#   define USE_WINTHREAD 1
+#else /* Assume generic Unix */
+#   if __x86_64__
+#       define __ARCH_x86_64 1
+#   elif __ia64__
+#       define __ARCH_ipf 1
+#   elif __i386__ || __i386
+#       define __ARCH_x86_32 1
+#   else
+#       define __ARCH_other 1
+#   endif
+#   define USE_PTHREAD 1
+#endif
+
+// Include files containing declarations of intptr_t and uintptr_t
+#include <stddef.h>  // size_t
+#if _MSC_VER
+typedef unsigned __int16 uint16_t;
+typedef unsigned __int32 uint32_t;
+typedef unsigned __int64 uint64_t;
+#else
+#include <stdint.h>
+#endif
+
+namespace rml {
+namespace internal {
+
+extern bool  original_malloc_found;
+extern void* (*original_malloc_ptr)(size_t);
+extern void  (*original_free_ptr)(void*);
+
+} } // namespaces
+
+//! PROVIDE YOUR OWN Customize.h IF YOU FEEL NECESSARY
+#include "Customize.h"
+
+/*
+ * Functions to align an integer down or up to the given power of two,
+ * and test for such an alignment, and for power of two.
+ */
+template<typename T>
+static inline T alignDown(T arg, uintptr_t alignment) {
+    return T( (uintptr_t)arg                & ~(alignment-1));
+}
+template<typename T>
+static inline T alignUp  (T arg, uintptr_t alignment) {
+    return T(((uintptr_t)arg+(alignment-1)) & ~(alignment-1));
+    // /*is this better?*/ return (((uintptr_t)arg-1) | (alignment-1)) + 1;
+}
+template<typename T>
+static inline bool isAligned(T arg, uintptr_t alignment) {
+    return 0==((uintptr_t)arg & (alignment-1));
+}
+static inline bool isPowerOfTwo(uintptr_t arg) {
+    return arg && (0==(arg & (arg-1)));
+}
+static inline bool isPowerOfTwoMultiple(uintptr_t arg, uintptr_t divisor) {
+    // Divisor is assumed to be a power of two (which is valid for current uses).
+    MALLOC_ASSERT( isPowerOfTwo(divisor), "Divisor should be a power of two" );
+    return arg && (0==(arg & (arg-divisor)));
+}
+
+#endif /* _itt_shared_malloc_TypeDefinitions_H_ */
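
The alignment helpers above depend on the alignment being a power of two, so masking with ~(alignment-1) clears exactly the low bits. A few worked values in a standalone check that copies the same arithmetic (0x4000 is the 16KB block size used throughout the allocator):

#include <cassert>
#include <cstdint>

// Same arithmetic as alignDown/alignUp/isAligned above; alignment must be a power of two.
static inline std::uintptr_t alignDown(std::uintptr_t arg, std::uintptr_t alignment) {
    return arg & ~(alignment - 1);
}
static inline std::uintptr_t alignUp(std::uintptr_t arg, std::uintptr_t alignment) {
    return (arg + (alignment - 1)) & ~(alignment - 1);
}
static inline bool isAligned(std::uintptr_t arg, std::uintptr_t alignment) {
    return 0 == (arg & (alignment - 1));
}

int main() {
    assert(alignDown(0x4123, 0x4000) == 0x4000);   // 16KB block that address 0x4123 falls into
    assert(alignUp  (0x4123, 0x4000) == 0x8000);   // next 16KB boundary
    assert(alignUp  (0x4000, 0x4000) == 0x4000);   // already aligned: unchanged
    assert(isAligned(0x8000, 0x4000) && !isAligned(0x4123, 0x4000));
    return 0;
}
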
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/backend.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/backend.cpp
new file mode 100644 (file)
index 0000000..7db8b65
--- /dev/null
@@ -0,0 +1,275 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// intrin.h available since VS2005
+#if defined(_MSC_VER) && _MSC_VER >= 1400
+#define __TBB_HAS_INTRIN_H 1
+#else
+#define __TBB_HAS_INTRIN_H 0
+#endif
+
+#if __TBB_HAS_INTRIN_H
+#include <intrin.h>   /* for __cpuid */
+#endif
+
+#include "tbbmalloc_internal.h"
+//! Define the main synchronization method
+/** It should be specified before including LifoList.h */
+#define FINE_GRAIN_LOCKS
+#include "LifoList.h"
+
+
+namespace rml {
+namespace internal {
+
+// If USE_MALLOC_FOR_LARGE_OBJECT is nonzero, then large allocations are done via malloc.
+// Otherwise large allocations are done using the scalable allocator's block allocator.
+// As of 06.Jun.17, using malloc is about 10x faster on Linux.
+#if !_WIN32
+#define USE_MALLOC_FOR_LARGE_OBJECT 1
+#endif
+
+/*********** Code to acquire memory from the OS or other executive ****************/
+
+#if USE_DEFAULT_MEMORY_MAPPING
+#include "MapMemory.h"
+#else
+/* assume MapMemory and UnmapMemory are customized */
+#endif
+
+#if USE_MALLOC_FOR_LARGE_OBJECT
+
+// (get|free)RawMemory only necessary for the USE_MALLOC_FOR_LARGE_OBJECT case
+void* getRawMemory (size_t size, bool useMapMem = false)
+{
+    void *object;
+
+    if (useMapMem) 
+        object = MapMemory(size);
+    else
+#if MALLOC_CHECK_RECURSION
+    if (RecursiveMallocCallProtector::noRecursion())
+        object = malloc(size);
+    else if ( rml::internal::original_malloc_found )
+        object = (*rml::internal::original_malloc_ptr)(size);
+    else
+        object = MapMemory(size);
+#else
+    object = malloc(size);
+#endif /* MALLOC_CHECK_RECURSION */
+    return object;
+}
+
+void freeRawMemory (void *object, size_t size, bool useMapMem)
+{
+    if (useMapMem)
+        UnmapMemory(object, size);
+    else
+#if MALLOC_CHECK_RECURSION
+    if (RecursiveMallocCallProtector::noRecursion())
+        free(object);
+    else if ( rml::internal::original_malloc_found )
+        (*rml::internal::original_free_ptr)(object);
+    else
+        UnmapMemory(object, size);
+#else
+    free(object);
+#endif /* MALLOC_CHECK_RECURSION */
+}
+
+#else /* USE_MALLOC_FOR_LARGE_OBJECT */
+
+void* getRawMemory (size_t size, bool = false) { return MapMemory(size); }
+
+void freeRawMemory (void *object, size_t size, bool) {
+    UnmapMemory(object, size);
+}
+
+#endif /* USE_MALLOC_FOR_LARGE_OBJECT */
+
+/********* End memory acquisition code ********************************/
+
+static unsigned int getCPUid()
+{
+    unsigned int id;
+
+#if (__ARCH_x86_32||__ARCH_x86_64) && (__linux__||__APPLE__||__FreeBSD__||__sun||__MINGW32__)
+    int res;
+ #if __ARCH_x86_32
+    /* EBX used for PIC support. Having EAX in output operands 
+       prevents ICC from crashing as in __TBB_ICC_ASM_VOLATILE_BROKEN. */
+    int _eax, _ecx, _edx;
+    __asm__ ("xchgl %%ebx, %1\n\t"
+             "cpuid\n\t"
+             "xchgl %%ebx, %1\n\t"
+             : "=a" (_eax), "=r" (res)
+             : "a" (1) : "ecx", "edx");
+ #else
+    __asm__ ("cpuid\n\t"
+             : "=b" (res)
+             : "a" (1) );
+ #endif // __ARCH_x86_32
+    id = (res >> 24) & 0xff;
+#elif _WIN32 || _WIN64
+ #if __TBB_HAS_INTRIN_H
+    int CPUInfo[4];
+    __cpuid(CPUInfo, 1);
+    id = (CPUInfo[1] >> 24) & 0xff;
+ #else
+    int res;
+    _asm {
+        push ebx
+        push ecx
+        mov  eax,1
+        cpuid
+        mov  res,ebx
+        pop  ecx
+        pop  ebx
+    }
+    id = (res >> 24) & 0xff;
+ #endif
+# else
+    id = getThreadId();
+#endif
+    return id;
+}
+
+
+/* 
+ * To decrease contention for free blocks, free blocks are split, and access
+ * to them is based on process number.
+ */
+const int numOfFreeBlockLists = 4;
+
+/*
+ * This is a LIFO linked list that one can init, push or pop from
+ */
+static LifoList freeBlockList[numOfFreeBlockLists];
+
+FreeBlocks freeBlocks;
+
+bool FreeBlocks::bootstrap(RawAlloc myAlloc, RawFree myFree, size_t /*myReqSize*/)
+{
+    if (!myAlloc && !myFree) {
+        rawAlloc = getRawMemory;
+        rawFree = freeRawMemory;
+        // Get virtual memory in pieces of this size: 0x0100000 is 1 megabyte decimal
+        memReqSize = 0x0100000;
+    } else
+        MALLOC_ASSERT(0, "Not implemented yet.");
+    return mallocBigBlock();
+}
+
+BlockI *FreeBlocks::get(bool startup)
+{
+    BlockI *bigBlock;
+    // must not call getCPUid during malloc initialization 
+    // because getCPUid can call malloc
+    const unsigned myFreeList = startup? 0 : getCPUid()%numOfFreeBlockLists;
+    unsigned currListIdx = myFreeList;
+
+    do {
+        if (bigBlock = (BlockI *) freeBlockList[currListIdx].pop()) {
+            MALLOC_ITT_SYNC_ACQUIRED(freeBlockList+currListIdx);
+            break;
+        }
+        currListIdx = (currListIdx+1) % numOfFreeBlockLists;
+    } while (currListIdx != myFreeList);
+
+    while (!bigBlock) {
+        /* We are out of blocks so go to the OS and get another one */
+        if (!mallocBigBlock()) return NULL;
+
+        bigBlock = (BlockI *) freeBlockList[myFreeList].pop();
+        if (bigBlock)
+            MALLOC_ITT_SYNC_ACQUIRED(freeBlockList+myFreeList);
+    }
+
+    return bigBlock;
+}
+
+void FreeBlocks::put(BlockI *ptr, bool startup)
+{
+    unsigned myFreeList = startup? 0 : getCPUid()%numOfFreeBlockLists;
+    MALLOC_ITT_SYNC_RELEASING(freeBlockList+myFreeList);
+    freeBlockList[myFreeList].push((void **)ptr);
+}
+
+void FreeBlocks::putList(BlockI *head, BlockI *tail)
+{
+    unsigned myFreeList = getCPUid()%numOfFreeBlockLists;
+    MALLOC_ITT_SYNC_RELEASING(freeBlockList+myFreeList);
+    freeBlockList[myFreeList].pushList((void**)head, (void**)tail);
+}
+
+/*
+ * Big Blocks are the blocks we get from the OS or some similar place using getMemory above.
+ * They are placed on the freeBlockList once they are acquired.
+ */
+bool FreeBlocks::mallocBigBlock()
+{
+/* Divide the big block into smaller bigBlocks that each hold blocksPerBigBlock blocks.
+ * This is done since we really need a lot of blocks on the freeBlockList 
+ * or there will be contention problems.
+ */
+    const unsigned int blocksPerBigBlock = 16/numOfFreeBlockLists;
+
+    void *unalignedBigBlock = (*rawAlloc)(memReqSize, /*useMapMem=*/true);
+
+    if (!unalignedBigBlock) {
+        TRACEF(( "[ScalableMalloc trace] in mallocBigBlock, getMemory returns 0\n" ));
+        /* We can't get any more memory from the OS or executive */
+        return false;
+    }
+
+    void *alignedBigBlock = alignUp(unalignedBigBlock, blockSize);
+    void *bigBlockCeiling = (void*)((uintptr_t)unalignedBigBlock + memReqSize);
+
+    size_t bigBlockSplitSize = blocksPerBigBlock * blockSize;
+
+    BlockI *splitBlock = (BlockI*)alignedBigBlock;
+
+    // distribute alignedBigBlock between all freeBlockList elements
+    for (unsigned currListIdx = 0;
+         ((uintptr_t)splitBlock + blockSize) <= (uintptr_t)bigBlockCeiling;
+         currListIdx = (currListIdx+1) % numOfFreeBlockLists) {
+        void *splitEdge = (void*)((uintptr_t)splitBlock + bigBlockSplitSize);
+        if( splitEdge > bigBlockCeiling) {
+            splitEdge = alignDown(bigBlockCeiling, blockSize);
+        }
+        ((BlockI*)splitBlock)->initialize(splitEdge);
+        MALLOC_ITT_SYNC_RELEASING(freeBlockList+currListIdx);
+        freeBlockList[currListIdx].push((void**) splitBlock);
+        splitBlock = (BlockI*)splitEdge;
+    }
+
+    TRACEF(( "[ScalableMalloc trace] in mallocBigBlock returning 1\n" ));
+    return true;
+}
+
+} } // namespaces
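
The backend spreads free 16KB blocks across numOfFreeBlockLists LIFO lists, picks a list by CPU id, and only probes the neighbouring lists when the preferred one is empty, which keeps most operations contention-free. A condensed sketch of that probing scheme; mutex-guarded vectors stand in for LifoList and the CPU id is passed in as a plain parameter:

#include <mutex>
#include <vector>

constexpr unsigned kLists = 4;                     // plays the role of numOfFreeBlockLists

struct StripedFreeLists {
    struct Stripe {
        std::mutex         lock;
        std::vector<void*> blocks;
    } stripes[kLists];

    void put(unsigned cpuId, void* block) {
        Stripe& s = stripes[cpuId % kLists];       // prefer the stripe of the freeing CPU
        std::lock_guard<std::mutex> guard(s.lock);
        s.blocks.push_back(block);
    }

    void* get(unsigned cpuId) {
        const unsigned start = cpuId % kLists;
        unsigned idx = start;
        do {                                       // probe own stripe first, then the others
            Stripe& s = stripes[idx];
            std::lock_guard<std::mutex> guard(s.lock);
            if (!s.blocks.empty()) {
                void* block = s.blocks.back();
                s.blocks.pop_back();
                return block;
            }
            idx = (idx + 1) % kLists;
        } while (idx != start);
        return nullptr;                            // caller would then go get more memory
    }
};
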
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/backref.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/backref.cpp
new file mode 100644 (file)
index 0000000..7c5c944
--- /dev/null
@@ -0,0 +1,240 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include <string.h>
+#include <new>        /* for placement new */
+#include "tbbmalloc_internal.h"
+
+namespace rml {
+namespace internal {
+
+
+/********* backreferences ***********************/
+/* Each 16KB block and each large memory object header contains a BackRefIdx
+ * that points to an entry in some BackRefBlock, which in turn points back to this block or header.
+ */
+struct BackRefBlock {
+    BackRefBlock *nextForUse;     // the next in the chain of blocks with free items
+    FreeObject   *bumpPtr;        // bump pointer moves from the end to the beginning of the block
+    FreeObject   *freeList;
+    int           allocatedCount; // the number of objects allocated
+    int           myNum;          // the index in the parent array
+    MallocMutex   blockMutex;
+    bool          addedToForUse;  // this block is already added to the listForUse chain
+
+    BackRefBlock(BackRefBlock *blockToUse, int myNum) :
+        nextForUse(NULL), bumpPtr((FreeObject*)((uintptr_t)blockToUse + blockSize - sizeof(void*))),
+        freeList(NULL), allocatedCount(0), myNum(myNum), addedToForUse(false) {
+        // index in BackRefMaster must fit into uint16_t
+        MALLOC_ASSERT(!(myNum >> 16), ASSERT_TEXT); 
+    }
+
+    // when BackRefMaster::findFreeBlock() calls getRawBlock, 
+    // BackRefBlock::bytes is used implicitly
+    static const int bytes = blockSize;
+};
+
+// max number of backreference pointers in 16KB block
+static const int BR_MAX_CNT = (BackRefBlock::bytes-sizeof(BackRefBlock))/sizeof(void*);
+
+struct BackRefMaster {
+/* A 16KB block can hold up to ~2K back pointers to 16KB blocks or large objects,
+ * so it can address at least 32MB. The array of 64KB holds 8K pointers
+ * to such blocks, addressing ~256 GB.
+ */
+    static const size_t bytes = 64*1024;
+    static const int dataSz;
+
+    BackRefBlock  *active;         // if defined, use it for allocations
+    BackRefBlock  *listForUse;     // the chain of data blocks with free items
+    int            lastUsed;       // index of the last used block
+    BackRefBlock  *backRefBl[1];   // the real size of the array is dataSz
+
+    BackRefBlock *findFreeBlock();
+    void          addBackRefBlockToList(BackRefBlock *bl);
+    void          addEmptyBackRefBlock(BackRefBlock *newBl);
+};
+
+const int BackRefMaster::dataSz
+    = 1+(BackRefMaster::bytes-sizeof(BackRefMaster))/sizeof(BackRefBlock*);
+
+static MallocMutex backRefMutex;
+static BackRefMaster *backRefMaster;
+
+bool initBackRefMaster()
+{
+    // reserve space for master table and 4 leaves taking into account VirtualAlloc allocation granularity
+    // MapMemory is forced because the function runs during startup.
+    const int leaves = 4;
+    if (! (backRefMaster = (BackRefMaster*)getRawMemory(BackRefMaster::bytes+leaves*BackRefBlock::bytes, /*useMapMem=*/true)))
+        return false;
+    backRefMaster->listForUse = NULL;
+    for (int i=0; i<leaves; i++) {
+        BackRefBlock *bl = (BackRefBlock *)((uintptr_t)backRefMaster + BackRefMaster::bytes + i*BackRefBlock::bytes);
+        backRefMaster->lastUsed = i;
+        backRefMaster->addEmptyBackRefBlock(bl);
+        if (i)
+            backRefMaster->addBackRefBlockToList(bl);
+        else // active leaf is not needed in listForUse
+            backRefMaster->active = bl;
+    }
+    return true;
+}
+
+void BackRefMaster::addBackRefBlockToList(BackRefBlock *bl)
+{
+    bl->nextForUse = backRefMaster->listForUse;
+    backRefMaster->listForUse = bl;
+    bl->addedToForUse = true;
+}
+
+void BackRefMaster::addEmptyBackRefBlock(BackRefBlock *newBl)
+{
+    memset(newBl, 0, BackRefBlock::bytes);
+    new (newBl) BackRefBlock(newBl, lastUsed);
+    backRefBl[lastUsed] = newBl;
+}
+
+BackRefBlock *BackRefMaster::findFreeBlock()
+{
+    if (active->allocatedCount < BR_MAX_CNT)
+        return active;
+        
+    if (listForUse) {                                   // use released list
+        active = listForUse;
+        listForUse = listForUse->nextForUse;
+        MALLOC_ASSERT(active->addedToForUse, ASSERT_TEXT);
+        active->addedToForUse = false;
+    } else if (lastUsed-1 < backRefMaster->dataSz) {    // allocate new data node
+        // TODO: this block is never released, so it can prevent reuse
+        // of the memory it belongs to in the backend;
+        // getRawMemory could be used instead.
+        BackRefBlock *newBl = 
+            (BackRefBlock*)BlockI::getRawBlock( /*startup=*/!isMallocInitializedExt() );
+        if (!newBl) return NULL;
+        lastUsed++;
+        backRefMaster->addEmptyBackRefBlock(newBl);
+        active = newBl;
+    } else  // no free blocks, give up
+        return NULL;
+    return active;
+}
+
+void *getBackRef(BackRefIdx backRefIdx)
+{
+    // !backRefMaster means no initialization done, so it can't be valid memory
+    if (!backRefMaster || backRefIdx.getMaster() > backRefMaster->lastUsed
+        || backRefIdx.getOffset() >= BR_MAX_CNT) 
+        return NULL;
+    return *(void**)((uintptr_t)backRefMaster->backRefBl[backRefIdx.getMaster()]
+                     + sizeof(BackRefBlock)+backRefIdx.getOffset()*sizeof(void*));
+}
+
+void setBackRef(BackRefIdx backRefIdx, void *newPtr)
+{
+    MALLOC_ASSERT(backRefIdx.getMaster()<=backRefMaster->lastUsed && backRefIdx.getOffset()<BR_MAX_CNT,
+                  ASSERT_TEXT);
+    *(void**)((uintptr_t)backRefMaster->backRefBl[backRefIdx.getMaster()]
+              + sizeof(BackRefBlock) + backRefIdx.getOffset()*sizeof(void*)) = newPtr;
+}
+
+BackRefIdx BackRefIdx::newBackRef(bool largeObj)
+{
+    BackRefBlock *blockToUse;
+    void **toUse;
+    BackRefIdx res;
+
+    do {
+        { // global lock taken to find a block
+            MallocMutex::scoped_lock lock(backRefMutex);
+
+            MALLOC_ASSERT(backRefMaster, ASSERT_TEXT);
+            if (! (blockToUse = backRefMaster->findFreeBlock()))
+                return BackRefIdx();
+        }
+        toUse = NULL;
+        { // the block is locked to find a reference
+            MallocMutex::scoped_lock lock(blockToUse->blockMutex);
+
+            if (blockToUse->freeList) {
+                toUse = (void**)blockToUse->freeList;
+                blockToUse->freeList = blockToUse->freeList->next;
+            } else if (blockToUse->allocatedCount < BR_MAX_CNT) {
+                toUse = (void**)blockToUse->bumpPtr;
+                blockToUse->bumpPtr = 
+                    (FreeObject*)((uintptr_t)blockToUse->bumpPtr - sizeof(void*));
+                if (blockToUse->allocatedCount == BR_MAX_CNT-1) {
+                    MALLOC_ASSERT((uintptr_t)blockToUse->bumpPtr
+                                  < (uintptr_t)blockToUse+sizeof(BackRefBlock),
+                                  ASSERT_TEXT);
+                    blockToUse->bumpPtr = NULL;
+                }
+            }
+            if (toUse)
+                blockToUse->allocatedCount++;
+        } // end of lock scope
+    } while (!toUse);
+    res.master = blockToUse->myNum;
+    uintptr_t offset = 
+        ((uintptr_t)toUse - ((uintptr_t)blockToUse + sizeof(BackRefBlock)))/sizeof(void*);
+    // Is offset too big?
+    MALLOC_ASSERT(!(offset >> 15), ASSERT_TEXT);
+    res.offset = offset;
+    if (largeObj) res.largeObj = largeObj;
+
+    return res;
+}
+
+void removeBackRef(BackRefIdx backRefIdx)
+{
+    MALLOC_ASSERT(backRefIdx.getMaster()<=backRefMaster->lastUsed 
+                  && backRefIdx.getOffset()<BR_MAX_CNT, ASSERT_TEXT);
+    BackRefBlock *currBlock = backRefMaster->backRefBl[backRefIdx.getMaster()];
+    FreeObject *freeObj = (FreeObject*)((uintptr_t)currBlock + sizeof(BackRefBlock)
+                                        + backRefIdx.getOffset()*sizeof(void*));
+    {
+        MallocMutex::scoped_lock lock(currBlock->blockMutex);
+
+        freeObj->next = currBlock->freeList;
+        currBlock->freeList = freeObj;
+        currBlock->allocatedCount--;
+    }
+    // TODO: do we need double-check here?
+    if (!currBlock->addedToForUse && currBlock!=backRefMaster->active) {
+        MallocMutex::scoped_lock lock(backRefMutex);
+
+        if (!currBlock->addedToForUse && currBlock!=backRefMaster->active)
+            backRefMaster->addBackRefBlockToList(currBlock);
+    }
+}
+
+/********* End of backreferences ***********************/
+
+} // namespace internal
+} // namespace rml
+
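A back reference is a (master, offset) pair: master selects a BackRefBlock through the master table, offset selects a slot after that block's header, and the slot stores a pointer back to the owning 16KB block or large-object header. A simplified two-level lookup over ordinary arrays; the dimensions and the *Lite names are illustrative, not the real BR_MAX_CNT or types:

#include <cassert>
#include <cstdint>

constexpr int kSlotsPerLeaf = 4;                  // stands in for BR_MAX_CNT (~2K in the real code)

struct BackRefIdxLite { std::uint16_t master; std::uint16_t offset; };

struct BackRefTableLite {
    void* leaves[8][kSlotsPerLeaf] = {};          // master table of leaf slot arrays

    void  set(BackRefIdxLite idx, void* owner)   { leaves[idx.master][idx.offset] = owner; }
    void* get(BackRefIdxLite idx) const          { return leaves[idx.master][idx.offset]; }
};

int main() {
    BackRefTableLite table;
    int fakeHeader = 0;                           // stands in for a 16KB Block or large-object header
    BackRefIdxLite idx{1, 2};                     // the real BackRefIdx lives inside the block header
    table.set(idx, &fakeHeader);
    assert(table.get(idx) == &fakeHeader);        // any pointer into the block maps back to its header
    return 0;
}
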
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/frontend.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/frontend.cpp
new file mode 100644 (file)
index 0000000..5c7ba3d
--- /dev/null
@@ -0,0 +1,2057 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+
+#include "tbbmalloc_internal.h"
+#include <errno.h>
+
+//! Define the main synchronization method
+/** It should be specified before including LifoList.h */
+#define FINE_GRAIN_LOCKS
+#include "LifoList.h"
+
+#if USE_PTHREAD
+    #define TlsSetValue_func pthread_setspecific
+    #define TlsGetValue_func pthread_getspecific
+    typedef pthread_key_t tls_key_t;
+    #include <sched.h>
+    inline void do_yield() {sched_yield();}
+
+#elif USE_WINTHREAD
+    #define _WIN32_WINNT 0x0400
+    #include "tbb/machine/windows_api.h"
+    #define TlsSetValue_func TlsSetValue
+    #define TlsGetValue_func TlsGetValue
+    typedef DWORD tls_key_t;
+    inline void do_yield() {SwitchToThread();}
+
+#else
+    #error Must define USE_PTHREAD or USE_WINTHREAD
+
+#endif
+
+
+#define FREELIST_NONBLOCKING 1
+
+void mallocThreadShutdownNotification(void* arg);
+
+namespace rml {
+namespace internal {
+
+class Block;
+
+#if MALLOC_CHECK_RECURSION
+
+inline bool isMallocInitialized();
+
+bool RecursiveMallocCallProtector::noRecursion() {
+    MALLOC_ASSERT(isMallocInitialized(), 
+                  "Recursion status can be checked only when initialization was done.");
+    return !mallocRecursionDetected;
+}
+
+#endif // MALLOC_CHECK_RECURSION
+
+/*
+ * Block::objectSize value used to mark blocks allocated by startupAlloc
+ */
+const unsigned int startupAllocObjSizeMark = ~(unsigned int)0;
+
+/*
+ * This is the number of bins in the TLS that lead to blocks we can allocate in.
+ */
+const uint32_t numBlockBinLimit = 31;
+
+/*
+ * The following constant is used to define the size of struct Block, the block header.
+ * The intent is to have the size of a Block be a multiple of the cache line size; this allows us to
+ * get good alignment at the cost of some overhead equal to the amount of padding included in the Block.
+ */
+
+const int blockHeaderAlignment = 64; // a common size of a cache line
+
+
+/********* The data structures and global objects        **************/
+
+/*
+ * The malloc routines themselves need to be able to occasionally malloc some space,
+ * in order to set up the structures used by the thread-local data. This
+ * routine performs that function.
+ */
+class BootStrapBlocks {
+    MallocMutex bootStrapLock;
+    Block      *bootStrapBlock;
+    Block      *bootStrapBlockUsed;
+    FreeObject *bootStrapObjectList;
+public:
+    void *allocate(size_t size);
+    void free(void* ptr);
+};
+
+class ThreadId {
+    static tls_key_t Tid_key;
+    static intptr_t ThreadIdCount;
+
+    unsigned int id;
+public:
+
+    static void init() {
+#if USE_WINTHREAD
+        Tid_key = TlsAlloc();
+#else
+        int status = pthread_key_create( &Tid_key, NULL );
+        if ( status ) {
+            fprintf (stderr, "The memory manager cannot create tls key during initialization; exiting \n");
+            exit(1);
+        }
+#endif /* USE_WINTHREAD */
+    }
+    static ThreadId get() {
+        ThreadId result;
+        result.id = reinterpret_cast<intptr_t>(TlsGetValue_func(Tid_key));
+        if( !result.id ) {
+            RecursiveMallocCallProtector scoped;
+            // Thread-local value is zero -> first call from this thread,
+            // need to initialize with next ID value (IDs start from 1)
+            result.id = AtomicIncrement(ThreadIdCount); // returned new value!
+            TlsSetValue_func( Tid_key, reinterpret_cast<void*>(result.id) );
+        }
+        return result;
+    }
+    bool defined() const { return id; }
+    void undef() { id = 0; }
+    void invalid() { id = (unsigned int)-1; }
+    bool own() const { return id == ThreadId::get().id; }
+
+    friend bool operator==(const ThreadId &id1, const ThreadId &id2);
+    friend unsigned int getThreadId();
+};
+
+tls_key_t ThreadId::Tid_key;
+intptr_t ThreadId::ThreadIdCount;
+
+bool operator==(const ThreadId &id1, const ThreadId &id2) {
+    return id1.id == id2.id;
+}
+
+unsigned int getThreadId() { return ThreadId::get().id; }
+
+/* The 'next' field in the block header has to maintain some invariants:
+ *   it needs to be on a 16K boundary and the first field in the block.
+ *   Any value stored there needs to have the lower 14 bits set to 0
+ *   so that various asserts work. This means that if you want to smash this memory
+ *   for debugging purposes you will need to obey this invariant.
+ * The total size of the header needs to be a power of 2 to simplify
+ * the alignment requirements. For now it is a 128 byte structure.
+ * To avoid false sharing, the fields changed only locally are separated 
+ * from the fields changed by foreign threads.
+ * Changing the size of the block header would require changing
+ * some bin allocation sizes, in particular "fitting" sizes (see above).
+ */
+class Bin;
+class StartupBlock;
+struct TLSData;
+
+class LocalBlockFields : public BlockI {
+protected:
+    Block       *next;            /* This field needs to be on a 16K boundary and the first field in the block
+                                     for LIFO lists to work. */
+    Block       *previous;        /* Use double linked list to speed up removal */
+    unsigned int objectSize;
+    ThreadId     owner;
+    FreeObject  *bumpPtr;         /* Bump pointer moves from the end to the beginning of a block */
+    FreeObject  *freeList;
+    BackRefIdx   backRefIdx;
+    unsigned int allocatedCount;  /* Number of objects allocated (obviously by the owning thread) */
+    bool         isFull;
+
+    friend void *BootStrapBlocks::allocate(size_t size);
+    friend class FreeBlockPool;
+    friend class StartupBlock;
+    friend void BlockI::initialize(void *bumpPtr);
+};
+    
+class Block : public LocalBlockFields {
+    size_t       __pad_local_fields[(blockHeaderAlignment-sizeof(LocalBlockFields))/sizeof(size_t)];
+    FreeObject  *publicFreeList;
+    Block       *nextPrivatizable;
+    size_t       __pad_public_fields[(blockHeaderAlignment-2*sizeof(void*))/sizeof(size_t)];
+
+public:
+    static Block *getEmpty(size_t size);
+
+    inline FreeObject* allocate();
+    inline FreeObject *allocateFromFreeList();
+    inline bool emptyEnoughToUse();
+    bool freeListNonNull() { return freeList; }
+    void freePublicObject(FreeObject *objectToFree);
+    inline void freeOwnObject(FreeObject *objectToFree);
+    void returnEmpty(bool poolTheBlock);
+    void privatizePublicFreeList();
+    void restoreBumpPtr();
+    void privatizeOrphaned(Bin *bin);
+    void shareOrphaned(const Bin *bin);
+    unsigned int getSize() const { return objectSize; }
+    const BackRefIdx *getBackRefIdx() const { return &backRefIdx; }
+    bool ownBlock() const { return owner.own(); }
+    bool isStartupAllocObject() const { return objectSize == startupAllocObjSizeMark; }
+    inline FreeObject *findObjectToFree(void *object) const;
+    bool checkFreePrecond() const { return allocatedCount>0; }
+    const BackRefIdx *getBackRef() const { return &backRefIdx; }
+    
+protected:
+    static Block *getRaw(bool startup);
+    void cleanBlockHeader();
+
+private:
+    static const float emptyEnoughRatio; /* "Reactivate" a block if this share of its objects is free. */
+
+    inline FreeObject *allocateFromBumpPtr();
+    void initEmptyBlock(size_t size);
+    inline FreeObject *findAllocatedObject(const void *address) const;
+    inline bool isProperlyPlaced(const void *object) const;
+
+    friend class Bin;
+    friend void ::mallocThreadShutdownNotification(void* arg);
+    friend BlockI *BlockI::getRawBlock(bool startup);
+};
+
+const float Block::emptyEnoughRatio = 1.0 / 4.0;
+
+class Bin {
+    Block      *activeBlk;
+    Block      *mailbox;
+    MallocMutex mailLock;
+
+    static TLSData* createTLS();
+public:
+    static inline Bin* getAllocationBin(size_t size);
+
+    inline Block* getActiveBlock() const { return activeBlk; }
+    inline void setActiveBlock(Block *block);
+    inline Block* setPreviousBlockActive();
+    Block* getPublicFreeListBlock();
+    void moveBlockToBinFront(Block *block);
+    void processLessUsedBlock(Block *block);
+
+    void outofTLSBin (Block* block);
+    void verifyTLSBin (size_t size) const;
+    void pushTLSBin(Block* block);
+
+    friend void ::mallocThreadShutdownNotification(void* arg);
+    friend void Block::freePublicObject (FreeObject *objectToFree);
+};
+
+/********* End of the data structures                    **************/
+
+/*
+ * There are bins for all 8 byte aligned objects less than this segregated size; 8 bins in total
+ */
+const uint32_t minSmallObjectIndex = 0;
+const uint32_t numSmallObjectBins = 8;
+const uint32_t maxSmallObjectSize = 64;
+
+/*
+ * There are 4 bins between each couple of powers of 2 [64-128-256-...]
+ * from maxSmallObjectSize till this size; 16 bins in total
+ */
+const uint32_t minSegregatedObjectIndex = minSmallObjectIndex+numSmallObjectBins;
+const uint32_t numSegregatedObjectBins = 16;
+const uint32_t maxSegregatedObjectSize = 1024;
+
+/*
+ * And there are 5 bins with the following allocation sizes: 1792, 2688, 3968, 5376, 8064.
+ * They are selected to fit 9, 6, 4, 3, and 2 objects per block, respectively, and are also multiples of 128.
+ * If sizeof(Block) changes from 128, these sizes require close attention!
+ */
+const uint32_t minFittingIndex = minSegregatedObjectIndex+numSegregatedObjectBins;
+const uint32_t numFittingBins = 5;
+
+const uint32_t fittingAlignment = 128;
+
+#define SET_FITTING_SIZE(N) ( (blockSize-sizeof(Block))/N ) & ~(fittingAlignment-1)
+const uint32_t fittingSize1 = SET_FITTING_SIZE(9);
+const uint32_t fittingSize2 = SET_FITTING_SIZE(6);
+const uint32_t fittingSize3 = SET_FITTING_SIZE(4);
+const uint32_t fittingSize4 = SET_FITTING_SIZE(3);
+const uint32_t fittingSize5 = SET_FITTING_SIZE(2);
+#undef SET_FITTING_SIZE
+
+/*
+ * The total number of thread-specific Block-based bins
+ */
+const uint32_t numBlockBins = minFittingIndex+numFittingBins;
+
+/*
+ * Objects of this size and larger are considered large objects.
+ */
+const uint32_t minLargeObjectSize = fittingSize5 + 1;
+
+/*
+ * When a block that is not completely free is returned for reuse by other threads
+ * this is where the block goes.
+ *
+ * LifoList assumes zero initialization; so below its constructors are omitted,
+ * to avoid linking with C++ libraries on Linux.
+ */
+
+class OrphanedBlocks {
+    LifoList bins[numBlockBinLimit];
+public:
+    Block *get(Bin* bin, unsigned int size);
+    void put(Bin* bin, Block *block);
+};
+
+static char globalBinSpace[sizeof(LifoList)*numBlockBinLimit];
+static OrphanedBlocks *orphanedBlocks = (OrphanedBlocks*)globalBinSpace;
+
+/*
+ * Per-thread pool of 16KB blocks. The idea behind it is to not share with other
+ * threads memory that is likely in the local cache(s) of our CPU.
+ */
+class FreeBlockPool {
+    Block *head;
+    Block *tail;
+    int    size;
+    void insertBlock(Block *block);
+public:
+    static const int POOL_HIGH_MARK = 32;
+    static const int POOL_LOW_MARK  = 8;
+
+    Block *getBlock();
+    void returnBlock(Block *block);
+    void releaseAllBlocks();
+};
+
+struct TLSData {
+    Bin           bin[numBlockBinLimit];
+    FreeBlockPool pool;
+};
+
+#if MALLOC_CHECK_RECURSION
+MallocMutex RecursiveMallocCallProtector::rmc_mutex;
+pthread_t   RecursiveMallocCallProtector::owner_thread;
+void       *RecursiveMallocCallProtector::autoObjPtr;
+bool        RecursiveMallocCallProtector::mallocRecursionDetected;
+#if __FreeBSD__
+bool        RecursiveMallocCallProtector::canUsePthread;
+#endif
+
+#endif
+
+/*********** Code to provide thread ID and a thread-local void pointer **********/
+
+static tls_key_t TLS_pointer_key;
+
+static inline TLSData* getThreadMallocTLS() {
+    TLSData *result;
+    result = (TLSData *)TlsGetValue_func( TLS_pointer_key );
+// The assert below is incorrect: with lazy initialization, it fails on the first call of the function.
+//    MALLOC_ASSERT( result, "Memory allocator not initialized" );
+    return result;
+}
+
+static inline void  setThreadMallocTLS( TLSData * newvalue ) {
+    RecursiveMallocCallProtector scoped;
+    TlsSetValue_func( TLS_pointer_key, newvalue );
+}
+
+/*********** End code to provide thread ID and a TLS pointer **********/
+
+#if !MALLOC_DEBUG
+#if __INTEL_COMPILER || _MSC_VER
+#define NOINLINE(decl) __declspec(noinline) decl
+#define ALWAYSINLINE(decl) __forceinline decl
+#elif __GNUC__
+#define NOINLINE(decl) decl __attribute__ ((noinline))
+#define ALWAYSINLINE(decl) decl __attribute__ ((always_inline))
+#else
+#define NOINLINE(decl) decl
+#define ALWAYSINLINE(decl) decl
+#endif
+
+static NOINLINE( void doInitialization() );
+
+ALWAYSINLINE( bool isMallocInitialized() );
+
+#undef ALWAYSINLINE
+#undef NOINLINE
+#endif /* !MALLOC_DEBUG */
+
+
+/********* Now some rough utility code to deal with indexing the size bins. **************/
+
+/*
+ * Given a number return the highest non-zero bit in it. It is intended to work with 32-bit values only.
+ * Moreover, on IPF, for sake of simplicity and performance, it is narrowed to only serve for 64 to 1023.
+ * This is enough for current algorithm of distribution of sizes among bins.
+ */
+#if _WIN64 && _MSC_VER>=1400 && !__INTEL_COMPILER
+extern "C" unsigned char _BitScanReverse( unsigned long* i, unsigned long w );
+#pragma intrinsic(_BitScanReverse)
+#endif
+static inline unsigned int highestBitPos(unsigned int n)
+{
+    unsigned int pos;
+#if __ARCH_x86_32||__ARCH_x86_64
+
+# if __linux__||__APPLE__||__FreeBSD__||__sun||__MINGW32__
+    __asm__ ("bsr %1,%0" : "=r"(pos) : "r"(n));
+# elif (_WIN32 && (!_WIN64 || __INTEL_COMPILER))
+    __asm
+    {
+        bsr eax, n
+        mov pos, eax
+    }
+# elif _WIN64 && _MSC_VER>=1400
+    _BitScanReverse((unsigned long*)&pos, (unsigned long)n);
+# else
+#   error highestBitPos() not implemented for this platform
+# endif
+
+#elif __ARCH_ipf || __ARCH_other
+    static unsigned int bsr[16] = {0,6,7,7,8,8,8,8,9,9,9,9,9,9,9,9};
+    MALLOC_ASSERT( n>=64 && n<1024, ASSERT_TEXT );
+    pos = bsr[ n>>6 ];
+#else
+#   error highestBitPos() not implemented for this platform
+#endif /* __ARCH_* */
+    return pos;
+}
+
+/*
+ * Depending on indexRequest, for a given size return either the index into the bin
+ * for objects of this size, or the actual size of objects in this bin.
+ */
+template<bool indexRequest>
+static unsigned int getIndexOrObjectSize (unsigned int size)
+{
+    if (size <= maxSmallObjectSize) { // selection from 4/8/16/24/32/40/48/56/64
+         /* Index 0 holds up to 8 bytes, Index 1 16 and so forth */
+        return indexRequest ? (size - 1) >> 3 : alignUp(size,8);
+    }
+    else if (size <= maxSegregatedObjectSize ) { // 80/96/112/128 / 160/192/224/256 / 320/384/448/512 / 640/768/896/1024
+        unsigned int order = highestBitPos(size-1); // which group of bin sizes?
+        MALLOC_ASSERT( 6<=order && order<=9, ASSERT_TEXT );
+        if (indexRequest)
+            return minSegregatedObjectIndex - (4*6) - 4 + (4*order) + ((size-1)>>(order-2));
+        else {
+            unsigned int alignment = 128 >> (9-order); // alignment in the group
+            MALLOC_ASSERT( alignment==16 || alignment==32 || alignment==64 || alignment==128, ASSERT_TEXT );
+            return alignUp(size,alignment);
+        }
+    }
+    else {
+        if( size <= fittingSize3 ) {
+            if( size <= fittingSize2 ) {
+                if( size <= fittingSize1 )
+                    return indexRequest ? minFittingIndex : fittingSize1; 
+                else
+                    return indexRequest ? minFittingIndex+1 : fittingSize2;
+            } else
+                return indexRequest ? minFittingIndex+2 : fittingSize3;
+        } else {
+            if( size <= fittingSize5 ) {
+                if( size <= fittingSize4 )
+                    return indexRequest ? minFittingIndex+3 : fittingSize4;
+                else
+                    return indexRequest ? minFittingIndex+4 : fittingSize5;
+            } else {
+                MALLOC_ASSERT( 0,ASSERT_TEXT ); // this should not happen
+                return ~0U;
+            }
+        }
+    }
+}
+
+static unsigned int getIndex (unsigned int size)
+{
+    return getIndexOrObjectSize</*indexRequest*/true>(size);
+}
+
+static unsigned int getObjectSize (unsigned int size)
+{
+    return getIndexOrObjectSize</*indexRequest*/false>(size);
+}
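+
+/* Worked example of the mapping above, for illustration (the small-object and segregated
+   size lists come from the comments in getIndexOrObjectSize):
+     - size 24:   small object;  index = (24-1)>>3 = 2, object size = alignUp(24,8) = 24.
+     - size 100:  segregated;    order = highestBitPos(99) = 6, so it falls into the
+                  80/96/112/128 group with alignment 128>>(9-6) = 16; object size = 112.
+     - size 1000: segregated;    order = 9, alignment 128; object size = alignUp(1000,128) = 1024.
+   Larger sizes fall through to the fittingSize1..fittingSize5 ladder handled explicitly above. */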
+
+/*
+ * Initialization code.
+ *
+ */
+
+/*
+ * Forward Refs
+ */
+
+static BootStrapBlocks bootStrapBlocks;
+
+void *BootStrapBlocks::allocate(size_t size)
+{
+    FreeObject *result;
+
+    MALLOC_ASSERT( size == sizeof(TLSData), ASSERT_TEXT );
+
+    { // Lock with acquire
+        MallocMutex::scoped_lock scoped_cs(bootStrapLock);
+
+        if( bootStrapObjectList) {
+            result = bootStrapObjectList;
+            bootStrapObjectList = bootStrapObjectList->next;
+        } else {
+            if (!bootStrapBlock) {
+                bootStrapBlock = Block::getEmpty(size);
+                if (!bootStrapBlock) return NULL;
+            }
+            result = bootStrapBlock->bumpPtr;
+            bootStrapBlock->bumpPtr = (FreeObject *)((uintptr_t)bootStrapBlock->bumpPtr - bootStrapBlock->objectSize);
+            if ((uintptr_t)bootStrapBlock->bumpPtr < (uintptr_t)bootStrapBlock+sizeof(Block)) {
+                bootStrapBlock->bumpPtr = NULL;
+                bootStrapBlock->next = bootStrapBlockUsed;
+                bootStrapBlockUsed = bootStrapBlock;
+                bootStrapBlock = NULL;
+            }
+        }
+    } // Unlock with release
+
+    memset (result, 0, size);
+    return (void*)result;
+}
+
+void BootStrapBlocks::free(void* ptr)
+{
+    MALLOC_ASSERT( ptr, ASSERT_TEXT );
+    { // Lock with acquire
+        MallocMutex::scoped_lock scoped_cs(bootStrapLock);
+        ((FreeObject*)ptr)->next = bootStrapObjectList;
+        bootStrapObjectList = (FreeObject*)ptr;
+    } // Unlock with release
+}
+
+#if !(FREELIST_NONBLOCKING)
+static MallocMutex publicFreeListLock; // lock for changes of publicFreeList
+#endif
+
+const uintptr_t UNUSABLE = 0x1;
+inline bool isSolidPtr( void* ptr )
+{
+    return (UNUSABLE|(uintptr_t)ptr)!=UNUSABLE;
+}
+inline bool isNotForUse( void* ptr )
+{
+    return (uintptr_t)ptr==UNUSABLE;
+}
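+
+/* The two predicates above distinguish the three possible states of such a pointer
+   (publicFreeList or nextPrivatizable), summarized here for convenience:
+       value             isSolidPtr   isNotForUse   meaning
+       NULL              false        false         empty list / field not set
+       UNUSABLE (0x1)    false        true          block is orphaned; do not touch
+       ordinary pointer  true         false         real list head or bin pointer     */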
+
+/********* End rough utility code  **************/
+
+/********* Thread and block related code      *************/
+
+void Bin::verifyTLSBin (size_t size) const
+{
+#if MALLOC_DEBUG
+/* The debug version verifies the TLSBin as needed */
+    Bin*   tlsBin = getThreadMallocTLS()->bin;
+    uint32_t index = getIndex(size);
+    uint32_t objSize = getObjectSize(size);
+
+    MALLOC_ASSERT( this == tlsBin+index, ASSERT_TEXT );
+
+    if (activeBlk) {
+        MALLOC_ASSERT( activeBlk->owner.own(), ASSERT_TEXT );
+        MALLOC_ASSERT( activeBlk->objectSize == objSize, ASSERT_TEXT );
+#if MALLOC_DEBUG>1
+        for (Block* temp = activeBlk->next; temp; temp=temp->next) {
+            MALLOC_ASSERT( temp!=activeBlk, ASSERT_TEXT );
+            MALLOC_ASSERT( temp->owner.own(), ASSERT_TEXT );
+            MALLOC_ASSERT( temp->objectSize == objSize, ASSERT_TEXT );
+            MALLOC_ASSERT( temp->previous->next == temp, ASSERT_TEXT );
+            if (temp->next) {
+                MALLOC_ASSERT( temp->next->previous == temp, ASSERT_TEXT );
+            }
+        }
+        for (Block* temp = activeBlk->previous; temp; temp=temp->previous) {
+            MALLOC_ASSERT( temp!=activeBlk, ASSERT_TEXT );
+            MALLOC_ASSERT( temp->owner.own(), ASSERT_TEXT );
+            MALLOC_ASSERT( temp->objectSize == objSize, ASSERT_TEXT );
+            MALLOC_ASSERT( temp->next->previous == temp, ASSERT_TEXT );
+            if (temp->previous) {
+                MALLOC_ASSERT( temp->previous->next == temp, ASSERT_TEXT );
+            }
+        }
+#endif /* MALLOC_DEBUG>1 */
+    }
+#endif /* MALLOC_DEBUG */
+}
+
+/*
+ * Add a block to the start of this tls bin list.
+ */
+void Bin::pushTLSBin(Block* block)
+{
+    /* The object size is taken from the block rather than passed as a parameter,
+       because the function is applied to partially filled blocks as well. */
+    unsigned int size = block->objectSize;
+
+    MALLOC_ASSERT( block->owner == ThreadId::get(), ASSERT_TEXT );
+    MALLOC_ASSERT( block->objectSize != 0, ASSERT_TEXT );
+    MALLOC_ASSERT( block->next == NULL, ASSERT_TEXT );
+    MALLOC_ASSERT( block->previous == NULL, ASSERT_TEXT );
+
+    MALLOC_ASSERT( this, ASSERT_TEXT );
+    verifyTLSBin(size);
+
+    block->next = activeBlk;
+    if( activeBlk ) {
+        block->previous = activeBlk->previous;
+        activeBlk->previous = block;
+        if( block->previous )
+            block->previous->next = block;
+    } else {
+        activeBlk = block;
+    }
+
+    verifyTLSBin(size);
+}
+
+/*
+ * Take a block out of its tls bin (e.g. before removal).
+ */
+void Bin::outofTLSBin(Block* block)
+{
+    unsigned int size = block->objectSize;
+
+    MALLOC_ASSERT( block->owner == ThreadId::get(), ASSERT_TEXT );
+    MALLOC_ASSERT( block->objectSize != 0, ASSERT_TEXT );
+
+    MALLOC_ASSERT( this, ASSERT_TEXT );
+    verifyTLSBin(size);
+
+    if (block == activeBlk) {
+        activeBlk = block->previous? block->previous : block->next;
+    }
+    /* Delink the block */
+    if (block->previous) {
+        MALLOC_ASSERT( block->previous->next == block, ASSERT_TEXT );
+        block->previous->next = block->next;
+    }
+    if (block->next) {
+        MALLOC_ASSERT( block->next->previous == block, ASSERT_TEXT );
+        block->next->previous = block->previous;
+    }
+    block->next = NULL;
+    block->previous = NULL;
+
+    verifyTLSBin(size);
+}
+
+TLSData* Bin::createTLS()
+{
+    MALLOC_ASSERT( sizeof(TLSData) >= sizeof(Bin) * numBlockBins + sizeof(FreeBlockPool), ASSERT_TEXT );
+    TLSData* tls = (TLSData*) bootStrapBlocks.allocate(sizeof(TLSData));
+    if ( !tls ) return NULL;
+    /* the block contains zeroes after bootStrapMalloc, so bins are initialized */
+#if MALLOC_DEBUG
+    for (uint32_t i = 0; i < numBlockBinLimit; i++) {
+        MALLOC_ASSERT( tls->bin[i].activeBlk == 0, ASSERT_TEXT );
+        MALLOC_ASSERT( tls->bin[i].mailbox == 0, ASSERT_TEXT );
+    }
+#endif
+    setThreadMallocTLS(tls);
+    return tls;
+}
+
+/*
+ * Return the bin for the given size. If the TLS bin structure is absent, create it.
+ */
+Bin* Bin::getAllocationBin(size_t size)
+{
+    TLSData* tls = getThreadMallocTLS();
+    if( !tls ) 
+        tls = createTLS();
+    MALLOC_ASSERT( tls, ASSERT_TEXT );
+    return tls->bin + getIndex(size);
+}
+
+Block* Bin::getPublicFreeListBlock()
+{
+    Block* block;
+    MALLOC_ASSERT( this, ASSERT_TEXT );
+    // if this method is called, active block usage must have been unsuccessful
+    MALLOC_ASSERT( !activeBlk && !mailbox || activeBlk && activeBlk->isFull, ASSERT_TEXT );
+
+// the counter should be changed    STAT_increment(getThreadId(), ThreadCommonCounters, lockPublicFreeList);
+    {
+        MallocMutex::scoped_lock scoped_cs(mailLock);
+        block = mailbox;
+        if( block ) {
+            MALLOC_ASSERT( block->ownBlock(), ASSERT_TEXT );
+            MALLOC_ASSERT( !isNotForUse(block->nextPrivatizable), ASSERT_TEXT );
+            mailbox = block->nextPrivatizable;
+            block->nextPrivatizable = (Block*) this;
+        }
+    }
+    if( block ) {
+        MALLOC_ASSERT( isSolidPtr(block->publicFreeList), ASSERT_TEXT );
+        block->privatizePublicFreeList();
+    }
+    return block;
+}
+
+BlockI *BlockI::getRawBlock(bool startup) { return Block::getRaw(startup); }
+
+void BlockI::initialize(void *ptr) { ((LocalBlockFields*)this)->bumpPtr = (FreeObject*)ptr; }
+
+bool Block::emptyEnoughToUse()
+{
+    const float threshold = (blockSize - sizeof(Block)) * (1-emptyEnoughRatio);
+
+    if (bumpPtr) {
+        /* If we are still using a bump ptr for this block it is empty enough to use. */
+        STAT_increment(owner, getIndex(objectSize), examineEmptyEnough);
+        isFull = false;
+        return 1;
+    }
+
+    /* allocatedCount shows how many objects in the block are in use; however, it still counts
+       objects freed by other threads, so a prior call to privatizePublicFreeList() is recommended */
+    isFull = (allocatedCount*objectSize > threshold)? true: false;
+#if COLLECT_STATISTICS
+    if (isFull)
+        STAT_increment(owner, getIndex(objectSize), examineNotEmpty);
+    else
+        STAT_increment(owner, getIndex(objectSize), examineEmptyEnough);
+#endif
+    return !isFull;
+}
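+
+/* Rough illustration of the threshold above, assuming the usual 16K block with
+   sizeof(Block) == 128 and, purely for the sake of the example, an emptyEnoughRatio
+   of 1/4 (the actual constant is defined elsewhere): threshold = (16384-128)*(3/4)
+   = 12192 bytes, so a block with objectSize 64 and 200 live objects (12800 bytes)
+   would still be considered full, while one with 150 live objects (9600 bytes)
+   would be empty enough to reuse. */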
+
+/* Restore the bump pointer for an empty block that is planned for reuse */
+void Block::restoreBumpPtr()
+{
+    MALLOC_ASSERT( allocatedCount == 0, ASSERT_TEXT );
+    MALLOC_ASSERT( publicFreeList == NULL, ASSERT_TEXT );
+    STAT_increment(owner, getIndex(objectSize), freeRestoreBumpPtr);
+    bumpPtr = (FreeObject *)((uintptr_t)this + blockSize - objectSize);
+    freeList = NULL;
+    isFull = 0;
+}
+
+void Block::freeOwnObject(FreeObject *objectToFree)
+{
+    objectToFree->next = freeList;
+    freeList = objectToFree;
+    allocatedCount--;
+    MALLOC_ASSERT( allocatedCount < (blockSize-sizeof(Block))/objectSize, ASSERT_TEXT );
+#if COLLECT_STATISTICS
+    if (Bin::getAllocationBin(objectSize)->getActiveBlock() != this)
+        STAT_increment(owner, getIndex(objectSize), freeToInactiveBlock);
+    else
+        STAT_increment(owner, getIndex(objectSize), freeToActiveBlock);
+#endif
+    if (isFull) {
+        if (emptyEnoughToUse())
+            Bin::getAllocationBin(objectSize)->moveBlockToBinFront(this);
+    } else {
+        if (allocatedCount==0 && publicFreeList==NULL)
+            Bin::getAllocationBin(objectSize)->processLessUsedBlock(this);
+    }
+}
+
+void Block::freePublicObject (FreeObject *objectToFree)
+{
+    FreeObject *localPublicFreeList;
+
+    MALLOC_ITT_SYNC_RELEASING(&publicFreeList);
+#if FREELIST_NONBLOCKING
+    FreeObject *temp = publicFreeList;
+    do {
+        localPublicFreeList = objectToFree->next = temp;
+        temp = (FreeObject*)AtomicCompareExchange(
+                                (intptr_t&)publicFreeList,
+                                (intptr_t)objectToFree, (intptr_t)localPublicFreeList );
+        // no backoff necessary because trying to make change, not waiting for a change
+    } while( temp != localPublicFreeList );
+#else
+    STAT_increment(getThreadId(), ThreadCommonCounters, lockPublicFreeList);
+    {
+        MallocMutex::scoped_lock scoped_cs(publicFreeListLock);
+        localPublicFreeList = objectToFree->next = publicFreeList;
+        publicFreeList = objectToFree;
+    }
+#endif
+
+    if( localPublicFreeList==NULL ) {
+        // if the block is abandoned, its nextPrivatizable pointer should be UNUSABLE;
+        // otherwise, it should point to the bin the block belongs to.
+        // reading nextPrivatizable is thread-safe below, because:
+        // 1) the executing thread atomically got localPublicFreeList==NULL and changed it to non-NULL;
+        // 2) only the owning thread can change it back to NULL,
+        // 3) but that cannot be done until the block is put into the mailbox.
+        // So the executing thread is now the only one that can change nextPrivatizable.
+        if( !isNotForUse(nextPrivatizable) ) {
+            MALLOC_ASSERT( nextPrivatizable!=NULL, ASSERT_TEXT );
+            MALLOC_ASSERT( owner.defined(), ASSERT_TEXT );
+            Bin* theBin = (Bin*) nextPrivatizable;
+            MallocMutex::scoped_lock scoped_cs(theBin->mailLock);
+            nextPrivatizable = theBin->mailbox;
+            theBin->mailbox = this;
+        } else {
+            MALLOC_ASSERT( !owner.defined(), ASSERT_TEXT );
+        }
+    }
+    STAT_increment(ThreadId::get(), ThreadCommonCounters, freeToOtherThread);
+    STAT_increment(owner, getIndex(objectSize), freeByOtherThread);
+}
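+
+/* The FREELIST_NONBLOCKING branch above is an ordinary lock-free list push; in outline
+   (illustrative pseudo-code only, the real code uses AtomicCompareExchange and keeps the
+   observed head in localPublicFreeList):
+
+       do {
+           observed = publicFreeList;
+           objectToFree->next = observed;                          // speculatively link in front
+       } while ( CAS(&publicFreeList, objectToFree, observed) != observed );
+
+   When the observed head was NULL, this thread is the one that turned the list non-empty,
+   so it is also responsible for advertising the block in the owner's mailbox (or for
+   recognizing the UNUSABLE mark of an orphaned block), as done right after the push. */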
+
+void Block::privatizePublicFreeList()
+{
+    FreeObject *temp, *localPublicFreeList;
+
+    MALLOC_ASSERT( owner.own(), ASSERT_TEXT );
+#if FREELIST_NONBLOCKING
+    temp = publicFreeList;
+    do {
+        localPublicFreeList = temp;
+        temp = (FreeObject*)AtomicCompareExchange(
+                                (intptr_t&)publicFreeList,
+                                0, (intptr_t)localPublicFreeList);
+        // no backoff necessary because trying to make change, not waiting for a change
+    } while( temp != localPublicFreeList );
+#else
+    STAT_increment(owner, ThreadCommonCounters, lockPublicFreeList);
+    {
+        MallocMutex::scoped_lock scoped_cs(publicFreeListLock);
+        localPublicFreeList = publicFreeList;
+        publicFreeList = NULL;
+    }
+    temp = localPublicFreeList;
+#endif
+    MALLOC_ITT_SYNC_ACQUIRED(&publicFreeList);
+
+    MALLOC_ASSERT( localPublicFreeList && localPublicFreeList==temp, ASSERT_TEXT ); // there should be something in publicFreeList!
+    if( !isNotForUse(temp) ) { // return/getPartialBlock could set it to UNUSABLE
+        MALLOC_ASSERT( allocatedCount <= (blockSize-sizeof(Block))/objectSize, ASSERT_TEXT );
+        /* other threads did not change the counter freeing our blocks */
+        allocatedCount--;
+        while( isSolidPtr(temp->next) ){ // the list will end with either NULL or UNUSABLE
+            temp = temp->next;
+            allocatedCount--;
+        }
+        MALLOC_ASSERT( allocatedCount < (blockSize-sizeof(Block))/objectSize, ASSERT_TEXT );
+        /* merge with local freeList */
+        temp->next = freeList;
+        freeList = localPublicFreeList;
+        STAT_increment(owner, getIndex(objectSize), allocPrivatized);
+    }
+}
+
+void Block::privatizeOrphaned(Bin* bin)
+{
+    next = NULL;
+    previous = NULL;
+    MALLOC_ASSERT( publicFreeList!=NULL, ASSERT_TEXT );
+    /* There is no race here, since no other thread owns this block */
+    MALLOC_ASSERT( !owner.defined(), ASSERT_TEXT );
+    owner = ThreadId::get();
+    // It is safe to change nextPrivatizable, as publicFreeList is not null
+    MALLOC_ASSERT( isNotForUse(nextPrivatizable), ASSERT_TEXT );
+    nextPrivatizable = (Block*)bin;
+    // the next call is required to change publicFreeList to 0
+    privatizePublicFreeList();
+    if( allocatedCount ) {
+        emptyEnoughToUse(); // check its fullness and set result->isFull
+    } else {
+        restoreBumpPtr();
+    }
+    MALLOC_ASSERT( !isNotForUse(publicFreeList), ASSERT_TEXT );
+}
+
+void Block::shareOrphaned(const Bin *bin)
+{
+    MALLOC_ASSERT( bin, ASSERT_TEXT );
+    STAT_increment(owner, index, freeBlockPublic);
+    // need to set publicFreeList to non-zero, so other threads
+    // will not change nextPrivatizable and it can be zeroed.
+    if ((intptr_t)nextPrivatizable==(intptr_t)bin) {
+        void* oldval;
+#if FREELIST_NONBLOCKING
+        oldval = (void*)AtomicCompareExchange((intptr_t&)publicFreeList, (intptr_t)UNUSABLE, 0);
+#else
+        STAT_increment(owner, ThreadCommonCounters, lockPublicFreeList);
+        {
+            MallocMutex::scoped_lock scoped_cs(publicFreeListLock);
+            if ( (oldval=publicFreeList)==NULL )
+                (uintptr_t&)(publicFreeList) = UNUSABLE;
+        }
+#endif
+        if ( oldval!=NULL ) {
+            // another thread freed an object; we need to wait until it finishes.
+            // There should be no need for exponential backoff, as the wait here is not for a lock,
+            // but we do need to yield so that the thread we are waiting for has a chance to run.
+            int count = 256;
+            while( (intptr_t)const_cast<Block* volatile &>(nextPrivatizable)==(intptr_t)bin ) {
+                if (--count==0) {
+                    do_yield();
+                    count = 256;
+                }
+            }
+        }
+    } else {
+        MALLOC_ASSERT( isSolidPtr(publicFreeList), ASSERT_TEXT );
+    }
+    MALLOC_ASSERT( publicFreeList!=NULL, ASSERT_TEXT );
+    // now it is safe to change our data
+    previous = NULL;
+    owner.undef();
+    // it is the caller's responsibility to ensure that the list of blocks
+    // formed by nextPrivatizable pointers is kept consistent if required;
+    // if this is only called from thread-shutdown code, it does not matter.
+    (uintptr_t&)(nextPrivatizable) = UNUSABLE;
+}
+
+void Block::cleanBlockHeader()
+{
+    next = NULL;
+    previous = NULL;
+    freeList = NULL;
+    allocatedCount = 0;
+    isFull = 0;
+
+    publicFreeList = NULL;
+}
+
+void Block::initEmptyBlock(size_t size)
+{
+    // Having getIndex and getObjectSize called next to each other
+    // allows better compiler optimization as they basically share the code.
+    unsigned int index = getIndex(size);
+    unsigned int objSz = getObjectSize(size); 
+    Bin* tlsBin = getThreadMallocTLS()->bin;
+
+    cleanBlockHeader();
+    objectSize = objSz;
+    owner = ThreadId::get();
+    // the bump pointer should be prepared for the first allocation - thus move it down by objectSize
+    bumpPtr = (FreeObject *)((uintptr_t)this + blockSize - objectSize);
+
+    // each block should have the address where the head of the list of "privatizable" blocks is kept;
+    // the only exception is a bootstrap block, which is initialized while TLS is still NULL
+    nextPrivatizable = tlsBin? (Block*)(tlsBin + index) : NULL;
+    TRACEF(( "[ScalableMalloc trace] Empty block %p is initialized, owner is %d, objectSize is %d, bumpPtr is %p\n",
+             this, owner, objectSize, bumpPtr ));
+}
+
+Block *OrphanedBlocks::get(Bin* bin, unsigned int size)
+{
+    Block *result;
+    MALLOC_ASSERT( bin, ASSERT_TEXT );
+    unsigned int index = getIndex(size);
+    result = (Block *) bins[index].pop();
+    if (result) {
+        MALLOC_ITT_SYNC_ACQUIRED(bins+index);
+        result->privatizeOrphaned(bin);
+        STAT_increment(result->owner, index, allocBlockPublic);
+    }
+    return result;
+}
+
+void OrphanedBlocks::put(Bin* bin, Block *block)
+{
+    unsigned int index = getIndex(block->getSize());
+    block->shareOrphaned(bin);
+    MALLOC_ITT_SYNC_RELEASING(bins+index);
+    bins[index].push((void **)block);
+}
+
+void FreeBlockPool::insertBlock(Block *block)
+{
+    size++;
+    block->next = head;
+    head = block;
+    if (!tail)
+        tail = block;
+}
+
+Block *FreeBlockPool::getBlock()
+{
+    Block *result = head;
+    if (head) {
+        size--;
+        head = head->next;
+        if (!head)
+            tail = NULL;
+    }
+    return result;
+}
+
+void FreeBlockPool::returnBlock(Block *block)
+{
+    MALLOC_ASSERT( size <= POOL_HIGH_MARK, ASSERT_TEXT );
+    if (size == POOL_HIGH_MARK) {
+        // release cold blocks and add hot one
+        Block *headToFree = head, 
+              *tailToFree = tail;
+        for (int i=0; i<POOL_LOW_MARK-2; i++)
+            headToFree = headToFree->next;
+        tail = headToFree;
+        headToFree = headToFree->next;
+        tail->next = NULL;
+        size = POOL_LOW_MARK-1;
+        for (Block *currBl = headToFree; currBl; currBl = currBl->next)
+            removeBackRef(currBl->backRefIdx);
+        freeBlocks.putList(headToFree, tailToFree);
+    }
+    insertBlock(block);
+}
+
+void FreeBlockPool::releaseAllBlocks()
+{
+    if (head) {
+        for (Block *currBl = head; currBl; currBl = currBl->next)
+            removeBackRef(currBl->backRefIdx);
+        freeBlocks.putList(head, tail);
+    }
+}
+
+/* Return an empty uninitialized block in a non-blocking fashion. */
+Block *Block::getRaw(bool startup)
+{
+    Block *result = NULL;
+    Block *bigBlock;
+
+    if (! (bigBlock = static_cast<Block*>(freeBlocks.get(startup)))) return NULL;
+
+    // check alignment
+    MALLOC_ASSERT( isAligned( bigBlock, blockSize ), ASSERT_TEXT );
+    MALLOC_ASSERT( isAligned( bigBlock->bumpPtr, blockSize ), ASSERT_TEXT );
+    // block should be at least as big as blockSize; otherwise the previous block can be damaged.
+    MALLOC_ASSERT( (uintptr_t)bigBlock->bumpPtr >= (uintptr_t)bigBlock + blockSize, ASSERT_TEXT );
+    bigBlock->bumpPtr = (FreeObject *)((uintptr_t)bigBlock->bumpPtr - blockSize);
+    result = (Block *)bigBlock->bumpPtr;
+    if ( result!=bigBlock ) {
+        TRACEF(( "[ScalableMalloc trace] Pushing partial rest of block back on.\n" ));
+        freeBlocks.put(bigBlock, startup);
+    }
+
+    return result;
+}
+
+/* Return an empty uninitialized block in a non-blocking fashion. */
+Block *Block::getEmpty(size_t size)
+{
+    Block *result = NULL;
+    TLSData* tls = getThreadMallocTLS();
+    if (tls)
+        result = tls->pool.getBlock();
+    if (!result) {
+        BackRefIdx backRefIdx = BackRefIdx::newBackRef(/*largeObj=*/false);
+        if (backRefIdx.isInvalid() || !(result = getRaw(/*startup=*/false)))
+            return NULL;
+        setBackRef(backRefIdx, result);
+        result->backRefIdx = backRefIdx;
+    }
+    if (result) {
+        result->initEmptyBlock(size);
+        STAT_increment(result->owner, getIndex(result->objectSize), allocBlockNew);
+    }
+    return result;
+}
+
+/* We have a block; give it back to the malloc block manager */
+void Block::returnEmpty(bool poolTheBlock)
+{
+    // it is the caller's responsibility to ensure no data is lost before calling this
+    MALLOC_ASSERT( allocatedCount==0, ASSERT_TEXT );
+    MALLOC_ASSERT( publicFreeList==NULL, ASSERT_TEXT );
+    MALLOC_ASSERT( !poolTheBlock || next == NULL, ASSERT_TEXT );
+    MALLOC_ASSERT( !poolTheBlock || previous == NULL, ASSERT_TEXT );
+    STAT_increment(owner, getIndex(objectSize), freeBlockBack);
+
+    cleanBlockHeader();
+
+    nextPrivatizable = NULL;
+
+    objectSize = 0;
+    owner.invalid();
+    // for an empty block, bump pointer should point right after the end of the block
+    bumpPtr = (FreeObject *)((uintptr_t)this + blockSize);
+    if (poolTheBlock) {
+        MALLOC_ASSERT(getThreadMallocTLS(), "Is TLS still not initialized?");
+        getThreadMallocTLS()->pool.returnBlock(this);
+    }
+    else {
+        removeBackRef(backRefIdx);
+        freeBlocks.put(this, /*startup=*/false);
+    }
+}
+
+inline void Bin::setActiveBlock (Block *block)
+{
+//    MALLOC_ASSERT( bin, ASSERT_TEXT );
+    MALLOC_ASSERT( block->owner.own(), ASSERT_TEXT );
+    // it is the caller's responsibility to keep the bin consistent (i.e. ensure this block is in the bin list)
+    activeBlk = block;
+}
+
+inline Block* Bin::setPreviousBlockActive()
+{
+    MALLOC_ASSERT( activeBlk, ASSERT_TEXT );
+    Block* temp = activeBlk->previous;
+    if( temp ) {
+        MALLOC_ASSERT( temp->isFull == 0, ASSERT_TEXT );
+        activeBlk = temp;
+    }
+    return temp;
+}
+
+FreeObject *Block::findObjectToFree(void *object) const
+{
+    FreeObject *objectToFree;
+    // Due to aligned allocations, a pointer passed to scalable_free
+    // might differ from the address of internally allocated object.
+    // Small objects however should always be fine.    
+    if (objectSize <= maxSegregatedObjectSize)
+        objectToFree = (FreeObject*)object;
+    // "Fitting size" allocations are suspicious if aligned higher than naturally
+    else {
+        if ( ! isAligned(object,2*fittingAlignment) )
+            // TODO: the above check is questionable - it gives false negatives in ~50% of cases,
+            //       so it might even be slower on average than unconditional use of findAllocatedObject.
+            // here it should be a "real" object
+            objectToFree = (FreeObject*)object;
+        else
+            // here object can be an aligned address, so applying additional checks
+            objectToFree = findAllocatedObject(object);
+        MALLOC_ASSERT( isAligned(objectToFree,fittingAlignment), ASSERT_TEXT );
+    }
+    MALLOC_ASSERT( isProperlyPlaced(objectToFree), ASSERT_TEXT );
+
+    return objectToFree;
+}
+
+#if MALLOC_CHECK_RECURSION
+
+/*
+ * This is a special kind of allocation that can be used when malloc is
+ * not available (either during startup or when malloc was already called and
+ * we are, say, inside a pthread_setspecific call).
+ * Such a block can contain objects of different sizes: allocations are performed
+ * by moving the bump pointer and incrementing an object counter, and releases are
+ * tracked via the counter of objects allocated in the block, or by moving the bump
+ * pointer back when the released object borders it.
+ */
+
+class StartupBlock : public Block {
+    size_t availableSize() {
+        return blockSize - ((uintptr_t)bumpPtr - (uintptr_t)this);
+    }
+    static StartupBlock *getBlock();
+public:
+    static FreeObject *allocate(size_t size);
+    static size_t msize(void *ptr) { return *((size_t*)ptr - 1); }
+    void free(void *ptr);
+};
+
+static MallocMutex startupMallocLock;
+static StartupBlock *firstStartupBlock;
+
+StartupBlock *StartupBlock::getBlock()
+{
+    BackRefIdx backRefIdx = BackRefIdx::newBackRef(/*largeObj=*/false);
+    if (backRefIdx.isInvalid()) return NULL;
+
+    StartupBlock *block = (StartupBlock *)getRaw(/*startup=*/true);
+    if (!block) return NULL;
+
+    block->cleanBlockHeader();
+    setBackRef(backRefIdx, block);
+    block->backRefIdx = backRefIdx;
+    // use startupAllocObjSizeMark to mark objects from startup block marker
+    block->objectSize = startupAllocObjSizeMark;
+    block->bumpPtr = (FreeObject *)((uintptr_t)block + sizeof(StartupBlock));
+    return block;
+}
+
+/* TODO: The function is called when a nested malloc call is detected, so simultaneous
+   usage from different threads is improbable; block pre-allocation may therefore be
+   of little use, and the code might be simplified. */
+FreeObject *StartupBlock::allocate(size_t size)
+{
+    FreeObject *result;
+    StartupBlock *newBlock = NULL;
+    bool newBlockUnused = false;
+
+    /* Objects must be aligned on their natural bounds,
+       and objects bigger than a word on a word boundary. */
+    size = alignUp(size, sizeof(size_t));
+    // We need size of an object to implement msize.
+    size_t reqSize = size + sizeof(size_t);
+    // speculatively allocate newBlock, to be used later or returned as unused
+    if (!firstStartupBlock || firstStartupBlock->availableSize() < reqSize)
+        if (!(newBlock = StartupBlock::getBlock()))
+            return NULL;
+
+    {
+        MallocMutex::scoped_lock scoped_cs(startupMallocLock);
+    
+        if (!firstStartupBlock || firstStartupBlock->availableSize() < reqSize) {
+            if (!newBlock && !(newBlock = StartupBlock::getBlock()))
+                return NULL;
+            newBlock->next = (Block*)firstStartupBlock;
+            if (firstStartupBlock)
+                firstStartupBlock->previous = (Block*)newBlock;
+            firstStartupBlock = newBlock;
+        } else
+            newBlockUnused = true;
+        result = firstStartupBlock->bumpPtr;
+        firstStartupBlock->allocatedCount++;
+        firstStartupBlock->bumpPtr = 
+            (FreeObject *)((uintptr_t)firstStartupBlock->bumpPtr + reqSize);
+    }
+    if (newBlock && newBlockUnused)
+        newBlock->returnEmpty(/*poolTheBlock=*/false);
+
+    // keep object size at the negative offset
+    *((size_t*)result) = size;
+    return (FreeObject*)((size_t*)result+1);
+}
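+
+/* Layout of a startup allocation, for illustration (assuming a 64-bit build where
+   sizeof(size_t) == 8): allocate(10) rounds the request up to 16 bytes, reserves
+   reqSize = 16 + 8 = 24 bytes at the bump pointer, stores the value 16 in the first
+   8 bytes, and returns the address right after that header; msize(ptr) later reads
+   the stored 16 back from ((size_t*)ptr - 1). */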
+
+void StartupBlock::free(void *ptr)
+{
+    Block* blockToRelease = NULL;
+    {
+        MallocMutex::scoped_lock scoped_cs(startupMallocLock);
+    
+        MALLOC_ASSERT(firstStartupBlock, ASSERT_TEXT);
+        MALLOC_ASSERT(startupAllocObjSizeMark==objectSize 
+                      && allocatedCount>0, ASSERT_TEXT);
+        MALLOC_ASSERT((uintptr_t)ptr>=(uintptr_t)this+sizeof(StartupBlock)
+                      && (uintptr_t)ptr+StartupBlock::msize(ptr)<=(uintptr_t)this+blockSize, 
+                      ASSERT_TEXT);
+        if (0 == --allocatedCount) {
+            if (this == firstStartupBlock)
+                firstStartupBlock = (StartupBlock*)firstStartupBlock->next;
+            if (previous)
+                previous->next = next;
+            if (next)
+                next->previous = previous;
+            blockToRelease = this;
+        } else if ((uintptr_t)ptr + StartupBlock::msize(ptr) == (uintptr_t)bumpPtr) {
+            // last object in the block released
+            FreeObject *newBump = (FreeObject*)((size_t*)ptr - 1);
+            MALLOC_ASSERT((uintptr_t)newBump>(uintptr_t)this+sizeof(StartupBlock), 
+                          ASSERT_TEXT);
+            bumpPtr = newBump;
+        }
+    }
+    if (blockToRelease) {
+        blockToRelease->previous = blockToRelease->next = NULL;
+        blockToRelease->returnEmpty(/*poolTheBlock=*/false);
+    }
+}
+
+#endif /* MALLOC_CHECK_RECURSION */
+
+/********* End thread related code  *************/
+
+/********* Library initialization *************/
+
+//! Value indicating the state of initialization.
+/* 0 = initialization not started.
+ * 1 = initialization started but not finished.
+ * 2 = initialization finished.
+ * In theory, we only need values 0 and 2. But value 1 is nonetheless
+ * useful for detecting errors in the double-check pattern.
+ */
+static intptr_t mallocInitialized;   // implicitly initialized to 0
+static MallocMutex initMutex;
+
+inline bool isMallocInitialized() {
+    // Load must have acquire fence; otherwise thread taking "initialized" path
+    // might perform textually later loads *before* mallocInitialized becomes 2.
+    return 2 == FencedLoad(mallocInitialized);
+}
+
+bool isMallocInitializedExt() {
+    return isMallocInitialized();
+}
+
+/*
+ * Allocator initialization routine;
+ * it is called lazily on the very first scalable_malloc call.
+ */
+static void initMemoryManager()
+{
+    TRACEF(( "[ScalableMalloc trace] sizeof(Block) is %d (expected 128); sizeof(uintptr_t) is %d\n",
+             sizeof(Block), sizeof(uintptr_t) ));
+    MALLOC_ASSERT( 2*blockHeaderAlignment == sizeof(Block), ASSERT_TEXT );
+    MALLOC_ASSERT( sizeof(FreeObject) == sizeof(void*), ASSERT_TEXT );
+
+// TODO: add error handling, and on error do something better than exit(1)
+    if (!initBackRefMaster() || !freeBlocks.bootstrap(NULL, NULL, 0)) {
+        fprintf (stderr, "The memory manager cannot access sufficient memory to initialize; exiting \n");
+        exit(1);
+    }
+// Create keys for thread-local storage and for thread id
+#if USE_WINTHREAD
+    TLS_pointer_key = TlsAlloc();
+#else
+    int status1 = pthread_key_create( &TLS_pointer_key, mallocThreadShutdownNotification );
+    if ( status1 ) {
+        fprintf (stderr, "The memory manager cannot create tls key during initialization; exiting \n");
+        exit(1);
+    }
+#endif /* USE_WINTHREAD */
+    ThreadId::init();
+#if COLLECT_STATISTICS
+    initStatisticsCollection();
+#endif
+}
+
+//! Ensures that initMemoryManager() is called once and only once.
+/** Does not return until initMemoryManager() has been completed by a thread.
+    There is no need to call this routine if mallocInitialized==2 . */
+static void doInitialization()
+{
+    MallocMutex::scoped_lock lock( initMutex );
+    if (mallocInitialized!=2) {
+        MALLOC_ASSERT( mallocInitialized==0, ASSERT_TEXT );
+        mallocInitialized = 1;
+        RecursiveMallocCallProtector scoped;
+        initMemoryManager();
+#ifdef  MALLOC_EXTRA_INITIALIZATION
+        MALLOC_EXTRA_INITIALIZATION;
+#endif
+#if MALLOC_CHECK_RECURSION
+        RecursiveMallocCallProtector::detectNaiveOverload();
+#endif
+        MALLOC_ASSERT( mallocInitialized==1, ASSERT_TEXT );
+        // Store must have release fence, otherwise mallocInitialized==2 
+        // might become remotely visible before side effects of 
+        // initMemoryManager() become remotely visible.
+        FencedStore( mallocInitialized, 2 );
+    }
+    /* It can't be 0, or this thread would have initialized it */
+    MALLOC_ASSERT( mallocInitialized==2, ASSERT_TEXT );
+}
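+
+/* The two routines above form a double-checked initialization, in miniature
+   (illustrative outline only; the real code uses MallocMutex and the Fenced*
+   load/store primitives declared elsewhere in tbbmalloc):
+
+       if (!isMallocInitialized())   // fast path: acquire-load, no lock taken
+           doInitialization();       // slow path: lock, re-check, init, release-store 2
+
+   The intermediate value 1 never gates anything; it only lets the asserts detect
+   recursive or overlapping initialization attempts. */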
+
+/********* End library initialization *************/
+
+/********* The malloc show begins     *************/
+
+
+FreeObject *Block::allocateFromFreeList()
+{
+    FreeObject *result;
+
+    if (!freeList) return NULL;
+
+    result = freeList;
+    MALLOC_ASSERT( result, ASSERT_TEXT );
+
+    freeList = result->next;
+    MALLOC_ASSERT( allocatedCount < (blockSize-sizeof(Block))/objectSize, ASSERT_TEXT );
+    allocatedCount++;
+    STAT_increment(owner, getIndex(objectSize), allocFreeListUsed);
+
+    return result;
+}
+
+FreeObject *Block::allocateFromBumpPtr()
+{
+    FreeObject *result = bumpPtr;
+    if (result) {
+        bumpPtr = (FreeObject *) ((uintptr_t) bumpPtr - objectSize);
+        if ( (uintptr_t)bumpPtr < (uintptr_t)this+sizeof(Block) ) {
+            bumpPtr = NULL;
+        }
+        MALLOC_ASSERT( allocatedCount < (blockSize-sizeof(Block))/objectSize, ASSERT_TEXT );
+        allocatedCount++;
+        STAT_increment(owner, getIndex(objectSize), allocBumpPtrUsed);
+    }
+    return result;
+}
+
+inline FreeObject* Block::allocate()
+{
+    FreeObject *result;
+
+    MALLOC_ASSERT( owner.own(), ASSERT_TEXT );
+
+    /* for better cache locality, look in the free list first. */
+    if ( (result = allocateFromFreeList()) ) {
+        return result;
+    }
+    MALLOC_ASSERT( !freeList, ASSERT_TEXT );
+
+    /* if free list is empty, try thread local bump pointer allocation. */
+    if ( (result = allocateFromBumpPtr()) ) {
+        return result;
+    }
+    MALLOC_ASSERT( !bumpPtr, ASSERT_TEXT );
+
+    /* the block is considered full. */
+    isFull = 1;
+    return NULL;
+}
+
+void Bin::moveBlockToBinFront(Block *block)
+{
+    /* move the block to the front of the bin */
+    if (block == activeBlk) return;
+    outofTLSBin(block);
+    pushTLSBin(block);
+}
+
+void Bin::processLessUsedBlock(Block *block)
+{
+    if (block != activeBlk) {
+        /* We are not actively using this block; return it to the general block pool */
+        outofTLSBin(block);
+        block->returnEmpty(/*poolTheBlock=*/true);
+    } else {
+        /* all objects are free - let's restore the bump pointer */
+        block->restoreBumpPtr();
+    }
+}
+
+/*
+ * All aligned allocations fall into one of the following categories:
+ *  1. if both request size and alignment are <= maxSegregatedObjectSize,
+ *       we just align the size up, and request this amount, because for every size
+ *       aligned to some power of 2, the allocated object is at least that aligned.
+ * 2. for bigger size, check if already guaranteed fittingAlignment is enough.
+ * 3. if size+alignment<minLargeObjectSize, we take an object of fittingSizeN and align
+ *       its address up; given such pointer, scalable_free could find the real object.
+ * 4. otherwise, aligned large object is allocated.
+ */
+static void *allocateAligned(size_t size, size_t alignment)
+{
+    MALLOC_ASSERT( isPowerOfTwo(alignment), ASSERT_TEXT );
+
+    void *result;
+    if (size<=maxSegregatedObjectSize && alignment<=maxSegregatedObjectSize)
+        result = scalable_malloc(alignUp(size? size: sizeof(size_t), alignment));
+    else if (size<minLargeObjectSize && alignment<=fittingAlignment)
+        result = scalable_malloc(size);
+    else if (size+alignment < minLargeObjectSize) {
+        void *unaligned = scalable_malloc(size+alignment);
+        if (!unaligned) return NULL;
+        result = alignUp(unaligned, alignment);
+    } else {
+        /* This can be the first allocation call. */
+        if (!isMallocInitialized()) 
+            doInitialization();
+        // take into account only alignments that are higher than the natural one
+        result = mallocLargeObject(size, largeObjectAlignment>alignment? 
+                                         largeObjectAlignment: alignment);
+    }
+
+    MALLOC_ASSERT( isAligned(result, alignment), ASSERT_TEXT );
+    return result;
+}
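+
+/* Two quick illustrations of the categories above (the exact constants live elsewhere,
+   so the numbers are only indicative): a request for 100 bytes aligned to 64 falls into
+   category 1 and becomes scalable_malloc(alignUp(100,64)) == scalable_malloc(128), which,
+   per the comment above, is served at least 128-aligned; a request for, say, 2000 bytes
+   aligned to 256 (with 2000+256 still below minLargeObjectSize) falls into category 3:
+   2256 bytes are allocated, the returned address is aligned up inside that object, and
+   scalable_free later recovers the real object via findAllocatedObject(). */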
+
+static void *reallocAligned(void *ptr, size_t size, size_t alignment = 0)
+{
+    void *result;
+    size_t copySize;
+
+    if (isLargeObject(ptr)) {
+        LargeMemoryBlock* lmb = ((LargeObjectHdr *)ptr - 1)->memoryBlock;
+        copySize = lmb->unalignedSize-((uintptr_t)ptr-(uintptr_t)lmb);
+        if (size <= copySize && (0==alignment || isAligned(ptr, alignment))) {
+            lmb->objectSize = size;
+            return ptr;
+        } else {
+            copySize = lmb->objectSize;
+            result = alignment ? allocateAligned(size, alignment) : scalable_malloc(size);
+        }
+    } else {
+        Block* block = (Block *)alignDown(ptr, blockSize);
+        copySize = block->getSize();
+        if (size <= copySize && (0==alignment || isAligned(ptr, alignment))) {
+            return ptr;
+        } else {
+            result = alignment ? allocateAligned(size, alignment) : scalable_malloc(size);
+        }
+    }
+    if (result) {
+        memcpy(result, ptr, copySize<size? copySize: size);
+        scalable_free(ptr);
+    }
+    return result;
+}
+
+/* A predicate checks if an object is properly placed inside its block */
+inline bool Block::isProperlyPlaced(const void *object) const
+{
+    return 0 == ((uintptr_t)this + blockSize - (uintptr_t)object) % objectSize;
+}
+
+/* Finds the real object inside the block */
+FreeObject *Block::findAllocatedObject(const void *address) const
+{
+    // calculate offset from the end of the block space
+    uintptr_t offset = (uintptr_t)this + blockSize - (uintptr_t)address;
+    MALLOC_ASSERT( offset<blockSize-sizeof(Block), ASSERT_TEXT );
+    // find offset difference from a multiple of allocation size
+    offset %= objectSize;
+    // and move the address down to where the real object starts.
+    return (FreeObject*)((uintptr_t)address - (offset? objectSize-offset: 0));
+}
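+
+/* Worked example for findAllocatedObject(), for illustration: objects are laid out
+   backwards from the end of the block, so for a block at base B with objectSize 112,
+   object starts sit at B + blockSize - k*112. Given an aligned pointer p that is
+   16 bytes past such a start, offset = (B + blockSize - p) = k*112 - 16, which after
+   "offset %= objectSize" becomes 96, and the returned address is p - (112 - 96) = p - 16,
+   i.e. exactly the object start. If p already is an object start, offset becomes 0 and
+   p is returned unchanged. */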
+
+/* 
+ * Bad dereference caused by a foreign pointer is possible only here, not earlier in call chain.
+ * Separate function isolates SEH code, as it has bad influence on compiler optimization.
+ */
+static inline BackRefIdx safer_dereference (const BackRefIdx *ptr)
+{
+    BackRefIdx id;
+#if _MSC_VER
+    __try {
+#endif
+        id = *ptr;
+#if _MSC_VER
+    } __except( GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION? 
+                EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH ) {
+        id = BackRefIdx();
+    }
+#endif
+    return id;
+}
+
+bool isLargeObject(void *object)
+{
+    if (!isAligned(object, largeObjectAlignment)) 
+        return false;
+    LargeObjectHdr *header = (LargeObjectHdr*)object - 1;
+    BackRefIdx idx = safer_dereference(&header->backRefIdx);
+
+    return idx.isLargeObject() && getBackRef(idx) == header;
+}
+
+static inline bool isSmallObject (void *ptr)
+{
+    void* expected = alignDown(ptr, blockSize);
+    const BackRefIdx* idx = ((Block*)expected)->getBackRef();
+
+    return expected == getBackRef(safer_dereference(idx));
+}
+
+/**** Check if an object was allocated by scalable_malloc ****/
+static inline bool isRecognized (void* ptr)
+{
+    return isLargeObject(ptr) || isSmallObject(ptr);
+}
+
+static inline void freeSmallObject (void *object) 
+{
+    /* mask low bits to get the block */
+    Block *block = (Block *)alignDown(object, blockSize);
+    MALLOC_ASSERT( block->checkFreePrecond(), ASSERT_TEXT );
+
+#if MALLOC_CHECK_RECURSION
+    if (block->isStartupAllocObject()) {
+        ((StartupBlock *)block)->free(object);
+        return;
+    }
+#endif
+    FreeObject *objectToFree = block->findObjectToFree(object);
+
+    if (block->ownBlock())
+        block->freeOwnObject(objectToFree);
+    else /* Slower path to add to the shared list, the allocatedCount is updated by the owner thread in malloc. */
+        block->freePublicObject(objectToFree);
+
+}
+
+} // namespace internal
+} // namespace rml
+
+using namespace rml::internal;
+
+/*
+ * When a thread is shutting down this routine should be called to remove all the thread ids
+ * from the malloc blocks and replace them with a NULL thread id.
+ *
+ */
+#if MALLOC_TRACE
+static unsigned int threadGoingDownCount = 0;
+#endif
+
+/*
+ * For pthreads, the function is set as a callback in pthread_key_create for the TLS bin;
+ * it will be called automatically at thread exit with the key value as the argument.
+ *
+ * For Windows, it should be called directly, e.g. from DllMain, and the argument can be NULL.
+ * One should include "TypeDefinitions.h" for the declaration of this function.
+ */
+extern "C" void mallocThreadShutdownNotification(void* arg)
+{
+    TLSData *tls;
+    Block *threadBlock;
+    Block *threadlessBlock;
+    unsigned int index;
+
+    // Check whether TLS has been initialized
+    if (!isMallocInitialized()) return;
+
+    TRACEF(( "[ScalableMalloc trace] Thread id %d blocks return start %d\n",
+             getThreadId(),  threadGoingDownCount++ ));
+#ifdef USE_WINTHREAD
+    tls = getThreadMallocTLS();
+#else
+    tls = (TLSData*)arg;
+#endif
+    if (tls) {
+        Bin *tlsBin = tls->bin;
+        tls->pool.releaseAllBlocks();
+
+        for (index = 0; index < numBlockBins; index++) {
+            if (tlsBin[index].activeBlk==NULL)
+                continue;
+            threadlessBlock = tlsBin[index].activeBlk->previous;
+            while (threadlessBlock) {
+                threadBlock = threadlessBlock->previous;
+                if (threadlessBlock->allocatedCount==0 && threadlessBlock->publicFreeList==NULL) {
+                    /* the thread is being destroyed, so do not use its block pool */
+                    threadlessBlock->returnEmpty(/*poolTheBlock=*/false);
+                } else {
+                    orphanedBlocks->put(tlsBin+index, threadlessBlock);
+                }
+                threadlessBlock = threadBlock;
+            }
+            threadlessBlock = tlsBin[index].activeBlk;
+            while (threadlessBlock) {
+                threadBlock = threadlessBlock->next;
+                if (threadlessBlock->allocatedCount==0 && threadlessBlock->publicFreeList==NULL) {
+                    /* the thread is being destroyed, so do not use its block pool */
+                    threadlessBlock->returnEmpty(/*poolTheBlock=*/false);
+                } else {
+                    orphanedBlocks->put(tlsBin+index, threadlessBlock);
+                }
+                threadlessBlock = threadBlock;
+            }
+            tlsBin[index].activeBlk = 0;
+        }
+        bootStrapBlocks.free(tls);
+        setThreadMallocTLS(NULL);
+    }
+
+    TRACEF(( "[ScalableMalloc trace] Thread id %d blocks return end\n", getThreadId() ));
+}
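+
+/* Illustrative sketch of the Windows wiring described in the comment above the function
+   (names other than mallocThreadShutdownNotification are placeholders; the real hook-up
+   lives in the library's Windows-specific code):
+
+       BOOL WINAPI DllMain(HINSTANCE hInst, DWORD reason, LPVOID reserved) {
+           if (reason == DLL_THREAD_DETACH)
+               mallocThreadShutdownNotification(NULL); // argument may be NULL on Windows
+           return TRUE;
+       }
+*/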
+
+extern "C" void mallocProcessShutdownNotification(void)
+{
+#if COLLECT_STATISTICS
+    ThreadId nThreads = ThreadIdCount;
+    for( int i=1; i<=nThreads && i<MAX_THREADS; ++i )
+        STAT_print(i);
+#endif
+}
+
+/********* The malloc code          *************/
+
+extern "C" void * scalable_malloc(size_t size)
+{
+    Bin* bin;
+    Block * mallocBlock;
+    FreeObject *result = NULL;
+
+    if (!size) size = sizeof(size_t);
+
+#if MALLOC_CHECK_RECURSION
+    if (RecursiveMallocCallProtector::sameThreadActive()) {
+        result = size<minLargeObjectSize? StartupBlock::allocate(size) : 
+              (FreeObject*)mallocLargeObject(size, blockSize, /*startupAlloc=*/ true);
+        if (!result) errno = ENOMEM;
+        return result;
+    }
+#endif
+
+    if (!isMallocInitialized()) 
+        doInitialization();
+
+    /*
+     * Use Large Object Allocation
+     */
+    if (size >= minLargeObjectSize) {
+        result = (FreeObject*)mallocLargeObject(size, largeObjectAlignment);
+        if (!result) errno = ENOMEM;
+        return result;
+    }
+
+    /*
+     * Get an element in thread-local array corresponding to the given size;
+     * It keeps ptr to the active block for allocations of this size
+     */
+    bin = Bin::getAllocationBin(size);
+    if ( !bin ) {
+        errno = ENOMEM;
+        return NULL;
+    }
+
+    /* Get the block you want to try to allocate in. */
+    mallocBlock = bin->getActiveBlock();
+
+    if (mallocBlock) {
+        do {
+            if( (result = mallocBlock->allocate()) ) {
+                return result;
+            }
+            // the previous block, if any, should be empty enough
+        } while( (mallocBlock = bin->setPreviousBlockActive()) );
+    }
+
+    /*
+     * else privatize publicly freed objects in some block and allocate from it
+     */
+    mallocBlock = bin->getPublicFreeListBlock();
+    if (mallocBlock) {
+        if (mallocBlock->emptyEnoughToUse()) {
+            bin->moveBlockToBinFront(mallocBlock);
+        }
+        MALLOC_ASSERT( mallocBlock->freeListNonNull(), ASSERT_TEXT );
+        if ( (result = mallocBlock->allocateFromFreeList()) ) {
+            return result;
+        }
+        /* Else something strange happened, need to retry from the beginning; */
+        TRACEF(( "[ScalableMalloc trace] Something is wrong: no objects in public free list; reentering.\n" ));
+        return scalable_malloc(size);
+    }
+
+    /*
+     * no suitable own blocks, try to get a partial block that some other thread has discarded.
+     */
+    mallocBlock = orphanedBlocks->get(bin, size);
+    while (mallocBlock) {
+        bin->pushTLSBin(mallocBlock);
+        bin->setActiveBlock(mallocBlock); // TODO: move under the below condition?
+        if( (result = mallocBlock->allocate()) ) {
+            return result;
+        }
+        mallocBlock = orphanedBlocks->get(bin, size);
+    }
+
+    /*
+     * else try to get a new empty block
+     */
+    mallocBlock = Block::getEmpty(size);
+    if (mallocBlock) {
+        bin->pushTLSBin(mallocBlock);
+        bin->setActiveBlock(mallocBlock);
+        if( (result = mallocBlock->allocate()) ) {
+            return result;
+        }
+        /* Else something strange happened, need to retry from the beginning; */
+        TRACEF(( "[ScalableMalloc trace] Something is wrong: no objects in empty block; reentering.\n" ));
+        return scalable_malloc(size);
+    }
+    /*
+     * else nothing works so return NULL
+     */
+    TRACEF(( "[ScalableMalloc trace] No memory found, returning NULL.\n" ));
+    errno = ENOMEM;
+    return NULL;
+}
+
+/********* End the malloc code      *************/
+
+/********* The free code            *************/
+
+extern "C" void scalable_free (void *object) {
+    if (!object)
+        return;
+
+    MALLOC_ASSERT(isRecognized(object), "Invalid pointer in scalable_free detected.");
+
+    if (isLargeObject(object))
+        freeLargeObject(object);
+    else
+        freeSmallObject(object);
+}
+
+/*
+ * A variant that provides additional memory safety, by checking whether the given address
+ * was obtained with this allocator, and if not redirecting to the provided alternative call.
+ */
+extern "C" void safer_scalable_free (void *object, void (*original_free)(void*)) 
+{
+    if (!object)
+        return;
+
+    // must check first for a large object, because the small-object check touches 4 pages
+    // to the left, and they can be inaccessible
+    if (isLargeObject(object))
+        freeLargeObject(object);
+    else if (isSmallObject(object))
+        freeSmallObject(object);
+    else if (original_free)
+        original_free(object);
+}
+
+/********* End the free code        *************/
+
+/********* Code for scalable_realloc       ***********/
+
+/*
+ * From K&R
+ * "realloc changes the size of the object pointed to by p to size. The contents will
+ * be unchanged up to the minimum of the old and the new sizes. If the new size is larger,
+ * the new space is uninitialized. realloc returns a pointer to the new space, or
+ * NULL if the request cannot be satisfied, in which case *p is unchanged."
+ *
+ */
+extern "C" void* scalable_realloc(void* ptr, size_t size)
+{
+    /* corner cases left out of reallocAligned to not deal with errno there */
+    if (!ptr) {
+        return scalable_malloc(size);
+    }
+    if (!size) {
+        scalable_free(ptr);
+        return NULL;
+    }
+    void* tmp = reallocAligned(ptr, size, 0);
+    if (!tmp) errno = ENOMEM;
+    return tmp;
+}
+
+/*
+ * A variant that provides additional memory safety, by checking whether the given address
+ * was obtained with this allocator, and if not redirecting to the provided alternative call.
+ */
+extern "C" void* safer_scalable_realloc (void* ptr, size_t sz, void* original_realloc) 
+{
+    if (!ptr) {
+        return scalable_malloc(sz);
+    }
+    if (isRecognized(ptr)) {
+        if (!sz) {
+            scalable_free(ptr);
+            return NULL;
+        }
+        void* tmp = reallocAligned(ptr, sz, 0);
+        if (!tmp) errno = ENOMEM;
+        return tmp;
+    }
+#if USE_WINTHREAD
+    else if (original_realloc && sz) {
+            orig_ptrs *original_ptrs = static_cast<orig_ptrs*>(original_realloc);
+            if ( original_ptrs->orig_msize ){
+                size_t oldSize = original_ptrs->orig_msize(ptr);
+                void *newBuf = scalable_malloc(sz);
+                if (newBuf) {
+                    memcpy(newBuf, ptr, sz<oldSize? sz : oldSize);
+                    if ( original_ptrs->orig_free ){
+                        original_ptrs->orig_free( ptr );
+                    }
+                }
+                return newBuf;
+             }
+    }
+#else
+    else if (original_realloc) {
+        typedef void* (*realloc_ptr_t)(void*,size_t);
+        realloc_ptr_t original_realloc_ptr;
+        (void *&)original_realloc_ptr = original_realloc;
+        return original_realloc_ptr(ptr,sz);
+    }
+#endif
+    return NULL;
+}
+
+/********* End code for scalable_realloc   ***********/
+
+/********* Code for scalable_calloc   ***********/
+
+/*
+ * From K&R
+ * calloc returns a pointer to space for an array of nobj objects, 
+ * each of size size, or NULL if the request cannot be satisfied. 
+ * The space is initialized to zero bytes.
+ *
+ */
+
+extern "C" void * scalable_calloc(size_t nobj, size_t size)
+{
+    size_t arraySize = nobj * size;
+    // check that the multiplication did not overflow
+    if (nobj && arraySize / nobj != size) {
+        errno = ENOMEM;
+        return NULL;
+    }
+    void* result = scalable_malloc(arraySize);
+    if (result)
+        memset(result, 0, arraySize);
+    return result;
+}
+
+/********* End code for scalable_calloc   ***********/
+
+/********* Code for aligned allocation API **********/
+
+extern "C" int scalable_posix_memalign(void **memptr, size_t alignment, size_t size)
+{
+    if ( !isPowerOfTwoMultiple(alignment, sizeof(void*)) )
+        return EINVAL;
+    void *result = allocateAligned(size, alignment);
+    if (!result)
+        return ENOMEM;
+    *memptr = result;
+    return 0;
+}
+
+extern "C" void * scalable_aligned_malloc(size_t size, size_t alignment)
+{
+    if (!isPowerOfTwo(alignment) || 0==size) {
+        errno = EINVAL;
+        return NULL;
+    }
+    void* tmp = allocateAligned(size, alignment);
+    if (!tmp) 
+        errno = ENOMEM;
+    return tmp;
+}
+
+extern "C" void * scalable_aligned_realloc(void *ptr, size_t size, size_t alignment)
+{
+    /* corner cases left out of reallocAligned to not deal with errno there */
+    if (!isPowerOfTwo(alignment)) {
+        errno = EINVAL;
+        return NULL;
+    }
+    if (!ptr) {
+        return allocateAligned(size, alignment);
+    }
+    if (!size) {
+        scalable_free(ptr);
+        return NULL;
+    }
+
+    void* tmp = reallocAligned(ptr, size, alignment);
+    if (!tmp) errno = ENOMEM;
+    return tmp;
+}
+
+extern "C" void * safer_scalable_aligned_realloc(void *ptr, size_t size, size_t alignment, void* orig_function)
+{
+    /* corner cases left out of reallocAligned to not deal with errno there */
+    if (!isPowerOfTwo(alignment)) {
+        errno = EINVAL;
+        return NULL;
+    }
+    if (!ptr) {
+        return allocateAligned(size, alignment);
+    }
+    if (isRecognized(ptr)) {
+        if (!size) {
+            scalable_free(ptr);
+            return NULL;
+        }
+        void* tmp = reallocAligned(ptr, size, alignment);
+        if (!tmp) errno = ENOMEM;
+        return tmp;
+    }
+#if USE_WINTHREAD
+    else {
+        orig_ptrs *original_ptrs = static_cast<orig_ptrs*>(orig_function);
+        if (size) {
+            if ( original_ptrs->orig_msize ){
+                size_t oldSize = original_ptrs->orig_msize(ptr);
+                void *newBuf = allocateAligned(size, alignment);
+                if (newBuf) {
+                    memcpy(newBuf, ptr, size<oldSize? size : oldSize);
+                    if ( original_ptrs->orig_free ){
+                        original_ptrs->orig_free( ptr );
+                    }
+                }
+                return newBuf;
+            }else{
+                // We can't do anything with this; just keep the old pointer.
+                return NULL;
+            }
+        } else {
+            if ( original_ptrs->orig_free ){
+                original_ptrs->orig_free( ptr );
+            }
+            return NULL;
+        }
+    }
+#endif
+    return NULL;
+}
+
+extern "C" void scalable_aligned_free(void *ptr)
+{
+    scalable_free(ptr);
+}
+
+/********* end code for aligned allocation API **********/
+
+/********* Code for scalable_msize       ***********/
+
+/*
+ * Returns the size of a memory block allocated in the heap.
+ */
+extern "C" size_t scalable_msize(void* ptr)
+{
+    if (ptr) {
+        MALLOC_ASSERT(isRecognized(ptr), "Invalid pointer in scalable_msize detected.");
+        if (isLargeObject(ptr)) {
+            LargeMemoryBlock* lmb = ((LargeObjectHdr*)ptr - 1)->memoryBlock;
+            return lmb->objectSize;
+        } else {
+            Block* block = (Block *)alignDown(ptr, blockSize);
+#if MALLOC_CHECK_RECURSION
+            size_t size = block->getSize()? block->getSize() : StartupBlock::msize(ptr);
+#else
+            size_t size = block->getSize();
+#endif
+            MALLOC_ASSERT(size>0 && size<minLargeObjectSize, ASSERT_TEXT);
+            return size;
+        }
+    }
+    errno = EINVAL;
+    // Unlike _msize, return 0 in case of a parameter error;
+    // returning size_t(-1) looks more like a recipe for trouble.
+    return 0;
+}
+
+/*
+ * A variant that provides additional memory safety, by checking whether the given address
+ * was obtained with this allocator, and if not redirecting to the provided alternative call.
+ */
+extern "C" size_t safer_scalable_msize (void *object, size_t (*original_msize)(void*)) 
+{
+    if (object) {
+        // Check if the memory was allocated by scalable_malloc
+        if (isRecognized(object))
+            return scalable_msize(object);
+        else if (original_msize)
+            return original_msize(object);
+    }
+    // object is NULL or unknown
+    errno = EINVAL;
+    return 0;
+}
+
+/********* End code for scalable_msize   ***********/
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/large_objects.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/large_objects.cpp
new file mode 100644 (file)
index 0000000..b8b9a9d
--- /dev/null
@@ -0,0 +1,272 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbbmalloc_internal.h"
+
+/********* Allocation of large objects ************/
+
+
+namespace rml {
+namespace internal {
+
+static struct LargeBlockCacheStat {
+    uintptr_t age;
+    size_t cacheSize;
+} loCacheStat;
+
+/*
+ * The number of bins used to cache large objects.
+ */
+const uint32_t numLargeBlockBins = 1024; // with 1024 bins the max cached size is near 8MB
+
+class CachedBlocksList {
+    LargeMemoryBlock *first,
+                     *last;
+    /* age of the oldest block in the list; equal to last->age if last is defined,
+       used for quickly checking it without acquiring the lock. */
+    uintptr_t     oldest;
+    /* currAge at the time something was last evicted from the list because of its age,
+       not because of a cache hit */
+    uintptr_t     lastCleanedAge;
+    /* Current threshold value for the blocks of a particular size. 
+       Set on cache miss. */
+    intptr_t      ageThreshold;
+
+    MallocMutex   lock;
+    /* CachedBlocksList should be placed in zero-initialized memory,
+       ctor not needed. */
+    CachedBlocksList();
+public:
+    inline void push(LargeMemoryBlock* ptr);
+    inline LargeMemoryBlock* pop();
+    void releaseLastIfOld(uintptr_t currAge, size_t size);
+};
+
+/*
+ * Array of bins with lists of recently freed large objects cached for re-use.
+ */
+static char globalCachedBlockBinsSpace[sizeof(CachedBlocksList)*numLargeBlockBins];
+static CachedBlocksList* globalCachedBlockBins = (CachedBlocksList*)globalCachedBlockBinsSpace;
+
+/*
+ * Large objects are the only objects in the system that begin
+ * on a 16KB boundary, since the blocks used for smaller objects
+ * have a Block structure at each 16KB boundary.
+ */
+static uintptr_t cleanupCacheIfNeed();
+
+void CachedBlocksList::push(LargeMemoryBlock *ptr)
+{   
+    ptr->prev = NULL;
+    ptr->age  = cleanupCacheIfNeed ();
+
+    MallocMutex::scoped_lock scoped_cs(lock);
+    ptr->next = first;
+    first = ptr;
+    if (ptr->next) ptr->next->prev = ptr;
+    if (!last) {
+        MALLOC_ASSERT(0 == oldest, ASSERT_TEXT);
+        oldest = ptr->age;
+        last = ptr;
+    }
+}
+
+LargeMemoryBlock *CachedBlocksList::pop()
+{   
+    uintptr_t currAge = cleanupCacheIfNeed();
+    LargeMemoryBlock *result=NULL;
+    {
+        MallocMutex::scoped_lock scoped_cs(lock);
+        if (first) {
+            result = first;
+            first = result->next;
+            if (first)  
+                first->prev = NULL;
+            else {
+                last = NULL;
+                oldest = 0;
+            }
+        } else {
+            /* On a cache miss, set ageThreshold to twice the difference
+               between the current time and the last time the cache was cleaned. */
+            ageThreshold = 2*(currAge - lastCleanedAge);
+        }
+    }
+    return result;
+}
+
+void CachedBlocksList::releaseLastIfOld(uintptr_t currAge, size_t size)
+{
+    LargeMemoryBlock *toRelease = NULL;
+    /* oldest may be more recent than currAge, which is why a cast to a signed type
+       is used. Overflow of the age counter is also handled correctly. */
+    if (last && (intptr_t)(currAge - oldest) > ageThreshold) {
+        MallocMutex::scoped_lock scoped_cs(lock);
+        // double check
+        if (last && (intptr_t)(currAge - last->age) > ageThreshold) {
+            do {
+                last = last->prev;
+            } while (last && (intptr_t)(currAge - last->age) > ageThreshold);
+            if (last) {
+                toRelease = last->next;
+                oldest = last->age;
+                last->next = NULL;
+            } else {
+                toRelease = first;
+                first = NULL;
+                oldest = 0;
+            }
+            MALLOC_ASSERT( toRelease, ASSERT_TEXT );
+            lastCleanedAge = toRelease->age;
+        } 
+        else 
+            return;
+    }
+    while ( toRelease ) {
+        LargeMemoryBlock *helper = toRelease->next;
+        removeBackRef(toRelease->backRefIdx);
+        freeRawMemory(toRelease, size, toRelease->fromMapMemory);
+        toRelease = helper;
+    }
+}
+
+static uintptr_t cleanupCacheIfNeed ()
+{
+    /* Overflow of loCacheStat.age is OK, as we only need the difference between
+     * its current value and some recent value.
+     *
+     * Both malloc and free should increment loCacheStat.age; otherwise
+     * multiple cached blocks would share the same age, and the accuracy
+     * of the predictors would suffer.
+     */
+    uintptr_t currAge = (uintptr_t)AtomicIncrement((intptr_t&)loCacheStat.age);
+
+    if ( 0 == currAge % cacheCleanupFreq ) {
+        size_t objSize;
+        int i;
+
+        for (i = numLargeBlockBins-1, 
+             objSize = (numLargeBlockBins-1)*largeBlockCacheStep+blockSize; 
+             i >= 0; 
+             i--, objSize-=largeBlockCacheStep) {
+            /* The cached block size on this iteration is
+             * i*largeBlockCacheStep+blockSize; computing it iteratively
+             * seems to improve performance.
+             */
+            // release blocks older than ageThreshold from the cache
+            globalCachedBlockBins[i].releaseLastIfOld(currAge, objSize);
+        }
+    }
+    return currAge;
+}
+
+static LargeMemoryBlock* getCachedLargeBlock (size_t size)
+{
+    MALLOC_ASSERT( size%largeBlockCacheStep==0, ASSERT_TEXT );
+    LargeMemoryBlock *lmb = NULL;
+    // blockSize is the minimal alignment and thus the minimal size of a large object.
+    size_t idx = (size-minLargeObjectSize)/largeBlockCacheStep;
+    if (idx<numLargeBlockBins) {
+        lmb = globalCachedBlockBins[idx].pop();
+        if (lmb) {
+            MALLOC_ITT_SYNC_ACQUIRED(globalCachedBlockBins+idx);
+            STAT_increment(getThreadId(), ThreadCommonCounters, allocCachedLargeBlk);
+        }
+    }
+    return lmb;
+}
+
+void* mallocLargeObject (size_t size, size_t alignment, bool startupAlloc)
+{
+    LargeMemoryBlock* lmb;
+    size_t headersSize = sizeof(LargeMemoryBlock)+sizeof(LargeObjectHdr);
+    size_t allocationSize = alignUp(size+headersSize+alignment, largeBlockCacheStep);
+
+    if (startupAlloc || !(lmb = getCachedLargeBlock(allocationSize))) {
+        BackRefIdx backRefIdx;
+
+        if ((backRefIdx = BackRefIdx::newBackRef(/*largeObj=*/true)).isInvalid()) 
+            return NULL;
+        lmb = (LargeMemoryBlock*)getRawMemory(allocationSize, /*useMapMem=*/startupAlloc);
+        if (!lmb) return NULL;
+        lmb->fromMapMemory = startupAlloc;
+        lmb->backRefIdx = backRefIdx;
+        lmb->unalignedSize = allocationSize;
+        STAT_increment(getThreadId(), ThreadCommonCounters, allocNewLargeObj);
+    }
+
+    void *alignedArea = (void*)alignUp((uintptr_t)lmb+headersSize, alignment);
+    LargeObjectHdr *header = (LargeObjectHdr*)alignedArea-1;
+    header->memoryBlock = lmb;
+    header->backRefIdx = lmb->backRefIdx;
+    setBackRef(header->backRefIdx, header);
+    lmb->objectSize = size;
+
+    MALLOC_ASSERT( isLargeObject(alignedArea), ASSERT_TEXT );
+    return alignedArea;
+}
+
+static bool freeLargeObjectToCache (LargeMemoryBlock* largeBlock)
+{
+    size_t size = largeBlock->unalignedSize;
+    size_t idx = (size-minLargeObjectSize)/largeBlockCacheStep;
+    if (idx<numLargeBlockBins) {
+        MALLOC_ASSERT( size%largeBlockCacheStep==0, ASSERT_TEXT );
+        MALLOC_ITT_SYNC_RELEASING(globalCachedBlockBins+idx);
+        globalCachedBlockBins[idx].push(largeBlock);
+
+        STAT_increment(getThreadId(), ThreadCommonCounters, cacheLargeBlk);
+        return true;
+    }
+    return false;
+}
+
+void freeLargeObject (void *object)
+{
+    LargeObjectHdr *header = (LargeObjectHdr*)object - 1;
+
+    // overwrite backRefIdx to simplify double free detection
+    header->backRefIdx = BackRefIdx();
+    if (!freeLargeObjectToCache(header->memoryBlock)) {
+        removeBackRef(header->memoryBlock->backRefIdx);
+        freeRawMemory(header->memoryBlock, header->memoryBlock->unalignedSize, 
+                      /*useMapMem=*/ header->memoryBlock->fromMapMemory);
+        STAT_increment(getThreadId(), ThreadCommonCounters, freeLargeObj);
+    }
+}
+
+/*********** End allocation of large objects **********/
+
+
+
+} // namespace internal
+} // namespace rml
+
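The cache above is an array of size-class bins: mallocLargeObject rounds each request up to a multiple of largeBlockCacheStep, and getCachedLargeBlock/freeLargeObjectToCache map that rounded size to a bin index. A small sketch of the indexing arithmetic; the constants are assumed here for illustration only (the real values come from tbbmalloc_internal.h, and the remark "with 1024 bins the maximum cached size is near 8MB" is consistent with a step of roughly 8KB):

    #include <cstdio>
    #include <cstddef>

    // Assumed values for the sketch; not taken from the headers.
    const size_t minLargeObjectSize  = 16*1024;
    const size_t largeBlockCacheStep = 8*1024;
    const size_t numLargeBlockBins   = 1024;

    int main()
    {
        // A request already rounded up to the cache step, as mallocLargeObject does.
        size_t allocationSize = 96*1024;
        size_t idx = (allocationSize - minLargeObjectSize) / largeBlockCacheStep;
        // Only sizes that land in one of the bins are cached; anything larger is
        // returned straight to the OS by freeLargeObject().
        std::printf("bin %zu, cacheable: %s\n",
                    idx, idx < numLargeBlockBins ? "yes" : "no");
        return 0;
    }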
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/lin-tbbmalloc-export.def b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/lin-tbbmalloc-export.def
new file mode 100644
index 0000000..cd766d7
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+{
+global:
+
+scalable_calloc;
+scalable_free;
+scalable_malloc;
+scalable_realloc;
+scalable_posix_memalign;
+scalable_aligned_malloc;
+scalable_aligned_realloc;
+scalable_aligned_free;
+__TBB_internal_calloc;
+__TBB_internal_free;
+__TBB_internal_malloc;
+__TBB_internal_realloc;
+__TBB_internal_posix_memalign;
+scalable_msize;
+
+local:
+
+/* TBB symbols */
+*3rml8internal*;
+*3tbb*;
+*__TBB*;
+__itt_*;
+ITT_DoOneTimeInitialization;
+TBB_runtime_interface_version;
+
+/* Intel Compiler (libirc) symbols */
+__intel_*;
+_intel_*;
+get_memcpy_largest_cachelinesize;
+get_memcpy_largest_cache_size;
+get_mem_ops_method;
+init_mem_ops_method;
+irc__get_msg;
+irc__print;
+override_mem_ops_method;
+set_memcpy_largest_cachelinesize;
+set_memcpy_largest_cache_size;
+
+};
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/lin32-proxy-export.def b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/lin32-proxy-export.def
new file mode 100644
index 0000000..ba22eca
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+{
+global:
+calloc;
+free;
+malloc;
+realloc;
+posix_memalign;
+memalign;
+valloc;
+pvalloc;
+mallinfo;
+mallopt;
+__TBB_malloc_proxy;
+__TBB_internal_find_original_malloc;
+_ZdaPv; /* next ones are new/delete */
+_ZdaPvRKSt9nothrow_t;
+_ZdlPv;
+_ZdlPvRKSt9nothrow_t;
+_Znaj;
+_ZnajRKSt9nothrow_t;
+_Znwj;
+_ZnwjRKSt9nothrow_t;
+
+local:
+
+/* TBB symbols */
+*3rml8internal*;
+*3tbb*;
+*__TBB*;
+
+};
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/lin64-proxy-export.def b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/lin64-proxy-export.def
new file mode 100644
index 0000000..b6eb7ae
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+{
+global:
+calloc;
+free;
+malloc;
+realloc;
+posix_memalign;
+memalign;
+valloc;
+pvalloc;
+mallinfo;
+mallopt;
+__TBB_malloc_proxy;
+__TBB_internal_find_original_malloc;
+_ZdaPv;  /* next ones are new/delete */
+_ZdaPvRKSt9nothrow_t;
+_ZdlPv;
+_ZdlPvRKSt9nothrow_t;
+_Znam;
+_ZnamRKSt9nothrow_t;
+_Znwm;
+_ZnwmRKSt9nothrow_t;
+
+local:
+
+/* TBB symbols */
+*3rml8internal*;
+*3tbb*;
+*__TBB*;
+
+};
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/lin64ipf-proxy-export.def b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/lin64ipf-proxy-export.def
new file mode 100644
index 0000000..b6eb7ae
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+{
+global:
+calloc;
+free;
+malloc;
+realloc;
+posix_memalign;
+memalign;
+valloc;
+pvalloc;
+mallinfo;
+mallopt;
+__TBB_malloc_proxy;
+__TBB_internal_find_original_malloc;
+_ZdaPv;  /* next ones are new/delete */
+_ZdaPvRKSt9nothrow_t;
+_ZdlPv;
+_ZdlPvRKSt9nothrow_t;
+_Znam;
+_ZnamRKSt9nothrow_t;
+_Znwm;
+_ZnwmRKSt9nothrow_t;
+
+local:
+
+/* TBB symbols */
+*3rml8internal*;
+*3tbb*;
+*__TBB*;
+
+};
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/mac32-tbbmalloc-export.def b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/mac32-tbbmalloc-export.def
new file mode 100644
index 0000000..295fce2
--- /dev/null
@@ -0,0 +1,36 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+# MemoryAllocator.cpp
+_scalable_calloc
+_scalable_free
+_scalable_malloc
+_scalable_realloc
+_scalable_posix_memalign
+_scalable_aligned_malloc
+_scalable_aligned_realloc
+_scalable_aligned_free
+_scalable_msize
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/mac64-tbbmalloc-export.def b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/mac64-tbbmalloc-export.def
new file mode 100644
index 0000000..295fce2
--- /dev/null
@@ -0,0 +1,36 @@
+# Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+#
+# This file is part of Threading Building Blocks.
+#
+# Threading Building Blocks is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# Threading Building Blocks is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Threading Building Blocks; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction.  Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License.  This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+# MemoryAllocator.cpp
+_scalable_calloc
+_scalable_free
+_scalable_malloc
+_scalable_realloc
+_scalable_posix_memalign
+_scalable_aligned_malloc
+_scalable_aligned_realloc
+_scalable_aligned_free
+_scalable_msize
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/proxy.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/proxy.cpp
new file mode 100644
index 0000000..87b03e5
--- /dev/null
@@ -0,0 +1,472 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "proxy.h"
+
+#if !defined(__EXCEPTIONS) && !defined(_CPPUNWIND) && !defined(__SUNPRO_CC) || defined(_XBOX)
+    #if TBB_USE_EXCEPTIONS
+        #error Compilation settings do not support exception handling. Please do not set TBB_USE_EXCEPTIONS macro or set it to 0.
+    #elif !defined(TBB_USE_EXCEPTIONS)
+        #define TBB_USE_EXCEPTIONS 0
+    #endif
+#elif !defined(TBB_USE_EXCEPTIONS)
+    #define TBB_USE_EXCEPTIONS 1
+#endif
+
+#if MALLOC_LD_PRELOAD
+
+/*** service functions and variables ***/
+
+#include <unistd.h> // for sysconf
+#include <dlfcn.h>
+
+static long memoryPageSize;
+
+static inline void initPageSize()
+{
+    memoryPageSize = sysconf(_SC_PAGESIZE);
+}
+
+/* For the expected behaviour (i.e., finding malloc/free/etc. in libc.so,
+   not in ld-linux.so), dlsym(RTLD_NEXT) should be called from
+   an LD_PRELOADed library, not from another dynamic library.
+   That is why find_original_malloc has to live here.
+ */
+extern "C" bool __TBB_internal_find_original_malloc(int num, const char *names[],
+                                                        void *ptrs[])
+{
+    for (int i=0; i<num; i++)
+        if (NULL == (ptrs[i] = dlsym (RTLD_NEXT, names[i])))
+            return false;
+
+    return true;
+}
+
+/* __TBB_malloc_proxy is used as a weak symbol by libtbbmalloc to:
+   1) detect that the proxy library is loaded;
+   2) check that dlsym("malloc") found something different from our replacement malloc.
+*/
+extern "C" void *__TBB_malloc_proxy() __attribute__ ((alias ("malloc")));
+
+#ifndef __THROW
+#define __THROW
+#endif
+
+/*** replacements for malloc and the family ***/
+
+extern "C" {
+
+void *malloc(size_t size) __THROW
+{
+    return __TBB_internal_malloc(size);
+}
+
+void * calloc(size_t num, size_t size) __THROW
+{
+    return __TBB_internal_calloc(num, size);
+}
+
+void free(void *object) __THROW
+{
+    __TBB_internal_free(object);
+}
+
+void * realloc(void* ptr, size_t sz) __THROW
+{
+    return __TBB_internal_realloc(ptr, sz);
+}
+
+int posix_memalign(void **memptr, size_t alignment, size_t size) __THROW
+{
+    return __TBB_internal_posix_memalign(memptr, alignment, size);
+}
+
+/* The older *NIX interface for aligned allocations;
+   it has formally been superseded by posix_memalign and is deprecated,
+   so we do not expect it to cause a cyclic dependency with the C RTL. */
+void * memalign(size_t alignment, size_t size)  __THROW
+{
+    return scalable_aligned_malloc(size, alignment);
+}
+
+/* valloc allocates memory aligned on a page boundary */
+void * valloc(size_t size) __THROW
+{
+    if (! memoryPageSize) initPageSize();
+
+    return scalable_aligned_malloc(size, memoryPageSize);
+}
+
+/* pvalloc allocates the smallest set of complete pages that can hold
+   the requested number of bytes. The result is aligned on a page boundary. */
+void * pvalloc(size_t size) __THROW
+{
+    if (! memoryPageSize) initPageSize();
+    // align size up to the page size
+    size = ((size-1) | (memoryPageSize-1)) + 1;
+
+    return scalable_aligned_malloc(size, memoryPageSize);
+}
+
+int mallopt(int /*param*/, int /*value*/) __THROW
+{
+    return 1;
+}
+
+} /* extern "C" */
+
+#if __linux__
+#include <malloc.h>
+#include <string.h> // for memset
+
+extern "C" struct mallinfo mallinfo() __THROW
+{
+    struct mallinfo m;
+    memset(&m, 0, sizeof(struct mallinfo));
+
+    return m;
+}
+#endif /* __linux__ */
+
+/*** replacements for global operators new and delete ***/
+
+#include <new>
+
+void * operator new(size_t sz) throw (std::bad_alloc) {
+    void *res = scalable_malloc(sz);
+#if TBB_USE_EXCEPTIONS
+    if (NULL == res)
+        throw std::bad_alloc();
+#endif /* TBB_USE_EXCEPTIONS */
+    return res;
+}
+void* operator new[](size_t sz) throw (std::bad_alloc) {
+    void *res = scalable_malloc(sz);
+#if TBB_USE_EXCEPTIONS
+    if (NULL == res)
+        throw std::bad_alloc();
+#endif /* TBB_USE_EXCEPTIONS */
+    return res;
+}
+void operator delete(void* ptr) throw() {
+    scalable_free(ptr);
+}
+void operator delete[](void* ptr) throw() {
+    scalable_free(ptr);
+}
+void* operator new(size_t sz, const std::nothrow_t&) throw() {
+    return scalable_malloc(sz);
+}
+void* operator new[](std::size_t sz, const std::nothrow_t&) throw() {
+    return scalable_malloc(sz);
+}
+void operator delete(void* ptr, const std::nothrow_t&) throw() {
+    scalable_free(ptr);
+}
+void operator delete[](void* ptr, const std::nothrow_t&) throw() {
+    scalable_free(ptr);
+}
+
+#endif /* MALLOC_LD_PRELOAD */
+
+
+#ifdef _WIN32
+#include <windows.h>
+
+#include <stdio.h>
+#include "tbb_function_replacement.h"
+
+void safer_scalable_free2( void *ptr)
+{
+    safer_scalable_free( ptr, NULL );
+}
+
+// We do not support _expand(); the replacement below always returns NULL.
+void* safer_expand( void *, size_t )
+{
+    return NULL;
+}
+
+#define __TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(CRTLIB)\
+void (*orig_free_##CRTLIB)(void*);                                                        \
+void safer_scalable_free_##CRTLIB( void *ptr)                                             \
+{                                                                                         \
+    safer_scalable_free( ptr, orig_free_##CRTLIB );                                       \
+}                                                                                         \
+                                                                                          \
+size_t (*orig_msize_##CRTLIB)(void*);                                                     \
+size_t safer_scalable_msize_##CRTLIB( void *ptr)                                          \
+{                                                                                         \
+    return safer_scalable_msize( ptr, orig_msize_##CRTLIB );                              \
+}                                                                                         \
+                                                                                          \
+void* safer_scalable_realloc_##CRTLIB( void *ptr, size_t size )                           \
+{                                                                                         \
+    orig_ptrs func_ptrs = {orig_free_##CRTLIB, orig_msize_##CRTLIB};                      \
+    return safer_scalable_realloc( ptr, size, &func_ptrs );                               \
+}                                                                                         \
+                                                                                          \
+void* safer_scalable_aligned_realloc_##CRTLIB( void *ptr, size_t size, size_t alignment ) \
+{                                                                                         \
+    orig_ptrs func_ptrs = {orig_free_##CRTLIB, orig_msize_##CRTLIB};                      \
+    return safer_scalable_aligned_realloc( ptr, size, alignment, &func_ptrs );            \
+} 
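For illustration, instantiating the wrapper macro for one runtime, say msvcr90, generates approximately the following definitions (a mechanical expansion shown only to make the per-CRT naming scheme explicit):

    void (*orig_free_msvcr90)(void*);
    void safer_scalable_free_msvcr90( void *ptr )
    {
        safer_scalable_free( ptr, orig_free_msvcr90 );
    }

    size_t (*orig_msize_msvcr90)(void*);
    size_t safer_scalable_msize_msvcr90( void *ptr )
    {
        return safer_scalable_msize( ptr, orig_msize_msvcr90 );
    }

    void* safer_scalable_realloc_msvcr90( void *ptr, size_t size )
    {
        orig_ptrs func_ptrs = {orig_free_msvcr90, orig_msize_msvcr90};
        return safer_scalable_realloc( ptr, size, &func_ptrs );
    }

    void* safer_scalable_aligned_realloc_msvcr90( void *ptr, size_t size, size_t alignment )
    {
        orig_ptrs func_ptrs = {orig_free_msvcr90, orig_msize_msvcr90};
        return safer_scalable_aligned_realloc( ptr, size, alignment, &func_ptrs );
    }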
+
+// limit is 30 bytes/60 symbols per pattern
+const char* known_bytecodes[] = {
+#if _WIN64
+    "4883EC284885C974",       //release free() win64
+    "4883EC384885C975",       //release msize() win64
+    "4885C974375348",         //release free() 8.0.50727.42 win64
+    "48894C24084883EC28BA",   //debug prologue for win64
+    "4C8BC1488B0DA6E4040033", //win64 SDK
+    "4883EC284885C975",       //release msize() 10.0.21003.1 win64
+#else
+    "558BEC6A018B",           //debug free() & _msize() 8.0.50727.4053 win32
+    "6A1868********E8",       //release free() 8.0.50727.4053 win32
+    "6A1C68********E8",       //release _msize() 8.0.50727.4053 win32
+    "8BFF558BEC6A",           //debug free() & _msize() 9.0.21022.8 win32
+    "8BFF558BEC83",           //debug free() & _msize() 10.0.21003.1 win32
+#endif
+    NULL
+    };
+
+#if _WIN64
+#define __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(CRT_VER)\
+    ReplaceFunctionWithStore( #CRT_VER "d.dll", "free",  (FUNCPTR)safer_scalable_free_ ## CRT_VER ## d,  known_bytecodes, (FUNCPTR*)&orig_free_ ## CRT_VER ## d );  \
+    ReplaceFunctionWithStore( #CRT_VER  ".dll", "free",  (FUNCPTR)safer_scalable_free_ ## CRT_VER,       known_bytecodes, (FUNCPTR*)&orig_free_ ## CRT_VER );       \
+    ReplaceFunctionWithStore( #CRT_VER "d.dll", "_msize",(FUNCPTR)safer_scalable_msize_ ## CRT_VER ## d, known_bytecodes, (FUNCPTR*)&orig_msize_ ## CRT_VER ## d ); \
+    ReplaceFunctionWithStore( #CRT_VER  ".dll", "_msize",(FUNCPTR)safer_scalable_msize_ ## CRT_VER,      known_bytecodes, (FUNCPTR*)&orig_msize_ ## CRT_VER );      \
+    ReplaceFunctionWithStore( #CRT_VER "d.dll", "realloc",         (FUNCPTR)safer_scalable_realloc_ ## CRT_VER ## d,         0, NULL); \
+    ReplaceFunctionWithStore( #CRT_VER  ".dll", "realloc",         (FUNCPTR)safer_scalable_realloc_ ## CRT_VER,              0, NULL); \
+    ReplaceFunctionWithStore( #CRT_VER "d.dll", "_aligned_free",   (FUNCPTR)safer_scalable_free_ ## CRT_VER ## d,            0, NULL); \
+    ReplaceFunctionWithStore( #CRT_VER  ".dll", "_aligned_free",   (FUNCPTR)safer_scalable_free_ ## CRT_VER,                 0, NULL); \
+    ReplaceFunctionWithStore( #CRT_VER "d.dll", "_aligned_realloc",(FUNCPTR)safer_scalable_aligned_realloc_ ## CRT_VER ## d, 0, NULL); \
+    ReplaceFunctionWithStore( #CRT_VER  ".dll", "_aligned_realloc",(FUNCPTR)safer_scalable_aligned_realloc_ ## CRT_VER,      0, NULL);
+#else
+#define __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(CRT_VER)\
+    ReplaceFunctionWithStore( #CRT_VER "d.dll", "free",  (FUNCPTR)safer_scalable_free_ ## CRT_VER ## d,  known_bytecodes, (FUNCPTR*)&orig_free_ ## CRT_VER ## d );  \
+    ReplaceFunctionWithStore( #CRT_VER  ".dll", "free",  (FUNCPTR)safer_scalable_free_ ## CRT_VER,       known_bytecodes, (FUNCPTR*)&orig_free_ ## CRT_VER );       \
+    ReplaceFunctionWithStore( #CRT_VER "d.dll", "_msize",(FUNCPTR)safer_scalable_msize_ ## CRT_VER ## d, known_bytecodes, (FUNCPTR*)&orig_msize_ ## CRT_VER ## d ); \
+    ReplaceFunctionWithStore( #CRT_VER  ".dll", "_msize",(FUNCPTR)safer_scalable_msize_ ## CRT_VER,      known_bytecodes, (FUNCPTR*)&orig_msize_ ## CRT_VER );      \
+    ReplaceFunctionWithStore( #CRT_VER "d.dll", "realloc",         (FUNCPTR)safer_scalable_realloc_ ## CRT_VER ## d,         0, NULL); \
+    ReplaceFunctionWithStore( #CRT_VER  ".dll", "realloc",         (FUNCPTR)safer_scalable_realloc_ ## CRT_VER,              0, NULL); \
+    ReplaceFunctionWithStore( #CRT_VER "d.dll", "_aligned_free",   (FUNCPTR)safer_scalable_free_ ## CRT_VER ## d,            0, NULL); \
+    ReplaceFunctionWithStore( #CRT_VER  ".dll", "_aligned_free",   (FUNCPTR)safer_scalable_free_ ## CRT_VER,                 0, NULL); \
+    ReplaceFunctionWithStore( #CRT_VER "d.dll", "_aligned_realloc",(FUNCPTR)safer_scalable_aligned_realloc_ ## CRT_VER ## d, 0, NULL); \
+    ReplaceFunctionWithStore( #CRT_VER  ".dll", "_aligned_realloc",(FUNCPTR)safer_scalable_aligned_realloc_ ## CRT_VER,      0, NULL);
+#endif
+
+__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr70d);
+__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr70);
+__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr71d);
+__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr71);
+__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr80d);
+__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr80);
+__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr90d);
+__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr90);
+__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr100d);
+__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr100);
+
+
+/*** replacements for global operators new and delete ***/
+
+#include <new>
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+#pragma warning( push )
+#pragma warning( disable : 4290 )
+#endif
+
+void * operator_new(size_t sz) throw (std::bad_alloc) {
+    void *res = scalable_malloc(sz);
+    if (NULL == res) throw std::bad_alloc();
+    return res;
+}
+void* operator_new_arr(size_t sz) throw (std::bad_alloc) {
+    void *res = scalable_malloc(sz);
+    if (NULL == res) throw std::bad_alloc();
+    return res;
+}
+void operator_delete(void* ptr) throw() {
+    safer_scalable_free2(ptr);
+}
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+#pragma warning( pop )
+#endif
+
+void operator_delete_arr(void* ptr) throw() {
+    safer_scalable_free2(ptr);
+}
+void* operator_new_t(size_t sz, const std::nothrow_t&) throw() {
+    return scalable_malloc(sz);
+}
+void* operator_new_arr_t(std::size_t sz, const std::nothrow_t&) throw() {
+    return scalable_malloc(sz);
+}
+void operator_delete_t(void* ptr, const std::nothrow_t&) throw() {
+    safer_scalable_free2(ptr);
+}
+void operator_delete_arr_t(void* ptr, const std::nothrow_t&) throw() {
+    safer_scalable_free2(ptr);
+}
+
+const char* modules_to_replace[] = {
+    "msvcr80d.dll",
+    "msvcr80.dll",
+    "msvcr90d.dll",
+    "msvcr90.dll",
+    "msvcr100d.dll",
+    "msvcr100.dll",
+    "msvcr70d.dll",
+    "msvcr70.dll",
+    "msvcr71d.dll",
+    "msvcr71.dll",
+    };
+
+/*
+We need to replace the following functions:
+malloc
+calloc
+_aligned_malloc
+_expand (by dummy implementation)
+??2@YAPAXI@Z      operator new                         (ia32)
+??_U@YAPAXI@Z     void * operator new[] (size_t size)  (ia32)
+??3@YAXPAX@Z      operator delete                      (ia32)  
+??_V@YAXPAX@Z     operator delete[]                    (ia32)
+??2@YAPEAX_K@Z    void * operator new(unsigned __int64)   (intel64)
+??_U@YAPEAX_K@Z   void * operator new[](unsigned __int64) (intel64)
+??3@YAXPEAX@Z     operator delete                         (intel64)  
+??_V@YAXPEAX@Z    operator delete[]                       (intel64)
+??2@YAPAXIABUnothrow_t@std@@@Z      void * operator new (size_t sz, const std::nothrow_t&) throw()  (optional)
+??_U@YAPAXIABUnothrow_t@std@@@Z     void * operator new[] (size_t sz, const std::nothrow_t&) throw() (optional)
+
+and these functions have runtime-specific replacement:
+realloc
+free
+_msize
+_aligned_realloc
+_aligned_free
+*/
+
+typedef struct FRData_t {
+    //char *_module;
+    const char *_func;
+    FUNCPTR _fptr;
+    FRR_ON_ERROR _on_error;
+} FRDATA;
+
+FRDATA routines_to_replace[] = {
+    { "malloc",  (FUNCPTR)scalable_malloc, FRR_FAIL },
+    { "calloc",  (FUNCPTR)scalable_calloc, FRR_FAIL },
+    { "_aligned_malloc",  (FUNCPTR)scalable_aligned_malloc, FRR_FAIL },
+    { "_expand",  (FUNCPTR)safer_expand, FRR_IGNORE },
+#if _WIN64
+    { "??2@YAPEAX_K@Z", (FUNCPTR)operator_new, FRR_FAIL },
+    { "??_U@YAPEAX_K@Z", (FUNCPTR)operator_new_arr, FRR_FAIL },
+    { "??3@YAXPEAX@Z", (FUNCPTR)operator_delete, FRR_FAIL },
+    { "??_V@YAXPEAX@Z", (FUNCPTR)operator_delete_arr, FRR_FAIL },
+#else 
+    { "??2@YAPAXI@Z", (FUNCPTR)operator_new, FRR_FAIL },
+    { "??_U@YAPAXI@Z", (FUNCPTR)operator_new_arr, FRR_FAIL },
+    { "??3@YAXPAX@Z", (FUNCPTR)operator_delete, FRR_FAIL },
+    { "??_V@YAXPAX@Z", (FUNCPTR)operator_delete_arr, FRR_FAIL },
+#endif
+    { "??2@YAPAXIABUnothrow_t@std@@@Z", (FUNCPTR)operator_new_t, FRR_IGNORE },
+    { "??_U@YAPAXIABUnothrow_t@std@@@Z", (FUNCPTR)operator_new_arr_t, FRR_IGNORE }
+};
+
+#ifndef UNICODE
+void ReplaceFunctionWithStore( const char*dllName, const char *funcName, FUNCPTR newFunc, const char ** opcodes, FUNCPTR* origFunc )
+#else
+void ReplaceFunctionWithStore( const wchar_t *dllName, const char *funcName, FUNCPTR newFunc, const char ** opcodes, FUNCPTR* origFunc )
+#endif
+{
+    FRR_TYPE type = ReplaceFunction( dllName, funcName, newFunc, opcodes, origFunc );
+    if (type == FRR_NODLL) return;
+    if ( type != FRR_OK )
+    {
+        fprintf(stderr, "Failed to replace function %s in module %s\n",
+                funcName, dllName);
+        exit(1);
+    }
+}
+
+void doMallocReplacement()
+{
+    int i,j;
+
+    // Replace functions without storing original code
+    int modules_to_replace_count = sizeof(modules_to_replace) / sizeof(modules_to_replace[0]);
+    int routines_to_replace_count = sizeof(routines_to_replace) / sizeof(routines_to_replace[0]);
+    for ( j=0; j<modules_to_replace_count; j++ )
+        for (i = 0; i < routines_to_replace_count; i++)
+        {
+            FRR_TYPE type = ReplaceFunction( modules_to_replace[j], routines_to_replace[i]._func, routines_to_replace[i]._fptr, NULL, NULL );
+            if (type == FRR_NODLL) break;
+            if (type != FRR_OK && routines_to_replace[i]._on_error==FRR_FAIL)
+            {
+                fprintf(stderr, "Failed to replace function %s in module %s\n",
+                        routines_to_replace[i]._func, modules_to_replace[j]);
+                exit(1);
+            }
+        }
+
+    // Replace functions and keep backup of original code (separate for each runtime)
+    __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr70)
+    __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr71)
+    __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr80)
+    __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr90)
+    __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr100)
+}
+
+extern "C" BOOL WINAPI DllMain( HINSTANCE hInst, DWORD callReason, LPVOID reserved )
+{
+
+    if ( callReason==DLL_PROCESS_ATTACH && reserved && hInst ) {
+#if TBBMALLOC_USE_TBB_FOR_ALLOCATOR_ENV_CONTROLLED
+        char pinEnvVariable[50];
+        if( GetEnvironmentVariable("TBBMALLOC_USE_TBB_FOR_ALLOCATOR", pinEnvVariable, 50))
+        {
+            doMallocReplacement();
+        }
+#else
+        doMallocReplacement();
+#endif
+    }
+
+    return TRUE;
+}
+
+// Just to make the linker happy and link the DLL to the application
+extern "C" __declspec(dllexport) void __TBB_malloc_proxy()
+{
+
+}
+
+#endif //_WIN32
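On Linux the replacement path relies on this proxy library being LD_PRELOADed; libtbbmalloc can then recover the original libc entry points through the __TBB_internal_find_original_malloc function exported above. A minimal sketch of such a lookup on the caller's side; the function-pointer names are chosen for the illustration and are not taken from the sources:

    #include <cstddef>

    extern "C" bool __TBB_internal_find_original_malloc(int num, const char *names[],
                                                        void *ptrs[]);

    static void *(*orig_libc_malloc)(size_t);
    static void  (*orig_libc_free)(void *);

    // Capture libc's malloc/free so memory the scalable allocator does not
    // recognize can be handed back to the original routines.
    bool capture_original_entry_points()
    {
        const char *names[] = { "malloc", "free" };
        void *ptrs[2];
        if (!__TBB_internal_find_original_malloc(2, names, ptrs))
            return false;
        orig_libc_malloc = (void *(*)(size_t))ptrs[0];
        orig_libc_free   = (void  (*)(void *))ptrs[1];
        return true;
    }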
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/proxy.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/proxy.h
new file mode 100644
index 0000000..315f628
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef _TBB_malloc_proxy_H_
+#define _TBB_malloc_proxy_H_
+
+#if __linux__
+#define MALLOC_LD_PRELOAD 1
+#endif
+
+// MALLOC_LD_PRELOAD depends on MALLOC_CHECK_RECURSION stuff
+#if __linux__ || __APPLE__ || __sun || __FreeBSD__ || MALLOC_LD_PRELOAD
+#define MALLOC_CHECK_RECURSION 1
+#endif
+
+#include <stddef.h>
+
+extern "C" {
+    void * scalable_malloc(size_t size);
+    void * scalable_calloc(size_t nobj, size_t size);
+    void   scalable_free(void *ptr);
+    void * scalable_realloc(void* ptr, size_t size);
+    void * scalable_aligned_malloc(size_t size, size_t alignment);
+    void * scalable_aligned_realloc(void* ptr, size_t size, size_t alignment);
+    int    scalable_posix_memalign(void **memptr, size_t alignment, size_t size);
+    size_t scalable_msize(void *ptr);
+    void   safer_scalable_free( void *ptr, void (*original_free)(void*));
+    void * safer_scalable_realloc( void *ptr, size_t, void* );
+    void * safer_scalable_aligned_realloc( void *ptr, size_t, size_t, void* );
+    size_t safer_scalable_msize( void *ptr, size_t (*orig_msize_crt80d)(void*));
+
+    void * __TBB_internal_malloc(size_t size);
+    void * __TBB_internal_calloc(size_t num, size_t size);
+    void   __TBB_internal_free(void *ptr);
+    void * __TBB_internal_realloc(void* ptr, size_t sz);
+    int    __TBB_internal_posix_memalign(void **memptr, size_t alignment, size_t size);
+    
+    bool   __TBB_internal_find_original_malloc(int num, const char *names[], void *table[]);
+} // extern "C"
+
+// Struct with original free() and _msize() pointers
+struct orig_ptrs {
+    void   (*orig_free) (void*);  
+    size_t (*orig_msize)(void*); 
+};
+
+#endif /* _TBB_malloc_proxy_H_ */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/tbb_function_replacement.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/tbb_function_replacement.cpp
new file mode 100644
index 0000000..02ebea6
--- /dev/null
@@ -0,0 +1,476 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Works on windows only
+#ifdef _WIN32
+#define _CRT_SECURE_NO_DEPRECATE 1
+#define __TBB_NO_IMPLICIT_LINKAGE 1
+
+#include <windows.h>
+#include <new>
+#include <stdio.h>
+#include "tbb_function_replacement.h"
+
+#include "tbb/tbb_config.h"
+#include "tbb/tbb_stddef.h"
+#include "../tbb/tbb_assert_impl.h"
+
+inline UINT_PTR Ptr2Addrint(LPVOID ptr)
+{
+    Int2Ptr i2p;
+    i2p.lpv = ptr;
+    return i2p.uip;
+}
+
+inline LPVOID Addrint2Ptr(UINT_PTR ptr)
+{
+    Int2Ptr i2p;
+    i2p.uip = ptr;
+    return i2p.lpv;
+}
+
+// Is the distance between addr1 and addr2 smaller than dist
+inline bool IsInDistance(UINT_PTR addr1, UINT_PTR addr2, __int64 dist)
+{
+    __int64 diff = addr1>addr2 ? addr1-addr2 : addr2-addr1;
+    return diff<dist;
+}
+
+/*
+ * When inserting a probe into a 64-bit process, the distance between the
+ * insertion point and the target may be bigger than 2^32. In this case we use
+ * an indirect jump through a memory location whose offset from the insertion
+ * point is smaller than 2^32 and which contains the absolute target address
+ * (8 bytes).
+ *
+ * This class is used to hold the pages used for the above trampolines.
+ * Since this utility will be used to replace malloc functions, this
+ * implementation doesn't allocate memory dynamically.
+ *
+ * The struct MemoryBuffer holds the data about a page of memory used for
+ * replacing functions on Intel64 where the target is too far away to be
+ * reached with a short jump. All the calculations of m_base and m_next are
+ * in multiples of SIZE_OF_ADDRESS (which is 8 on Win64).
+ */
+class MemoryProvider {
+private:
+    struct MemoryBuffer {
+        UINT_PTR m_base;    // base address of the buffer
+        UINT_PTR m_next;    // next free location in the buffer
+        DWORD    m_size;    // size of buffer
+
+        // Default constructor
+        MemoryBuffer() : m_base(0), m_next(0), m_size(0) {}
+
+        // Constructor
+        MemoryBuffer(void *base, DWORD size)
+        {
+            m_base = Ptr2Addrint(base);
+            m_next = m_base;
+            m_size = size;
+        }
+    };
+
+MemoryBuffer *CreateBuffer(UINT_PTR addr)
+    {
+        // No more room in the pages database
+        if (m_lastBuffer - m_pages == MAX_NUM_BUFFERS)
+            return 0;
+
+        void *newAddr = Addrint2Ptr(addr);
+        // Get information for the region which the given address belongs to
+        MEMORY_BASIC_INFORMATION memInfo;
+        if (VirtualQuery(newAddr, &memInfo, sizeof(memInfo)) != sizeof(memInfo))
+            return 0;
+
+        for(;;) {
+            // The new address to check is beyond the current region and aligned to allocation size
+            newAddr = Addrint2Ptr( (Ptr2Addrint(memInfo.BaseAddress) + memInfo.RegionSize + m_allocSize) & ~(UINT_PTR)(m_allocSize-1) );
+
+            // Check that the address is within the required distance.
+            // VirtualAlloc can only round the address down, so it will remain within that distance.
+            if (!IsInDistance(addr, Ptr2Addrint(newAddr), MAX_DISTANCE))
+                break;
+
+            if (VirtualQuery(newAddr, &memInfo, sizeof(memInfo)) != sizeof(memInfo))
+                break;
+
+            if (memInfo.State == MEM_FREE && memInfo.RegionSize >= m_allocSize)
+            {
+                // Found a free region, try to allocate a page in this region
+                void *newPage = VirtualAlloc(newAddr, m_allocSize, MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);
+                if (!newPage)
+                    break;
+
+                // Add the new page to the pages database
+                MemoryBuffer *pBuff = new (m_lastBuffer) MemoryBuffer(newPage, m_allocSize);
+                ++m_lastBuffer;
+                return pBuff;
+            }
+        }
+
+        // Failed to find a buffer in the distance
+        return 0;
+    }
+
+public:
+    MemoryProvider() 
+    { 
+        SYSTEM_INFO sysInfo;
+        GetSystemInfo(&sysInfo);
+        m_allocSize = sysInfo.dwAllocationGranularity; 
+        m_lastBuffer = &m_pages[0];
+    }
+
+    // We can't free the pages in the destructor because the trampolines
+    // are using these memory locations and a replaced function might be called
+    // after the destructor was called.
+    ~MemoryProvider() 
+    {
+    }
+
+    // Return a memory location at a distance of less than 2^31 from the input address
+    UINT_PTR GetLocation(UINT_PTR addr)
+    {
+        MemoryBuffer *pBuff = m_pages;
+        for (; pBuff<m_lastBuffer && IsInDistance(pBuff->m_next, addr, MAX_DISTANCE); ++pBuff)
+        {
+            if (pBuff->m_next < pBuff->m_base + pBuff->m_size)
+            {
+                UINT_PTR loc = pBuff->m_next;
+                pBuff->m_next += MAX_PROBE_SIZE;
+                return loc;
+            }
+        }
+
+        pBuff = CreateBuffer(addr);
+        if(!pBuff)
+            return 0;
+
+        UINT_PTR loc = pBuff->m_next;
+        pBuff->m_next += MAX_PROBE_SIZE;
+        return loc;
+    }
+
+private:
+    MemoryBuffer m_pages[MAX_NUM_BUFFERS];
+    MemoryBuffer *m_lastBuffer;
+    DWORD m_allocSize;
+};
+
+static MemoryProvider memProvider;
+
+// Compare opcodes from the dictionary (str1) with opcodes from the code (str2)
+// str1 might contain '*' to mask addresses
+// RETURN: 0 if the opcodes did not match, the string length of str1 on success
+size_t compareStrings( const char *str1, const char *str2 )
+{
+   size_t str1Length = strlen(str1);
+   for (size_t i=0; i<str1Length; i++){
+       if( str1[i] != '*' && str1[i] != str2[i] ) return 0;
+   }
+   return str1Length;
+}
+
+// Check the function prologue against known prologues from the dictionary
+// opcodes - dictionary
+// inpAddr - pointer to the function prologue
+// The dictionary contains opcodes for several full asm instructions
+// plus one opcode byte of the next asm instruction for safe address processing
+// RETURN: number of bytes that can safely be replaced
+// (matched_pattern/2-1)
+UINT CheckOpcodes( const char ** opcodes, void *inpAddr )
+{
+    static size_t opcodesStringsCount = 0;
+    static size_t maxOpcodesLength = 0;
+    static size_t opcodes_pointer = (size_t)opcodes;
+    char opcodeString[61];
+    size_t i;
+    size_t result;
+
+    // Get the values for static variables
+    // max length and number of patterns
+    if( !opcodesStringsCount || opcodes_pointer != (size_t)opcodes ){
+        while( *(opcodes + opcodesStringsCount)!= NULL ){
+            if( (i=strlen(*(opcodes + opcodesStringsCount))) > maxOpcodesLength ) 
+                maxOpcodesLength = i;
+            opcodesStringsCount++;
+        }
+        opcodes_pointer = (size_t)opcodes;
+        __TBB_ASSERT( maxOpcodesLength < 61, "Limit is 30 opcodes/60 symbols per pattern" );
+    }
+
+    // Translate prologue opcodes to string format to compare
+    for( i=0; i< maxOpcodesLength/2; i++ ){
+        sprintf( opcodeString + 2*i, "%.2X", *((unsigned char*)inpAddr+i) ); 
+    }
+    opcodeString[maxOpcodesLength] = 0;
+    
+    // Compare translated opcodes with patterns
+    for( i=0; i< opcodesStringsCount; i++ ){
+        result = compareStrings( opcodes[i],opcodeString );
+        if( result ) 
+            return (UINT)(result/2-1);
+    }
+    // TODO: add more patterns
+    __TBB_ASSERT( false, "CheckOpcodes failed" );
+
+    // No match was found; just do not store the original code
+    return 0;
+}
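// A worked example of the arithmetic above, using the win32 pattern
// "6A1868********E8" from known_bytecodes (the concrete masked bytes below are
// hypothetical): the pattern is 16 hex characters, i.e. two complete
// instructions (push 18h; push imm32, with the immediate masked by '*') plus
// the first opcode byte of the following call. For a prologue such as
// 6A 18 68 D0 6A 44 00 E8 ..., compareStrings() returns 16 and CheckOpcodes()
// reports 16/2 - 1 = 7 bytes that can safely be overwritten by a trampoline.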
+
+// Insert jump relative instruction to the input address
+// RETURN: the size of the trampoline or 0 on failure
+static DWORD InsertTrampoline32(void *inpAddr, void *targetAddr, const char ** opcodes, void** storedAddr)
+{
+    UINT opcodesNumber = SIZE_OF_RELJUMP;
+    UINT_PTR srcAddr = Ptr2Addrint(inpAddr);
+    UINT_PTR tgtAddr = Ptr2Addrint(targetAddr);
+    // Check that the target fits in 32 bits
+    if (!IsInDistance(srcAddr, tgtAddr, MAX_DISTANCE))
+        return 0;
+
+    UINT_PTR offset;
+    UINT offset32;
+    UCHAR *codePtr = (UCHAR *)inpAddr;
+
+    // If requested, store original function code
+    if ( storedAddr ){
+        opcodesNumber = CheckOpcodes( opcodes, inpAddr );
+        if( opcodesNumber >= SIZE_OF_RELJUMP ){
+            UINT_PTR strdAddr = memProvider.GetLocation(srcAddr);
+            if (!strdAddr)
+                return 0;
+            *storedAddr = Addrint2Ptr(strdAddr);
+            // Set 'executable' flag for original instructions in the new place
+            DWORD pageFlags = PAGE_EXECUTE_READWRITE;
+            if (!VirtualProtect(*storedAddr, MAX_PROBE_SIZE, pageFlags, &pageFlags)) return 0;
+            // Copy original instructions to the new place
+            memcpy(*storedAddr, codePtr, opcodesNumber);
+            // Set jump to the code after replacement
+            offset = srcAddr - strdAddr - SIZE_OF_RELJUMP;
+            offset32 = (UINT)((offset & 0xFFFFFFFF));
+            *((UCHAR*)*storedAddr+opcodesNumber) = 0xE9;
+            memcpy(((UCHAR*)*storedAddr+opcodesNumber+1), &offset32, sizeof(offset32));
+        }else{
+            // No match was found; just do not store the original code
+            *storedAddr = NULL;
+        }
+    }
+
+    // The following will work correctly even if srcAddr>tgtAddr, as long as
+    // address difference is less than 2^31, which is guaranteed by IsInDistance.
+    offset = tgtAddr - srcAddr - SIZE_OF_RELJUMP;
+    offset32 = (UINT)(offset & 0xFFFFFFFF);
+    // Insert the jump to the new code
+    *codePtr = 0xE9;
+    memcpy(codePtr+1, &offset32, sizeof(offset32));
+
+    // Fill the rest with NOPs so that a debugger's disassembly of the old code stays readable.
+    for( unsigned i=SIZE_OF_RELJUMP; i<opcodesNumber; i++ ){
+        *(codePtr+i) = 0x90;
+    }
+
+    return SIZE_OF_RELJUMP;
+}
+
+// This function is called when the offset doesn't fit in 32 bits:
+// 1. Find and allocate a page within a short distance (<2^31) of the input address
+// 2. Insert a RIP-relative indirect jump through an address in that nearby page
+// 3. Put the absolute address of the target into the allocated location
+// RETURN: the size of the trampoline or 0 on failure
+static DWORD InsertTrampoline64(void *inpAddr, void *targetAddr, const char ** opcodes, void** storedAddr)
+{
+    UINT opcodesNumber = SIZE_OF_INDJUMP;
+
+    UINT_PTR srcAddr = Ptr2Addrint(inpAddr);
+    UINT_PTR tgtAddr = Ptr2Addrint(targetAddr);
+
+    // Get a location close to the source address
+    UINT_PTR location = memProvider.GetLocation(srcAddr);
+    if (!location)
+        return 0;
+
+    UINT_PTR offset;
+    UINT offset32;
+    UCHAR *codePtr = (UCHAR *)inpAddr;
+
+    // Fill the location
+    UINT_PTR *locPtr = (UINT_PTR *)Addrint2Ptr(location);
+    *locPtr = tgtAddr;
+
+    // If requested, store original function code
+    if( storedAddr ){
+        opcodesNumber = CheckOpcodes( opcodes, inpAddr );
+        if( opcodesNumber >= SIZE_OF_INDJUMP ){
+            UINT_PTR strdAddr = memProvider.GetLocation(srcAddr);
+            if (!strdAddr)
+                return 0;
+            *storedAddr = Addrint2Ptr(strdAddr);
+            // Set 'executable' flag for original instructions in the new place
+            DWORD pageFlags = PAGE_EXECUTE_READWRITE;
+            if (!VirtualProtect(*storedAddr, MAX_PROBE_SIZE, pageFlags, &pageFlags)) return 0;
+            // Copy original instructions to the new place
+            memcpy(*storedAddr, codePtr, opcodesNumber);
+            // Set a jump to the code after the replaced bytes. It is within the range of a relative jump!
+            offset = srcAddr - strdAddr - SIZE_OF_RELJUMP;
+            offset32 = (UINT)((offset & 0xFFFFFFFF));
+            *((UCHAR*)*storedAddr+opcodesNumber) = 0xE9;
+            memcpy(((UCHAR*)*storedAddr+opcodesNumber+1), &offset32, sizeof(offset32));
+        }else{
+            // No match was found; just do not store the original code
+            *storedAddr = NULL;
+        }
+    }
+
+    // Fill the buffer
+     offset = location - srcAddr - SIZE_OF_INDJUMP;
+     offset32 = (UINT)(offset & 0xFFFFFFFF);
+    *(codePtr) = 0xFF;
+    *(codePtr+1) = 0x25;
+    memcpy(codePtr+2, &offset32, sizeof(offset32));
+
+    // Fill the rest with NOPs so that a debugger's disassembly of the old code stays readable.
+    for( unsigned i=SIZE_OF_INDJUMP; i<opcodesNumber; i++ ){
+        *(codePtr+i) = 0x90;
+    }
+
+    return SIZE_OF_INDJUMP;
+}
+
+// Insert a jump instruction at inpAddr that targets targetAddr
+// 1. Get the memory protection of the page containing the input address
+// 2. Change the memory protection to writable
+// 3. Call InsertTrampoline32 or InsertTrampoline64
+// 4. Restore memory protection
+// RETURN: FALSE on failure, TRUE on success
+static bool InsertTrampoline(void *inpAddr, void *targetAddr, const char ** opcodes, void** origFunc)
+{
+    DWORD probeSize;
+    // Change page protection to EXECUTE+WRITE
+    DWORD origProt = 0;
+    if (!VirtualProtect(inpAddr, MAX_PROBE_SIZE, PAGE_EXECUTE_WRITECOPY, &origProt))
+        return FALSE;
+    probeSize = InsertTrampoline32(inpAddr, targetAddr, opcodes, origFunc);
+    if (!probeSize)
+        probeSize = InsertTrampoline64(inpAddr, targetAddr, opcodes, origFunc);
+
+    // Restore original protection
+    VirtualProtect(inpAddr, MAX_PROBE_SIZE, origProt, &origProt);
+
+    if (!probeSize)
+        return FALSE;
+
+    FlushInstructionCache(GetCurrentProcess(), inpAddr, probeSize);
+    // Also flush the relocated copy of the original code, if one was stored.
+    if (origFunc && *origFunc)
+        FlushInstructionCache(GetCurrentProcess(), *origFunc, probeSize);
+
+    return TRUE;
+}
+
+// Routine to replace the functions
+// TODO: pass the opcodes and their count so that we can verify we are replacing the right code.
+FRR_TYPE ReplaceFunctionA(const char *dllName, const char *funcName, FUNCPTR newFunc, const char ** opcodes, FUNCPTR* origFunc)
+{
+    // Cache the result of the last module lookup,
+    // assuming the DLL was not unloaded in between.
+    static char cachedName[MAX_PATH+1];
+    static HMODULE cachedHM = 0;
+
+    if (!dllName || !*dllName)
+        return FRR_NODLL;
+
+    if (!cachedHM || strncmp(dllName, cachedName, MAX_PATH) != 0)
+    {
+        // Find the module handle for the input dll
+        HMODULE hModule = GetModuleHandleA(dllName);
+        if (hModule == 0)
+        {
+            // Couldn't find the module with the input name
+            cachedHM = 0;
+            return FRR_NODLL;
+        }
+
+        cachedHM = hModule;
+        strncpy(cachedName, dllName, MAX_PATH);
+    }
+
+    FARPROC inpFunc = GetProcAddress(cachedHM, funcName);
+    if (inpFunc == 0)
+    {
+        // Function was not found
+        return FRR_NOFUNC;
+    }
+
+    if (!InsertTrampoline((void*)inpFunc, (void*)newFunc, opcodes, (void**)origFunc)){
+        // Failed to insert the trampoline to the target address
+        return FRR_FAILED;
+    }
+
+    return FRR_OK;
+}
+
+FRR_TYPE ReplaceFunctionW(const wchar_t *dllName, const char *funcName, FUNCPTR newFunc, const char ** opcodes, FUNCPTR* origFunc)
+{
+    // Cache the result of the last module lookup,
+    // assuming the DLL was not unloaded in between.
+    static wchar_t cachedName[MAX_PATH+1];
+    static HMODULE cachedHM = 0;
+
+    if (!dllName || !*dllName)
+        return FRR_NODLL;
+
+    if (!cachedHM || wcsncmp(dllName, cachedName, MAX_PATH) != 0)
+    {
+        // Find the module handle for the input dll
+        HMODULE hModule = GetModuleHandleW(dllName);
+        if (hModule == 0)
+        {
+            // Couldn't find the module with the input name
+            cachedHM = 0;
+            return FRR_NODLL;
+        }
+
+        cachedHM = hModule;
+        wcsncpy(cachedName, dllName, MAX_PATH);
+    }
+
+    FARPROC inpFunc = GetProcAddress(cachedHM, funcName);
+    if (inpFunc == 0)
+    {
+        // Function was not found
+        return FRR_NOFUNC;
+    }
+
+    if (!InsertTrampoline((void*)inpFunc, (void*)newFunc, opcodes, (void**)origFunc)){
+        // Failed to insert the trampoline to the target address
+        return FRR_FAILED;
+    }
+
+    return FRR_OK;
+}
+
+#endif //_WIN32
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/tbb_function_replacement.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/tbb_function_replacement.h
new file mode 100644 (file)
index 0000000..bf520b6
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_function_replacement_H
+#define __TBB_function_replacement_H
+
+typedef enum {
+    FRR_OK,     /* Succeeded in replacing the function */
+    FRR_NODLL,  /* The requested DLL was not found */
+    FRR_NOFUNC, /* The requested function was not found */
+    FRR_FAILED, /* The function replacement request failed */
+} FRR_TYPE;
+
+typedef enum {
+    FRR_FAIL,     /* Required function */
+    FRR_IGNORE,   /* Optional function */
+} FRR_ON_ERROR;
+
+typedef void (*FUNCPTR)();
+
+#ifndef UNICODE
+#define ReplaceFunction ReplaceFunctionA
+#else
+#define ReplaceFunction ReplaceFunctionW
+#endif //UNICODE
+
+FRR_TYPE ReplaceFunctionA(const char *dllName, const char *funcName, FUNCPTR newFunc, const char ** opcodes, FUNCPTR* origFunc=NULL);
+FRR_TYPE ReplaceFunctionW(const wchar_t *dllName, const char *funcName, FUNCPTR newFunc, const char ** opcodes, FUNCPTR* origFunc=NULL);
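+
+// Illustrative usage sketch only (not part of the library; the DLL name and the
+// replacement routine below are assumptions for the example).  Passing NULL for
+// `opcodes` and omitting `origFunc` is the simplest form: the original code is not
+// preserved, the export is simply redirected to the replacement.
+#if 0
+static void* my_malloc( size_t size );   // hypothetical replacement routine
+
+static void InstallHookExample()
+{
+    FRR_TYPE res = ReplaceFunctionA( "msvcr90.dll", "malloc",
+                                     (FUNCPTR)my_malloc, NULL );
+    if( res != FRR_OK ) {
+        // FRR_NODLL:  the module is not loaded in this process
+        // FRR_NOFUNC: the export was not found
+        // FRR_FAILED: the trampoline could not be installed
+    }
+}
+#endif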
+
+// Utilities to convert between ADDRESS and LPVOID
+union Int2Ptr {
+    UINT_PTR uip;
+    LPVOID lpv;
+};
+
+inline UINT_PTR Ptr2Addrint(LPVOID ptr);
+inline LPVOID Addrint2Ptr(UINT_PTR ptr);
+
+// Use this value as the maximum size of the trampoline region
+const unsigned MAX_PROBE_SIZE = 32;
+
+// The size of a relative jump instruction: "e9 00 00 00 00"
+const unsigned SIZE_OF_RELJUMP = 5;
+
+// The size of a RIP-relative indirect jump: "ff 25 00 00 00 00"
+const unsigned SIZE_OF_INDJUMP = 6;
+
+// The size of the address we store in the location (on Intel64)
+const unsigned SIZE_OF_ADDRESS = 8;
+
+// The maximum distance reachable with a 32-bit offset: 2^31 - 1 - C,
+// where C must be at least the size of a probe.
+// The latter is important for correctly handling "backward" jumps.
+const __int64 MAX_DISTANCE = (((__int64)1 << 31) - 1) - MAX_PROBE_SIZE;
+
+// The maximum number of distinct buffers in memory
+const ptrdiff_t MAX_NUM_BUFFERS = 256;
+
+#endif //__TBB_function_replacement_H
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/tbbmalloc.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/tbbmalloc.cpp
new file mode 100644 (file)
index 0000000..fff6744
--- /dev/null
@@ -0,0 +1,221 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "TypeDefinitions.h" // Customize.h and proxy.h get included
+
+#include "../tbb/itt_notify.h" // for __TBB_load_ittnotify()
+
+#undef UNICODE
+
+#if USE_PTHREAD
+#include <dlfcn.h>
+#elif USE_WINTHREAD
+#include "tbb/machine/windows_api.h"
+#endif
+
+#if MALLOC_CHECK_RECURSION
+
+#include <pthread.h>
+#include <stdio.h>
+#include <unistd.h>
+#if __sun
+#include <string.h> /* for memset */
+#include <errno.h>
+#endif
+
+#if MALLOC_LD_PRELOAD
+
+extern "C" {
+
+void   safer_scalable_free( void*, void (*)(void*) );
+void * safer_scalable_realloc( void*, size_t, void* );
+
+bool __TBB_internal_find_original_malloc(int num, const char *names[], void *table[])  __attribute__ ((weak));
+
+}
+
+#endif /* MALLOC_LD_PRELOAD */
+#endif /* MALLOC_CHECK_RECURSION */
+
+namespace rml {
+namespace internal {
+
+#if MALLOC_CHECK_RECURSION
+
+void* (*original_malloc_ptr)(size_t) = 0;
+void  (*original_free_ptr)(void*) = 0;
+static void* (*original_calloc_ptr)(size_t,size_t) = 0;
+static void* (*original_realloc_ptr)(void*,size_t) = 0;
+
+#endif /* MALLOC_CHECK_RECURSION */
+
+#if DO_ITT_NOTIFY
+/** Caller is responsible for ensuring this routine is called exactly once. */
+void MallocInitializeITT() {
+    tbb::internal::__TBB_load_ittnotify();
+}
+#else
+void MallocInitializeITT() {}
+#endif /* DO_ITT_NOTIFY */
+
+extern "C" 
+void ITT_DoOneTimeInitialization() {
+    MallocInitializeITT();
+} // required for itt_notify.cpp to work
+
+#if TBB_USE_DEBUG
+#define DEBUG_SUFFIX "_debug"
+#else
+#define DEBUG_SUFFIX
+#endif /* TBB_USE_DEBUG */
+
+// MALLOCLIB_NAME is the name of the TBB memory allocator library.
+#if _WIN32||_WIN64
+#define MALLOCLIB_NAME "tbbmalloc" DEBUG_SUFFIX ".dll"
+#elif __APPLE__
+#define MALLOCLIB_NAME "libtbbmalloc" DEBUG_SUFFIX ".dylib"
+#elif __linux__
+#define MALLOCLIB_NAME "libtbbmalloc" DEBUG_SUFFIX  __TBB_STRING(.so.TBB_COMPATIBLE_INTERFACE_VERSION)
+#elif __FreeBSD__ || __sun || _AIX
+#define MALLOCLIB_NAME "libtbbmalloc" DEBUG_SUFFIX ".so"
+#else
+#error Unknown OS
+#endif
+
+void init_tbbmalloc() {
+#if MALLOC_LD_PRELOAD
+    if (malloc_proxy && __TBB_internal_find_original_malloc) {
+        const char *alloc_names[] = { "malloc", "free", "realloc", "calloc"};
+        void *orig_alloc_ptrs[4];
+
+        if (__TBB_internal_find_original_malloc(4, alloc_names, orig_alloc_ptrs)) {
+            (void *&)original_malloc_ptr  = orig_alloc_ptrs[0];
+            (void *&)original_free_ptr    = orig_alloc_ptrs[1];
+            (void *&)original_realloc_ptr = orig_alloc_ptrs[2];
+            (void *&)original_calloc_ptr  = orig_alloc_ptrs[3];
+            MALLOC_ASSERT( original_malloc_ptr!=malloc_proxy,
+                           "standard malloc not found" );
+/* Workaround for a bug in GNU libc 2.9 (as shipped with Fedora 10):
+   the first call to libc's malloc must not come from threaded code.
+ */
+            original_free_ptr(original_malloc_ptr(1024));
+            original_malloc_found = 1;
+        }
+    }
+#endif /* MALLOC_LD_PRELOAD */
+
+#if DO_ITT_NOTIFY
+    MallocInitializeITT();
+#endif
+
+/* Prevent the TBB allocator library from being unloaded, to avoid a
+   resource leak: memory is not released when the library is unloaded.
+*/
+#if USE_WINTHREAD
+    // Prevent Windows from displaying message boxes if it fails to load the library
+    UINT prev_mode = SetErrorMode (SEM_FAILCRITICALERRORS);
+    LoadLibrary(MALLOCLIB_NAME);
+    SetErrorMode (prev_mode);
+#endif /* USE_WINTHREAD */
+}
+
+#if !(_WIN32||_WIN64)
+struct RegisterProcessShutdownNotification {
+    RegisterProcessShutdownNotification() {
+#if USE_PTHREAD
+        // prevents unloading, POSIX case
+        dlopen(MALLOCLIB_NAME, RTLD_NOW);
+#endif
+    }
+    ~RegisterProcessShutdownNotification() {
+        mallocProcessShutdownNotification();
+    }
+};
+
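+// A single static instance: its constructor pins the library in memory (POSIX case,
+// via the dlopen above), and its destructor delivers the process-shutdown
+// notification during static destruction.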
+static RegisterProcessShutdownNotification reg;
+#endif
+
+#if MALLOC_CHECK_RECURSION
+
+bool  original_malloc_found;
+
+#if MALLOC_LD_PRELOAD
+
+extern "C" {
+
+void * __TBB_internal_malloc(size_t size)
+{
+    return scalable_malloc(size);
+}
+
+void * __TBB_internal_calloc(size_t num, size_t size)
+{
+    return scalable_calloc(num, size);
+}
+
+int __TBB_internal_posix_memalign(void **memptr, size_t alignment, size_t size)
+{
+    return scalable_posix_memalign(memptr, alignment, size);
+}
+
+void* __TBB_internal_realloc(void* ptr, size_t sz)
+{
+    return safer_scalable_realloc(ptr, sz, (void*&)original_realloc_ptr);
+}
+
+void __TBB_internal_free(void *object)
+{
+    safer_scalable_free(object, original_free_ptr);
+}
+
+} /* extern "C" */
+
+#endif /* MALLOC_LD_PRELOAD */
+
+#endif /* MALLOC_CHECK_RECURSION */
+
+} } // namespaces
+
+#ifdef _WIN32
+
+extern "C" BOOL WINAPI DllMain( HINSTANCE hInst, DWORD callReason, LPVOID )
+{
+
+    if (callReason==DLL_THREAD_DETACH)
+    {
+        mallocThreadShutdownNotification(NULL);
+    }
+    else if (callReason==DLL_PROCESS_DETACH)
+    {
+        mallocProcessShutdownNotification();
+    }
+    return TRUE;
+}
+
+#endif //_WIN32
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/tbbmalloc.rc b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/tbbmalloc.rc
new file mode 100644 (file)
index 0000000..1aba798
--- /dev/null
@@ -0,0 +1,129 @@
+// Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+//
+// This file is part of Threading Building Blocks.
+//
+// Threading Building Blocks is free software; you can redistribute it
+// and/or modify it under the terms of the GNU General Public License
+// version 2 as published by the Free Software Foundation.
+//
+// Threading Building Blocks is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Threading Building Blocks; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+//
+// As a special exception, you may use this file as part of a free software
+// library without restriction.  Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License.  This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
+// Microsoft Visual C++ generated resource script.
+//
+#ifdef APSTUDIO_INVOKED
+#ifndef APSTUDIO_READONLY_SYMBOLS
+#define _APS_NO_MFC                     1
+#define _APS_NEXT_RESOURCE_VALUE        102
+#define _APS_NEXT_COMMAND_VALUE         40001
+#define _APS_NEXT_CONTROL_VALUE         1001
+#define _APS_NEXT_SYMED_VALUE           101
+#endif
+#endif
+
+#define APSTUDIO_READONLY_SYMBOLS
+/////////////////////////////////////////////////////////////////////////////
+//
+// Generated from the TEXTINCLUDE 2 resource.
+//
+#include <winresrc.h>
+#define ENDL "\r\n"
+#include "tbb/tbb_version.h"
+
+#define TBBMALLOC_VERNUMBERS TBB_VERSION_MAJOR, TBB_VERSION_MINOR, __TBB_VERSION_YMD
+#define TBBMALLOC_VERSION __TBB_STRING(TBBMALLOC_VERNUMBERS)
+
+/////////////////////////////////////////////////////////////////////////////
+#undef APSTUDIO_READONLY_SYMBOLS
+
+/////////////////////////////////////////////////////////////////////////////
+// Neutral resources
+
+#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_NEU)
+#ifdef _WIN32
+LANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL
+#pragma code_page(1252)
+#endif //_WIN32
+
+/////////////////////////////////////////////////////////////////////////////
+// manifest integration
+#ifdef TBB_MANIFEST
+#include "winuser.h"
+2 RT_MANIFEST tbbmanifest.exe.manifest
+#endif
+
+/////////////////////////////////////////////////////////////////////////////
+//
+// Version
+//
+
+VS_VERSION_INFO VERSIONINFO
+ FILEVERSION TBBMALLOC_VERNUMBERS
+ PRODUCTVERSION TBB_VERNUMBERS
+ FILEFLAGSMASK 0x17L
+#ifdef _DEBUG
+ FILEFLAGS 0x1L
+#else
+ FILEFLAGS 0x0L
+#endif
+ FILEOS 0x40004L
+ FILETYPE 0x2L
+ FILESUBTYPE 0x0L
+BEGIN
+    BLOCK "StringFileInfo"
+    BEGIN
+        BLOCK "000004b0"
+        BEGIN
+            VALUE "CompanyName", "Intel Corporation\0"
+            VALUE "FileDescription", "Scalable Allocator library\0"
+            VALUE "FileVersion", TBBMALLOC_VERSION "\0"
+//what is it?            VALUE "InternalName", "tbbmalloc\0"
+            VALUE "LegalCopyright", "Copyright 2005-2010 Intel Corporation.  All Rights Reserved.\0"
+            VALUE "LegalTrademarks", "\0"
+#ifndef TBB_USE_DEBUG
+            VALUE "OriginalFilename", "tbbmalloc.dll\0"
+#else
+            VALUE "OriginalFilename", "tbbmalloc_debug.dll\0"
+#endif
+            VALUE "ProductName", "Intel(R) Threading Building Blocks for Windows\0"
+            VALUE "ProductVersion", TBB_VERSION "\0"
+            VALUE "Comments", TBB_VERSION_STRINGS "\0"
+            VALUE "PrivateBuild", "\0"
+            VALUE "SpecialBuild", "\0"
+        END
+    END
+    BLOCK "VarFileInfo"
+    BEGIN
+        VALUE "Translation", 0x0, 1200
+    END
+END
+
+#endif    // Neutral resources
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef APSTUDIO_INVOKED
+/////////////////////////////////////////////////////////////////////////////
+//
+// Generated from the TEXTINCLUDE 3 resource.
+//
+
+
+/////////////////////////////////////////////////////////////////////////////
+#endif    // not APSTUDIO_INVOKED
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/tbbmalloc_internal.h b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/tbbmalloc_internal.h
new file mode 100644 (file)
index 0000000..ddb9c1a
--- /dev/null
@@ -0,0 +1,279 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef __TBB_tbbmalloc_internal_H
+#define __TBB_tbbmalloc_internal_H 1
+
+
+#include "TypeDefinitions.h" /* Also includes customization layer Customize.h */
+
+#if USE_PTHREAD
+    // Some pthreads documentation says that <pthread.h> must be the first header.
+    #include <pthread.h>
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#if MALLOC_CHECK_RECURSION
+#include <new>        /* for placement new */
+#endif
+
+#if __sun || __SUNPRO_CC
+#define __asm__ asm 
+#endif
+
+extern "C" {
+    void * scalable_malloc(size_t size);
+    void   scalable_free(void *object);
+    void mallocThreadShutdownNotification(void*);
+}
+
+
+/********* Various compile-time options        **************/
+
+#define MALLOC_TRACE 0
+
+#if MALLOC_TRACE
+#define TRACEF(x) printf x
+#else
+#define TRACEF(x) ((void)0)
+#endif /* MALLOC_TRACE */
+
+#define ASSERT_TEXT NULL
+
+#define COLLECT_STATISTICS MALLOC_DEBUG && defined(MALLOCENV_COLLECT_STATISTICS)
+#include "Statistics.h"
+
+/********* End compile-time options        **************/
+
+namespace rml {
+namespace internal {
+
+/********** Various numeric parameters controlling allocations ********/
+
+/*
+ * blockSize - the size of a block; it must be larger than maxSegregatedObjectSize.
+ *
+ */
+const uintptr_t blockSize = 16*1024;
+
+/*
+ * Difference between object sizes in large block bins
+ */
+const uint32_t largeBlockCacheStep = 8*1024;
+
+/*
+ * Cleanup frequency of the large-block cache.
+ * It should be a power of 2 for fast checking.
+ */
+const unsigned cacheCleanupFreq = 256;
+
+/*
+ * Alignment of large (>= minLargeObjectSize) objects.
+ */
+static int largeObjectAlignment = 64; // 64 is a common cache line size
+
+/********** End of numeric parameters controlling allocations *********/
+
+class BackRefIdx { // composite index to backreference array
+private:
+    uint16_t master;      // index in BackRefMaster
+    uint16_t largeObj:1;  // is this object "large"?
+    uint16_t offset  :15; // offset from beginning of BackRefBlock
+public:
+    BackRefIdx() : master((uint16_t)-1) {}
+    bool isInvalid() const { return master == (uint16_t)-1; }
+    bool isLargeObject() const { return largeObj; }
+    uint16_t getMaster() const { return master; }
+    uint16_t getOffset() const { return offset; }
+
+    // only newBackRef can modify BackRefIdx
+    static BackRefIdx newBackRef(bool largeObj);
+};
+
+struct LargeMemoryBlock {
+    LargeMemoryBlock *next,          // ptrs in list of cached blocks
+                     *prev;
+    uintptr_t         age;           // age of block while in cache
+    size_t            objectSize;    // the size requested by a client
+    size_t            unalignedSize; // the size requested from getMemory
+    bool              fromMapMemory;
+    BackRefIdx        backRefIdx;    // cached here, used copy is in LargeObjectHdr
+};
+
+struct LargeObjectHdr {
+    LargeMemoryBlock *memoryBlock;
+    /* Backreference points to LargeObjectHdr. 
+       Duplicated in LargeMemoryBlock to reuse in subsequent allocations. */
+    BackRefIdx       backRefIdx;
+};
+
+struct FreeObject {
+    FreeObject  *next;
+};
+
+// interface class for external access to Block
+class BlockI {
+public:
+    static BlockI *getRawBlock(bool startup);
+    void initialize(void *bumpPtr);
+};
+
+class FreeBlocks {
+    typedef void* (*RawAlloc) (size_t size, bool useMapMem);
+    typedef void (*RawFree) (void *object, size_t size, bool useMapMem);
+
+    RawAlloc rawAlloc;
+    RawFree rawFree;
+    size_t memReqSize;
+
+    bool mallocBigBlock();
+public:
+    bool bootstrap(RawAlloc myAlloc, RawFree myFree, size_t myReqSize);
+    BlockI *get(bool startup);
+    void put(BlockI *block, bool startup);
+    void putList(BlockI *head, BlockI *tail);
+};
+
+extern FreeBlocks freeBlocks;
+
+/******* A helper class to support overriding malloc with scalable_malloc *******/
+#if MALLOC_CHECK_RECURSION
+
+class RecursiveMallocCallProtector {
+    // pointer to automatic data of the holding thread
+    static void       *autoObjPtr;
+    static MallocMutex rmc_mutex;
+    static pthread_t   owner_thread;
+/* Under FreeBSD 8.0, the first call to any pthread function (including
+   pthread_self) triggers pthread initialization, which itself calls malloc.
+   Because the first use of RecursiveMallocCallProtector may happen before
+   pthreads is initialized, pthread calls cannot be used in its first instance.
+   RecursiveMallocCallProtector is first used in checkInitialization(), so by
+   its second use pthreads is guaranteed to be initialized.
+   No such situation has been observed with the other supported OSes.
+ */
+#if __FreeBSD__
+    static bool        canUsePthread;
+#else
+    static const bool  canUsePthread = true;
+#endif
+/*
+  The variable is modified in checkInitialization,
+  so it can be read without memory barriers.
+ */
+    static bool mallocRecursionDetected;
+
+    MallocMutex::scoped_lock* lock_acquired;
+    char scoped_lock_space[sizeof(MallocMutex::scoped_lock)+1];
+
+    static uintptr_t absDiffPtr(void *x, void *y) {
+        uintptr_t xi = (uintptr_t)x, yi = (uintptr_t)y;
+        return xi > yi ? xi - yi : yi - xi;
+    }
+public:
+
+    RecursiveMallocCallProtector() : lock_acquired(NULL) {
+        lock_acquired = new (scoped_lock_space) MallocMutex::scoped_lock( rmc_mutex );
+        if (canUsePthread)
+            owner_thread = pthread_self();
+        autoObjPtr = &scoped_lock_space;
+    }
+    ~RecursiveMallocCallProtector() {
+        if (lock_acquired) {
+            autoObjPtr = NULL;
+            lock_acquired->~scoped_lock();
+        }
+    }
+    static bool sameThreadActive() {
+        if (!autoObjPtr) // fast path
+            return false;
+        // Some thread has an active recursive call protector; check whether it is the current one.
+        // Exact pthread_self based test
+        if (canUsePthread) {
+            if (pthread_equal( owner_thread, pthread_self() )) {
+                mallocRecursionDetected = true;
+                return true;
+            } else
+                return false;
+        }
+        // inexact stack size based test
+        const uintptr_t threadStackSz = 2*1024*1024;
+        int dummy;
+        return absDiffPtr(autoObjPtr, &dummy)<threadStackSz;
+    }
+    static bool noRecursion();
+/* The function is called on the first scalable_malloc call to check whether malloc
+   calls scalable_malloc (a nested call must set mallocRecursionDetected). */
+    static void detectNaiveOverload() {
+        if (!malloc_proxy) {
+#if __FreeBSD__
+/* If !canUsePthread, we could not call pthread_self() earlier, but pthreads is
+   initialized by now, so we can do it here. False positives here lead to silently
+   switching from malloc to mmap for all large allocations, with a significant
+   performance impact. */
+            if (!canUsePthread) {
+                canUsePthread = true;
+                owner_thread = pthread_self();
+            }
+#endif
+            free(malloc(1));
+        }
+    }
+};
+
+#else
+
+class RecursiveMallocCallProtector {
+public:
+    RecursiveMallocCallProtector() {}
+    ~RecursiveMallocCallProtector() {}
+};
+
+#endif  /* MALLOC_CHECK_RECURSION */
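+
+// Illustrative sketch only (an assumption about intended usage, not code used by
+// the allocator): a path that may re-enter malloc first checks sameThreadActive()
+// to detect recursion and guards the risky region with a protector object.
+#if 0
+void* guardedAllocExample( size_t size )
+{
+    if( RecursiveMallocCallProtector::sameThreadActive() )
+        return NULL;                       // recursion detected: take a fallback path
+    RecursiveMallocCallProtector scoped;   // protector is released at end of scope
+    // ... code that may indirectly call malloc ...
+    return scalable_malloc( size );
+}
+#endif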
+
+bool isMallocInitializedExt();
+
+void* getRawMemory (size_t size, bool useMapMem);
+void freeRawMemory (void *object, size_t size, bool useMapMem);
+
+extern const uint32_t minLargeObjectSize;
+bool isLargeObject(void *object);
+void* mallocLargeObject (size_t size, size_t alignment, bool startupAlloc = false);
+void freeLargeObject (void *object);
+
+unsigned int getThreadId();
+
+bool initBackRefMaster();
+void removeBackRef(BackRefIdx backRefIdx);
+void setBackRef(BackRefIdx backRefIdx, void *newPtr);
+void *getBackRef(BackRefIdx backRefIdx);
+
+} // namespace internal
+} // namespace rml
+
+#endif // __TBB_tbbmalloc_internal_H
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/win-gcc-tbbmalloc-export.def b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/win-gcc-tbbmalloc-export.def
new file mode 100644 (file)
index 0000000..7a7d072
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+{
+global:
+scalable_calloc;
+scalable_free;
+scalable_malloc;
+scalable_realloc;
+scalable_posix_memalign;
+scalable_aligned_malloc;
+scalable_aligned_realloc;
+scalable_aligned_free;
+safer_scalable_free;
+safer_scalable_realloc;
+scalable_msize;
+safer_scalable_msize;
+safer_scalable_aligned_realloc;
+local:*;
+};
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/win32-tbbmalloc-export.def b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/win32-tbbmalloc-export.def
new file mode 100644 (file)
index 0000000..798879e
--- /dev/null
@@ -0,0 +1,42 @@
+; Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+;
+; This file is part of Threading Building Blocks.
+;
+; Threading Building Blocks is free software; you can redistribute it
+; and/or modify it under the terms of the GNU General Public License
+; version 2 as published by the Free Software Foundation.
+;
+; Threading Building Blocks is distributed in the hope that it will be
+; useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+; of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+; GNU General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with Threading Building Blocks; if not, write to the Free Software
+; Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+;
+; As a special exception, you may use this file as part of a free software
+; library without restriction.  Specifically, if other files instantiate
+; templates or use macros or inline functions from this file, or you compile
+; this file and link it with other files to produce an executable, this
+; file does not by itself cause the resulting executable to be covered by
+; the GNU General Public License.  This exception does not however
+; invalidate any other reasons why the executable file might be covered by
+; the GNU General Public License.
+
+EXPORTS
+
+; MemoryAllocator.cpp
+scalable_calloc
+scalable_free
+scalable_malloc
+scalable_realloc
+scalable_posix_memalign
+scalable_aligned_malloc
+scalable_aligned_realloc
+scalable_aligned_free
+safer_scalable_free
+safer_scalable_realloc
+scalable_msize
+safer_scalable_msize
+safer_scalable_aligned_realloc
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/win64-tbbmalloc-export.def b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/win64-tbbmalloc-export.def
new file mode 100644 (file)
index 0000000..798879e
--- /dev/null
@@ -0,0 +1,42 @@
+; Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+;
+; This file is part of Threading Building Blocks.
+;
+; Threading Building Blocks is free software; you can redistribute it
+; and/or modify it under the terms of the GNU General Public License
+; version 2 as published by the Free Software Foundation.
+;
+; Threading Building Blocks is distributed in the hope that it will be
+; useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+; of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+; GNU General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with Threading Building Blocks; if not, write to the Free Software
+; Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+;
+; As a special exception, you may use this file as part of a free software
+; library without restriction.  Specifically, if other files instantiate
+; templates or use macros or inline functions from this file, or you compile
+; this file and link it with other files to produce an executable, this
+; file does not by itself cause the resulting executable to be covered by
+; the GNU General Public License.  This exception does not however
+; invalidate any other reasons why the executable file might be covered by
+; the GNU General Public License.
+
+EXPORTS
+
+; MemoryAllocator.cpp
+scalable_calloc
+scalable_free
+scalable_malloc
+scalable_realloc
+scalable_posix_memalign
+scalable_aligned_malloc
+scalable_aligned_realloc
+scalable_aligned_free
+safer_scalable_free
+safer_scalable_realloc
+scalable_msize
+safer_scalable_msize
+safer_scalable_aligned_realloc
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/xbox360-tbbmalloc-export.def b/deal.II/contrib/tbb/tbb30_104oss/src/tbbmalloc/xbox360-tbbmalloc-export.def
new file mode 100644 (file)
index 0000000..86509ad
--- /dev/null
@@ -0,0 +1,42 @@
+; Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+;
+; This file is part of Threading Building Blocks.
+;
+; Threading Building Blocks is free software; you can redistribute it
+; and/or modify it under the terms of the GNU General Public License
+; version 2 as published by the Free Software Foundation.
+;
+; Threading Building Blocks is distributed in the hope that it will be
+; useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+; of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+; GNU General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with Threading Building Blocks; if not, write to the Free Software
+; Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+;
+; As a special exception, you may use this file as part of a free software
+; library without restriction.  Specifically, if other files instantiate
+; templates or use macros or inline functions from this file, or you compile
+; this file and link it with other files to produce an executable, this
+; file does not by itself cause the resulting executable to be covered by
+; the GNU General Public License.  This exception does not however
+; invalidate any other reasons why the executable file might be covered by
+; the GNU General Public License.
+
+EXPORTS
+
+; MemoryAllocator.cpp
+scalable_calloc @1
+scalable_free @2
+scalable_malloc @3
+scalable_realloc @4
+scalable_posix_memalign @5
+scalable_aligned_malloc @6
+scalable_aligned_realloc @7
+scalable_aligned_free @8
+safer_scalable_free @9
+safer_scalable_realloc @10
+scalable_msize @11
+safer_scalable_msize @12
+safer_scalable_aligned_realloc @13
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/harness.h b/deal.II/contrib/tbb/tbb30_104oss/src/test/harness.h
new file mode 100644 (file)
index 0000000..c4805b2
--- /dev/null
@@ -0,0 +1,461 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Declarations for rock-bottom simple test harness.
+// Just include this file to use it.
+// Every test is presumed to have a command line of the form "test [-v] [MinThreads[:MaxThreads]]"
+// The default for MinThreads is 1 and for MaxThreads is 4.
+// The defaults can be overridden by defining the macros HARNESS_DEFAULT_MIN_THREADS
+// and HARNESS_DEFAULT_MAX_THREADS before including harness.h.
+
+#ifndef tbb_tests_harness_H
+#define tbb_tests_harness_H
+
+#include "tbb/tbb_config.h"
+
+namespace Harness {
+    enum TestResult {
+        Done,
+        Skipped
+    };
+}
+
+//! Entry point to a TBB unit test application
+/** It MUST be defined by the test application.
+    
+    If the HARNESS_NO_PARSE_COMMAND_LINE macro was not explicitly set before including
+    harness.h, then the global variables Verbose, MinThread, and MaxThread will be
+    available and initialized when it is called.
+
+    Returns Harness::Done when the tests pass successfully. When a test fails, it must
+    not return; it should call exit(errcode) or abort() instead. When the test is not
+    supported for the given platform/compiler/etc., it should return Harness::Skipped.
+
+    To provide a non-standard variant of main() for the test, define HARNESS_CUSTOM_MAIN
+    before including harness.h. **/
+int TestMain ();
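+
+// Illustrative sketch only (an assumption, not part of the harness): a minimal test
+// satisfying the TestMain contract described above.
+#if 0
+int TestMain () {
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        // ... exercise the feature under test with p threads ...
+    }
+    return Harness::Done;
+}
+#endif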
+
+#define __TBB_LAMBDAS_PRESENT  ( _MSC_VER >= 1600 && !__INTEL_COMPILER || __INTEL_COMPILER > 1100 && _TBB_CPP0X )
+
+#if defined(_MSC_VER) && _MSC_VER < 1400
+    #define __TBB_EXCEPTION_TYPE_INFO_BROKEN 1
+#else
+    #define __TBB_EXCEPTION_TYPE_INFO_BROKEN 0
+#endif
+
+#if __SUNPRO_CC
+    #include <stdlib.h>
+    #include <string.h>
+#else /* !__SUNPRO_CC */
+    #include <cstdlib>
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+    #include <cstring>
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+#endif /* !__SUNPRO_CC */
+
+#include <new>
+
+    #define HARNESS_EXPORT
+    #define REPORT_FATAL_ERROR REPORT
+
+#if _WIN32||_WIN64
+    #include "tbb/machine/windows_api.h"
+#if _XBOX
+    #undef HARNESS_NO_PARSE_COMMAND_LINE
+    #define HARNESS_NO_PARSE_COMMAND_LINE 1
+#endif
+    #include <process.h>
+#else
+    #include <pthread.h>
+#endif
+#if __linux__
+    #include <sys/utsname.h> /* for uname */
+    #include <errno.h>       /* for use in LinuxKernelVersion() */
+#endif
+
+#include "harness_report.h"
+
+#if !HARNESS_NO_ASSERT
+#include "harness_assert.h"
+
+typedef void (*test_error_extra_t)(void);
+static test_error_extra_t ErrorExtraCall; 
+//! Set additional handler to process failed assertions
+void SetHarnessErrorProcessing( test_error_extra_t extra_call ) {
+    ErrorExtraCall = extra_call;
+    // TODO: add tbb::set_assertion_handler(ReportError);
+}
+//! Reports errors issued by failed assertions
+void ReportError( const char* filename, int line, const char* expression, const char * message ) {
+#if __TBB_ICL_11_1_CODE_GEN_BROKEN
+    printf("%s:%d, assertion %s: %s\n", filename, line, expression, message ? message : "failed" );
+#else
+    REPORT_FATAL_ERROR("%s:%d, assertion %s: %s\n", filename, line, expression, message ? message : "failed" );
+#endif
+    if( ErrorExtraCall )
+        (*ErrorExtraCall)();
+#if HARNESS_TERMINATE_ON_ASSERT
+    TerminateProcess(GetCurrentProcess(), 1);
+#elif HARNESS_EXIT_ON_ASSERT
+    exit(1);
+#else
+    abort();
+#endif /* HARNESS_EXIT_ON_ASSERT */
+}
+//! Reports warnings issued by failed warning assertions
+void ReportWarning( const char* filename, int line, const char* expression, const char * message ) {
+    REPORT("Warning: %s:%d, assertion %s: %s\n", filename, line, expression, message ? message : "failed" );
+}
+#else
+#define ASSERT(p,msg) ((void)0)
+#define ASSERT_WARNING(p,msg) ((void)0)
+#endif /* HARNESS_NO_ASSERT */
+
+#if !HARNESS_NO_PARSE_COMMAND_LINE
+
+//! Controls level of commentary printed via printf-like REMARK() macro.
+/** If true, makes the test print commentary.  If false, test should print "done" and nothing more. */
+static bool Verbose;
+
+#ifndef HARNESS_DEFAULT_MIN_THREADS
+    #define HARNESS_DEFAULT_MIN_THREADS 1
+#endif
+
+//! Minimum number of threads
+static int MinThread = HARNESS_DEFAULT_MIN_THREADS;
+
+#ifndef HARNESS_DEFAULT_MAX_THREADS
+    #define HARNESS_DEFAULT_MAX_THREADS 4
+#endif
+
+//! Maximum number of threads
+static int MaxThread = HARNESS_DEFAULT_MAX_THREADS;
+
+//! Parse command line of the form "name [-v] [MinThreads[:MaxThreads]]"
+/** Sets Verbose, MinThread, and MaxThread accordingly.
+    The nthread argument can be a single number or a range of the form m:n.
+    A single number m is interpreted as if written m:m. 
+    The numbers must be non-negative.  
+    Clients often treat the value 0 as "run sequentially." */
+static void ParseCommandLine( int argc, char* argv[] ) {
+    if( !argc ) REPORT("Command line with 0 arguments\n");
+    int i = 1;  
+    if( i<argc ) {
+        if( strncmp( argv[i], "-v", 2 )==0 ) {
+            Verbose = true;
+            ++i;
+        }
+    }
+    if( i<argc ) {
+        char* endptr;
+        MinThread = strtol( argv[i], &endptr, 0 );
+        if( *endptr==':' )
+            MaxThread = strtol( endptr+1, &endptr, 0 );
+        else if( *endptr=='\0' ) 
+            MaxThread = MinThread;
+        if( *endptr!='\0' ) {
+            REPORT_FATAL_ERROR("garbled nthread range\n");
+            exit(1);
+        }    
+        if( MinThread<0 ) {
+            REPORT_FATAL_ERROR("nthread must be nonnegative\n");
+            exit(1);
+        }
+        if( MaxThread<MinThread ) {
+            REPORT_FATAL_ERROR("nthread range is backwards\n");
+            exit(1);
+        }
+        ++i;
+    }
+#if __TBB_STDARGS_BROKEN
+    if ( !argc )
+        argc = 1;
+    else {
+        while ( i < argc && argv[i][0] == 0 )
+            ++i;
+    }
+#endif /* __TBB_STDARGS_BROKEN */
+    if( i!=argc ) {
+        REPORT_FATAL_ERROR("Usage: %s [-v] [nthread|minthread:maxthread]\n", argv[0] );
+        exit(1);
+    }
+}
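+// For example (illustration only): "test -v 2:8" sets Verbose=true, MinThread=2, and
+// MaxThread=8, while "test 4" sets MinThread=MaxThread=4.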
+#endif /* HARNESS_NO_PARSE_COMMAND_LINE */
+
+#if !HARNESS_CUSTOM_MAIN
+
+HARNESS_EXPORT
+#if HARNESS_NO_PARSE_COMMAND_LINE
+int main() {
+#else
+int main(int argc, char* argv[]) {
+    ParseCommandLine( argc, argv );
+#endif
+    int res = TestMain ();
+    ASSERT( res==Harness::Done || res==Harness::Skipped, "Wrong return code by TestMain");
+    REPORT( res==Harness::Done ? "done\n" : "skip\n" );
+    return 0;
+}
+
+#endif /* !HARNESS_CUSTOM_MAIN */
+
+//! Base class for prohibiting compiler-generated operator=
+class NoAssign {
+    //! Assignment not allowed
+    void operator=( const NoAssign& );
+public:
+#if __GNUC__
+    //! Explicitly define default construction, because otherwise gcc issues gratuitous warning.
+    NoAssign() {}
+#endif /* __GNUC__ */
+};
+
+//! Base class for prohibiting compiler-generated copy constructor or operator=
+class NoCopy: NoAssign {
+    //! Copy construction not allowed  
+    NoCopy( const NoCopy& );
+public:
+    NoCopy() {}
+};
+
+//! For internal use by template function NativeParallelFor
+template<typename Index, typename Body>
+class NativeParallelForTask: NoCopy {
+public:
+    NativeParallelForTask( Index index_, const Body& body_ ) :
+        index(index_),
+        body(body_)
+    {}
+
+    //! Start task
+    void start() {
+#if _WIN32||_WIN64
+        unsigned thread_id;
+        thread_handle = (HANDLE)_beginthreadex( NULL, 0, thread_function, this, 0, &thread_id );
+        ASSERT( thread_handle!=0, "NativeParallelFor: _beginthreadex failed" );
+#else
+#if __ICC==1100
+    #pragma warning (push)
+    #pragma warning (disable: 2193)
+#endif /* __ICC==1100 */
+        // Some machines may have a very large hard stack limit. When the test is
+        // launched by make, the default stack size is set to the hard limit, and
+        // calls to pthread_create fail with an out-of-memory error.
+        // Therefore we set the stack size explicitly (as for TBB worker threads).
+        const size_t MByte = 1<<20;
+#if __i386__||__i386
+        const size_t stack_size = 1*MByte;
+#elif __x86_64__
+        const size_t stack_size = 2*MByte;
+#else
+        const size_t stack_size = 4*MByte;
+#endif
+        pthread_attr_t attr_stack;
+        int status = pthread_attr_init(&attr_stack);
+        ASSERT(0==status, "NativeParallelFor: pthread_attr_init failed");
+        status = pthread_attr_setstacksize( &attr_stack, stack_size );
+        ASSERT(0==status, "NativeParallelFor: pthread_attr_setstacksize failed");
+        status = pthread_create(&thread_id, &attr_stack, thread_function, this);
+        ASSERT(0==status, "NativeParallelFor: pthread_create failed");
+        pthread_attr_destroy(&attr_stack);
+#if __ICC==1100
+    #pragma warning (pop)
+#endif
+#endif /* _WIN32||_WIN64 */
+    }
+
+    //! Wait for task to finish
+    void wait_to_finish() {
+#if _WIN32||_WIN64
+        DWORD status = WaitForSingleObject( thread_handle, INFINITE );
+        ASSERT( status!=WAIT_FAILED, "WaitForSingleObject failed" );
+        CloseHandle( thread_handle );
+#else
+        int status = pthread_join( thread_id, NULL );
+        ASSERT( !status, "pthread_join failed" );
+#endif 
+    }
+
+private:
+#if _WIN32||_WIN64
+    HANDLE thread_handle;
+#else
+    pthread_t thread_id;
+#endif
+
+    //! Index value with which the task invokes the body.
+    const Index index;
+
+    //! Body to invoke over the range.
+    const Body body;
+
+#if _WIN32||_WIN64
+    static unsigned __stdcall thread_function( void* object )
+#else
+    static void* thread_function(void* object)
+#endif
+    {
+        NativeParallelForTask& self = *static_cast<NativeParallelForTask*>(object);
+        (self.body)(self.index);
+        return 0;
+    }
+};
+
+//! Execute body(i) in parallel for i in the interval [0,n).
+/** Each iteration is performed by a separate thread. */
+template<typename Index, typename Body>
+void NativeParallelFor( Index n, const Body& body ) {
+    typedef NativeParallelForTask<Index,Body> task;
+
+    if( n>0 ) {
+        // Allocate array to hold the tasks
+        task* array = static_cast<task*>(operator new( n*sizeof(task) ));
+
+        // Construct the tasks
+        for( Index i=0; i!=n; ++i ) 
+            new( &array[i] ) task(i,body);
+
+        // Start the tasks
+        for( Index i=0; i!=n; ++i )
+            array[i].start();
+
+        // Wait for the tasks to finish and destroy each one.
+        for( Index i=n; i; --i ) {
+            array[i-1].wait_to_finish();
+            array[i-1].~task();
+        }
+
+        // Deallocate the task array
+        operator delete(array);
+    }
+}
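+
+// Illustrative sketch only (an assumption, not part of the harness): invoking
+// NativeParallelFor with a functor whose operator()(Index) const runs one iteration.
+#if 0
+struct PrintBody {
+    void operator()( int i ) const { REPORT( "iteration %d\n", i ); }
+};
+// Somewhere in a test:  NativeParallelFor( 4, PrintBody() );
+#endif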
+
+//! The function to zero-initialize arrays; useful to avoid warnings
+template <typename T>
+void zero_fill(void* array, size_t n) {
+    memset(array, 0, sizeof(T)*n);
+}
+
+#if __SUNPRO_CC && defined(min)
+#undef min
+#undef max
+#endif
+
+#ifndef min
+//! Utility template function returning the lesser of two values.
+/** Provided here to avoid including the not-strictly-safe <algorithm> header.\n
+    If the operands cause signed/unsigned or size mismatch warnings, it is the caller's
+    responsibility to perform the appropriate cast before calling the function. **/
+template<typename T1, typename T2>
+T1 min ( const T1& val1, const T2& val2 ) {
+    return val1 < val2 ? val1 : val2;
+}
+#endif /* !min */
+
+#ifndef max
+//! Utility template function returning the greater of two values.
+/** Provided here to avoid including the not-strictly-safe <algorithm> header.\n
+    If the operands cause signed/unsigned or size mismatch warnings, it is the caller's
+    responsibility to perform the appropriate cast before calling the function. **/
+template<typename T1, typename T2>
+T1 max ( const T1& val1, const T2& val2 ) {
+    return val1 < val2 ? val2 : val1;
+}
+#endif /* !max */
+
+#if __linux__
+inline unsigned LinuxKernelVersion()
+{
+    unsigned digit1, digit2, digit3;
+    struct utsname utsnameBuf;
+    
+    if (-1 == uname(&utsnameBuf)) {
+        REPORT_FATAL_ERROR("Can't call uname: errno %d\n", errno);
+        exit(1);
+    }
+    if (3 != sscanf(utsnameBuf.release, "%u.%u.%u", &digit1, &digit2, &digit3)) {
+        REPORT_FATAL_ERROR("Unable to parse OS release '%s'\n", utsnameBuf.release);
+        exit(1);
+    }
+    return 1000000*digit1+1000*digit2+digit3;
+}
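+
+// For example (illustration only): kernel 2.6.32 is encoded as
+// 1000000*2 + 1000*6 + 32 == 2006032, so a test can check
+//     if( LinuxKernelVersion() >= 2006032 ) ...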
+#endif
+
+namespace Harness {
+
+#if !HARNESS_NO_ASSERT
+//! Base class that asserts that no operations are made with the object after its destruction.
+class NoAfterlife {
+protected:
+    enum state_t {
+        LIVE=0x56781234,
+        DEAD=0xDEADBEEF
+    } m_state;
+
+public:
+    NoAfterlife() : m_state(LIVE) {}
+    NoAfterlife( const NoAfterlife& src ) : m_state(LIVE) {
+        ASSERT( src.IsLive(), "Constructing from the dead source" );
+    }
+    ~NoAfterlife() {
+        ASSERT( IsLive(), "Repeated destructor call" );
+        m_state = DEAD;
+    }
+    const NoAfterlife& operator=( const NoAfterlife& src ) {
+        ASSERT( IsLive(), NULL );
+        ASSERT( src.IsLive(), NULL );
+        return *this;
+    }
+    void AssertLive() const {
+        ASSERT( IsLive(), "Already dead" );
+    }
+    bool IsLive() const {
+        return m_state == LIVE;
+    }
+}; // NoAfterlife
+#endif /* !HARNESS_NO_ASSERT */
+
+#if _WIN32 || _WIN64
+    void Sleep ( int ms ) { ::Sleep(ms); }
+#else /* !WIN */
+    void Sleep ( int ms ) {
+        timespec  requested = { ms / 1000, (ms % 1000)*1000000 };
+        timespec  remaining = { 0, 0 };
+        nanosleep(&requested, &remaining);
+    }
+#endif /* !WIN */
+
+} // namespace Harness
+
+#endif /* tbb_tests_harness_H */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_allocator.h b/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_allocator.h
new file mode 100644 (file)
index 0000000..00e5cd6
--- /dev/null
@@ -0,0 +1,293 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Declarations for simple estimate of the memory being used by a program.
+// Not yet implemented for Mac.
+// This header is an optional part of the test harness.
+// It assumes that "harness_assert.h" has already been included.
+
+#if __linux__ || __APPLE__ || __sun
+#include <unistd.h>
+#elif _WIN32 
+#include "tbb/machine/windows_api.h"
+#endif /* OS specific */
+#include <new>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <stdexcept>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+#include "tbb/atomic.h"
+
+#if __SUNPRO_CC
+using std::printf;
+#endif
+
+#if defined(_MSC_VER) && defined(_Wp64)
+    // Workaround for overzealous compiler warnings in /Wp64 mode
+    #pragma warning (push)
+    #pragma warning (disable: 4267)
+#endif
+
+
+template <typename base_alloc_t, typename count_t = tbb::atomic<size_t> >
+class static_counting_allocator : public base_alloc_t
+{
+public:
+    typedef typename base_alloc_t::pointer pointer;
+    typedef typename base_alloc_t::const_pointer const_pointer;
+    typedef typename base_alloc_t::reference reference;
+    typedef typename base_alloc_t::const_reference const_reference;
+    typedef typename base_alloc_t::value_type value_type;
+    typedef typename base_alloc_t::size_type size_type;
+    typedef typename base_alloc_t::difference_type difference_type;
+    template<typename U> struct rebind {
+        typedef static_counting_allocator<typename base_alloc_t::template rebind<U>::other,count_t> other;
+    };
+
+    static size_t max_items;
+    static count_t items_allocated;
+    static count_t items_freed;
+    static count_t allocations;
+    static count_t frees;
+    static bool verbose, throwing;
+
+    static_counting_allocator() throw() { }
+
+    static_counting_allocator(const static_counting_allocator& src) throw() 
+    : base_alloc_t(src) { }
+
+    template<typename U, typename C>
+    static_counting_allocator(const static_counting_allocator<U, C>& src) throw()
+    : base_alloc_t(src) { }
+
+    bool operator==(const static_counting_allocator &a) const
+    { return true; }
+
+    pointer allocate(const size_type n)
+    {
+        if(verbose) printf("\t+%d|", int(n));
+        if(max_items && items_allocated + n >= max_items) {
+            if(verbose) printf("items limit hits!");
+            if(throwing)
+                __TBB_THROW( std::bad_alloc() );
+            return NULL;
+        }
+        allocations++;
+        items_allocated += n;
+        return base_alloc_t::allocate(n, pointer(0));
+    }
+
+    pointer allocate(const size_type n, const void * const)
+    {   return allocate(n); }
+
+    void deallocate(const pointer ptr, const size_type n)
+    {
+        if(verbose) printf("\t-%d|", int(n));
+        frees++;
+        items_freed += n;
+        base_alloc_t::deallocate(ptr, n);
+    }
+
+    static void init_counters(bool v = false) {
+        verbose = v;
+        if(verbose) printf("\n------------------------------------------- Allocations:\n");
+        items_allocated = 0;
+        items_freed = 0;
+        allocations = 0;
+        frees = 0;
+        max_items = 0;
+    }
+
+    static void set_limits(size_type max = 0, bool do_throw = true) {
+        max_items = max;
+        throwing = do_throw;
+    }
+};
+
+template <typename base_alloc_t, typename count_t>
+size_t static_counting_allocator<base_alloc_t, count_t>::max_items;
+template <typename base_alloc_t, typename count_t>
+count_t static_counting_allocator<base_alloc_t, count_t>::items_allocated;
+template <typename base_alloc_t, typename count_t>
+count_t static_counting_allocator<base_alloc_t, count_t>::items_freed;
+template <typename base_alloc_t, typename count_t>
+count_t static_counting_allocator<base_alloc_t, count_t>::allocations;
+template <typename base_alloc_t, typename count_t>
+count_t static_counting_allocator<base_alloc_t, count_t>::frees;
+template <typename base_alloc_t, typename count_t>
+bool static_counting_allocator<base_alloc_t, count_t>::verbose;
+template <typename base_alloc_t, typename count_t>
+bool static_counting_allocator<base_alloc_t, count_t>::throwing;
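+
+// Illustrative sketch only (an assumption, not part of the harness): counting the
+// allocations a standard container performs by wrapping std::allocator.
+#if 0
+#include <vector>
+typedef static_counting_allocator< std::allocator<int> > counting_alloc;
+
+void CountVectorAllocationsExample() {
+    counting_alloc::init_counters();
+    counting_alloc::set_limits( 0, /*do_throw=*/false );  // max_items==0 means "no limit"
+    std::vector<int, counting_alloc> v( 1000 );
+    ASSERT( counting_alloc::allocations >= 1, "expected at least one allocation" );
+}
+#endif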
+
+template <typename base_alloc_t, typename count_t = tbb::atomic<size_t> >
+class local_counting_allocator : public base_alloc_t
+{
+public:
+    typedef typename base_alloc_t::pointer pointer;
+    typedef typename base_alloc_t::const_pointer const_pointer;
+    typedef typename base_alloc_t::reference reference;
+    typedef typename base_alloc_t::const_reference const_reference;
+    typedef typename base_alloc_t::value_type value_type;
+    typedef typename base_alloc_t::size_type size_type;
+    typedef typename base_alloc_t::difference_type difference_type;
+    template<typename U> struct rebind {
+        typedef local_counting_allocator<typename base_alloc_t::template rebind<U>::other,count_t> other;
+    };
+
+    count_t items_allocated;
+    count_t items_freed;
+    count_t allocations;
+    count_t frees;
+    size_t max_items;
+
+    local_counting_allocator() throw() {
+        items_allocated = 0;
+        items_freed = 0;
+        allocations = 0;
+        frees = 0;
+        max_items = 0;
+    }
+
+    local_counting_allocator(const local_counting_allocator &a) throw()
+        : base_alloc_t(a)
+        , items_allocated(a.items_allocated)
+        , items_freed(a.items_freed)
+        , allocations(a.allocations)
+        , frees(a.frees)
+        , max_items(a.max_items)
+    { }
+
+    template<typename U, typename C>
+    local_counting_allocator(const static_counting_allocator<U,C> &) throw() {
+        items_allocated = static_counting_allocator<U,C>::items_allocated;
+        items_freed = static_counting_allocator<U,C>::items_freed;
+        allocations = static_counting_allocator<U,C>::allocations;
+        frees = static_counting_allocator<U,C>::frees;
+        max_items = static_counting_allocator<U,C>::max_items;
+    }
+
+    template<typename U, typename C>
+    local_counting_allocator(const local_counting_allocator<U,C> &a) throw()
+        : items_allocated(a.items_allocated)
+        , items_freed(a.items_freed)
+        , allocations(a.allocations)
+        , frees(a.frees)
+        , max_items(a.max_items)
+    { }
+
+    bool operator==(const local_counting_allocator &a) const
+    { return &a == this; }
+
+    pointer allocate(const size_type n)
+    {
+        if(max_items && items_allocated + n >= max_items)
+            __TBB_THROW( std::bad_alloc() );
+        ++allocations;
+        items_allocated += n;
+        return base_alloc_t::allocate(n, pointer(0));
+    }
+
+    pointer allocate(const size_type n, const void * const)
+    { return allocate(n); }
+
+    void deallocate(const pointer ptr, const size_type n)
+    {
+        ++frees;
+        items_freed += n;
+        base_alloc_t::deallocate(ptr, n);
+    }
+
+    void set_limits(size_type max = 0) {
+        max_items = max;
+    }
+};
+
+template <typename T, template<typename X> class Allocator = std::allocator>
+class debug_allocator : public Allocator<T>
+{
+public:
+    typedef Allocator<T> base_allocator_type;
+    typedef typename base_allocator_type::value_type value_type;
+    typedef typename base_allocator_type::pointer pointer;
+    typedef typename base_allocator_type::const_pointer const_pointer;
+    typedef typename base_allocator_type::reference reference;
+    typedef typename base_allocator_type::const_reference const_reference;
+    typedef typename base_allocator_type::size_type size_type;
+    typedef typename base_allocator_type::difference_type difference_type;
+    template<typename U> struct rebind {
+        typedef debug_allocator<U, Allocator> other;
+    };
+
+    debug_allocator() throw() { }
+    debug_allocator(const debug_allocator &a) throw() : base_allocator_type( a ) { }
+    template<typename U>
+    debug_allocator(const debug_allocator<U> &a) throw() : base_allocator_type( Allocator<U>( a ) ) { }
+
+    pointer allocate(const size_type n, const void *hint = 0 ) {
+        pointer ptr = base_allocator_type::allocate( n, hint );
+        std::memset( ptr, 0xE3E3E3E3, n * sizeof(value_type) );
+        return ptr;
+    }
+};
+
+//! Analogous to std::allocator<void>, as defined in ISO C++ Standard, Section 20.4.1
+/** @ingroup memory_allocation */
+template<template<typename T> class Allocator> 
+class debug_allocator<void, Allocator> : public Allocator<void> {
+public:
+    typedef Allocator<void> base_allocator_type;
+    typedef typename base_allocator_type::value_type value_type;
+    typedef typename base_allocator_type::pointer pointer;
+    typedef typename base_allocator_type::const_pointer const_pointer;
+    template<typename U> struct rebind {
+        typedef debug_allocator<U, Allocator> other;
+    };
+};
+
+template<typename T1, template<typename X1> class B1, typename T2, template<typename X2> class B2>
+inline bool operator==( const debug_allocator<T1,B1> &a, const debug_allocator<T2,B2> &b) {
+    return static_cast< B1<T1> >(a) == static_cast< B2<T2> >(b);
+}
+template<typename T1, template<typename X1> class B1, typename T2, template<typename X2> class B2>
+inline bool operator!=( const debug_allocator<T1,B1> &a, const debug_allocator<T2,B2> &b) {
+    return static_cast< B1<T1> >(a) != static_cast< B2<T2> >(b);
+}
+
+#if defined(_MSC_VER) && defined(_Wp64)
+    // Workaround for overzealous compiler warnings in /Wp64 mode
+    #pragma warning (pop)
+#endif // warning 4267 is back
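A minimal usage sketch for the counting allocators above (not part of the diff): it assumes the harness's ASSERT macro is available and uses plain std::allocator<int> as the base allocator; counter comparisons are cast to size_t because the counters are tbb::atomic values.

    #include <vector>
    #include "tbb/atomic.h"
    #include "harness_allocator.h"   // the counting/debug allocator header above

    typedef static_counting_allocator<std::allocator<int>, tbb::atomic<size_t> > counting_alloc;

    void TestVectorAllocationsAreBalanced() {
        counting_alloc::init_counters();               // reset the static counters
        {
            std::vector<int, counting_alloc> v;
            for( int i = 0; i < 1000; ++i )
                v.push_back(i);
        }                                              // vector destroyed here
        ASSERT( size_t(counting_alloc::allocations) == size_t(counting_alloc::frees), NULL );
        ASSERT( size_t(counting_alloc::items_allocated) == size_t(counting_alloc::items_freed), NULL );
    }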
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_assert.h b/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_assert.h
new file mode 100644 (file)
index 0000000..0fecbd5
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Just the assertion portion of the harness.
+// This is useful for writing portions of tests that include
+// the minimal number of necessary header files.
+//
+// The full "harness.h" must be included later.
+
+#ifndef harness_assert_H
+#define harness_assert_H
+
+void ReportError( const char* filename, int line, const char* expression, const char* message); 
+void ReportWarning( const char* filename, int line, const char* expression, const char* message); 
+
+#define ASSERT(p,message) ((p)?(void)0:ReportError(__FILE__,__LINE__,#p,message))
+#define ASSERT_WARNING(p,message) ((p)?(void)0:ReportWarning(__FILE__,__LINE__,#p,message))
+
+//! Compile-time error if x and y have different types
+template<typename T>
+void AssertSameType( const T& /*x*/, const T& /*y*/ ) {}
+
+#endif /* harness_assert_H */
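A minimal sketch of how a test uses this header on its own (ReportError and ReportWarning are supplied by the full harness.h, which must be linked in):

    #include "harness_assert.h"

    void SanityChecks() {
        int expected = 4;
        ASSERT( 2 + 2 == expected, "arithmetic failure" );    // aborts the test on failure
        ASSERT_WARNING( expected != 5, "unexpected value" );  // only warns on failure
        AssertSameType( expected, 42 );                       // compiles only because both are int
    }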
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_bad_expr.h b/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_bad_expr.h
new file mode 100644 (file)
index 0000000..2fc0b4a
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Declarations for checking __TBB_ASSERT checks inside TBB.
+// This header is an optional part of the test harness.
+// It assumes that "harness.h" has already been included.
+
+#define TRY_BAD_EXPR_ENABLED (TBB_USE_ASSERT && TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN)
+
+#if TRY_BAD_EXPR_ENABLED
+
+//! Check that expression x raises assertion failure with message containing given substring.
+/** Assumes that tbb::set_assertion_handler( AssertionFailureHandler ) was called earlier. */
+#define TRY_BAD_EXPR(x,substr)          \
+    {                                   \
+        const char* message = NULL;     \
+        bool okay = false;              \
+        try {                           \
+            x;                          \
+        } catch( AssertionFailure a ) { \
+            okay = true;                \
+            message = a.message;        \
+        }                               \
+        CheckAssertionFailure(__LINE__,#x,okay,message,substr); \
+    }
+
+//! Exception object that holds a message.
+struct AssertionFailure {
+    const char* message;
+    AssertionFailure( const char* filename, int line, const char* expression, const char* comment );
+};
+
+AssertionFailure::AssertionFailure( const char* filename, int line, const char* expression, const char* comment ) : 
+    message(comment) 
+{
+    ASSERT(filename,"missing filename");
+    ASSERT(0<line,"line number must be positive");
+    // All of our current files have fewer than 4000 lines.
+    ASSERT(line<5000,"dubiously high line number");
+    ASSERT(expression,"missing expression");
+}
+
+void AssertionFailureHandler( const char* filename, int line, const char* expression, const char* comment ) {
+    throw AssertionFailure(filename,line,expression,comment);
+}
+
+void CheckAssertionFailure( int line, const char* expression, bool okay, const char* message, const char* substr ) {
+    if( !okay ) {
+        REPORT("Line %d, %s failed to fail\n", line, expression );
+        abort();
+    } else if( !message ) {
+        REPORT("Line %d, %s failed without a message\n", line, expression );
+        abort();
+    } else if( strstr(message,substr)==0 ) {                            
+        REPORT("Line %d, %s failed with message '%s' missing substring '%s'\n", __LINE__, expression, message, substr );
+        abort();
+    }
+}
+
+#endif /* TRY_BAD_EXPR_ENABLED */
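A usage sketch, assuming harness.h was included first as this header requires; SomeCallWithBrokenPrecondition() is a made-up placeholder for the call whose __TBB_ASSERT is being provoked:

    #if TRY_BAD_EXPR_ENABLED
    void TestBrokenPreconditionIsAsserted() {
        tbb::set_assertion_handler( AssertionFailureHandler ); // route assertion failures into exceptions
        TRY_BAD_EXPR( SomeCallWithBrokenPrecondition(), "substring expected in the assertion text" );
        tbb::set_assertion_handler( NULL );                    // restore the default handler
    }
    #endif /* TRY_BAD_EXPR_ENABLED */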
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_barrier.h b/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_barrier.h
new file mode 100644 (file)
index 0000000..78d7c76
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/atomic.h"
+
+#ifndef harness_barrier_H
+#define harness_barrier_H
+
+namespace Harness {
+
+class SpinBarrier
+{
+    unsigned numThreads;
+    tbb::atomic<unsigned> numThreadsFinished; /* threads reached barrier in this epoch */
+    tbb::atomic<unsigned> epoch;   /* how many times this barrier has been used - XXX move to a separate cache line */
+
+    struct DummyCallback {
+        void operator() () const {}
+    };
+
+    SpinBarrier( const SpinBarrier& );    // no copy ctor
+    void operator=( const SpinBarrier& ); // no assignment 
+public:
+    SpinBarrier( unsigned nthreads = 0 ) { initialize(nthreads); };
+
+    void initialize( unsigned nthreads ) {
+        numThreads = nthreads;
+        numThreadsFinished = 0;
+        epoch = 0;
+    };
+
+    // onOpenBarrierCallback is called by the last thread to arrive at the barrier
+    template<typename Callback>
+    bool wait(const Callback &onOpenBarrierCallback)
+    { // return true if last thread
+        unsigned myEpoch = epoch;
+        int threadsLeft = numThreads - numThreadsFinished.fetch_and_increment() - 1;
+        ASSERT(threadsLeft>=0, "Broken barrier");
+        if (threadsLeft > 0) {
+            /* not the last thread to reach the barrier; wait until the epoch changes, then return false */
+            tbb::internal::spin_wait_while_eq(epoch, myEpoch);
+            return false;
+        }
+        /* No more threads left to enter, so I'm the last one reaching this epoch;
+           reset the barrier, increment the epoch, and return true */
+        onOpenBarrierCallback();
+        numThreadsFinished = 0;
+        epoch = myEpoch+1; /* wakes up threads waiting to exit this epoch */
+        return true;
+    }
+    bool wait()
+    {
+        return wait(DummyCallback());
+    }
+};
+
+}
+
+#endif //harness_barrier_H
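A sketch of the usual SpinBarrier pattern (assumes harness.h supplies NativeParallelFor and NoAssign): every thread calls wait(), and exactly one of them, the last to arrive, sees a true return value.

    #include "harness.h"
    #include "harness_barrier.h"

    static Harness::SpinBarrier ThePhaseBarrier;

    struct PhaseBody : NoAssign {
        void operator()( int /*tid*/ ) const {
            // ... phase 1 work ...
            if( ThePhaseBarrier.wait() ) {
                // only the last thread to arrive runs this block
            }
            // ... phase 2 work; starts only after every thread finished phase 1 ...
        }
    };

    void TestTwoPhases( int nthreads ) {
        ThePhaseBarrier.initialize( nthreads );
        NativeParallelFor( nthreads, PhaseBody() );
    }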
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_concurrency_tracker.h b/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_concurrency_tracker.h
new file mode 100644 (file)
index 0000000..5a5fa88
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef tbb_tests_harness_concurrency_tracker_H
+#define tbb_tests_harness_concurrency_tracker_H
+
+#include "harness.h"
+#include "tbb/atomic.h"
+#include "../tbb/tls.h"
+
+namespace Harness {
+
+static tbb::atomic<unsigned> ctInstantParallelism;
+static tbb::atomic<unsigned> ctPeakParallelism;
+static tbb::internal::tls<uintptr_t>  ctNested;
+
+class ConcurrencyTracker {
+    bool    m_Outer;
+
+    static void Started () {
+        unsigned p = ++ctInstantParallelism;
+        unsigned q = ctPeakParallelism;
+        while( q<p ) {
+            q = ctPeakParallelism.compare_and_swap(p,q);
+        }
+    }
+
+    static void Stopped () {
+        ASSERT ( ctInstantParallelism > 0, "Mismatched call to ConcurrencyTracker::Stopped()" );
+        --ctInstantParallelism;
+    }
+public:
+    ConcurrencyTracker() : m_Outer(false) {
+        uintptr_t nested = ctNested;
+        ASSERT (nested == 0 || nested == 1, NULL);
+        if ( !ctNested ) {
+            Started();
+            m_Outer = true;
+            ctNested = 1;
+        }
+    }
+    ~ConcurrencyTracker() {
+        if ( m_Outer ) {
+            Stopped();
+            ctNested = 0;
+        }
+    }
+
+    static unsigned PeakParallelism() { return ctPeakParallelism; }
+    static unsigned InstantParallelism() { return ctInstantParallelism; }
+
+    static void Reset() {
+        ASSERT (ctInstantParallelism == 0, "Reset cannot be called when concurrency tracking is underway");
+        ctInstantParallelism = ctPeakParallelism = 0;
+    }
+}; // ConcurrencyTracker
+
+} // namespace Harness
+
+#endif /* tbb_tests_harness_concurrency_tracker_H */
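A sketch of how the tracker is typically used (the header already pulls in harness.h, so NativeParallelFor and ASSERT are available):

    #include "harness_concurrency_tracker.h"

    struct TrackedBody {
        void operator()( int ) const {
            Harness::ConcurrencyTracker ct;   // counts this thread as active for the enclosing scope
            // ... work whose concurrency is being measured ...
        }
    };

    void CheckConcurrency( int nthreads ) {
        Harness::ConcurrencyTracker::Reset();
        NativeParallelFor( nthreads, TrackedBody() );
        ASSERT( (int)Harness::ConcurrencyTracker::PeakParallelism() <= nthreads, NULL );
    }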
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_cpu.h b/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_cpu.h
new file mode 100644 (file)
index 0000000..b3c91c3
--- /dev/null
@@ -0,0 +1,129 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Declarations for a simple estimate of the CPU time being used by a program.
+// This header is an optional part of the test harness.
+// It assumes that "harness_assert.h" has already been included.
+
+#if _WIN32 
+#if !_XBOX
+    #include <windows.h>
+#endif
+#else
+    #include <sys/time.h>
+    #include <sys/resource.h>
+#endif
+
+//! Return time (in seconds) spent by the current process in user mode.
+/*  Returns 0 if not implemented on platform. */
+static double GetCPUUserTime() { 
+#if _XBOX
+    return 0;
+#elif _WIN32
+    FILETIME my_times[4];
+    bool status = GetProcessTimes(GetCurrentProcess(), my_times, my_times+1, my_times+2, my_times+3)!=0;
+    ASSERT( status, NULL );
+    LARGE_INTEGER usrtime;
+    usrtime.LowPart = my_times[3].dwLowDateTime;
+    usrtime.HighPart = my_times[3].dwHighDateTime;
+    return double(usrtime.QuadPart)*1E-7;
+#else
+    // Generic UNIX, including __APPLE__
+
+    // On Linux, there is no good way to get CPU usage info for the current process:
+    //   getrusage(RUSAGE_SELF, ...), which is used here, only returns info for the calling thread;
+    //   getrusage(RUSAGE_CHILDREN, ...) only counts finished child threads;
+    //   tms_utime and tms_cutime obtained with times(struct tms*) are equivalent to the above;
+    //   finally, /proc/self/task/<task_id>/stat doesn't exist on older kernels
+    //      and it isn't convenient to read it for every task_id.
+
+    struct rusage resources;
+    bool status = getrusage(RUSAGE_SELF, &resources)==0;
+    ASSERT( status, NULL );
+    return (double(resources.ru_utime.tv_sec)*1E6 + double(resources.ru_utime.tv_usec))*1E-6;
+#endif
+}
+
+#include "tbb/tick_count.h"
+#include <cstdio>
+
+// The resolution of GetCPUUserTime is 10-15 ms or so; waittime should be a few times bigger.
+const double WAITTIME = 0.1; // in seconds, i.e. 100 ms
+const double THRESHOLD = WAITTIME/100;
+
+static void TestCPUUserTime( int nthreads, int nactive = 1 ) {
+    // The test will always pass on Linux; read the comments in GetCPUUserTime for details
+    // Also it will not detect spinning issues on systems with only one processing core.
+
+    int nworkers = nthreads-nactive;
+    if( !nworkers ) return;
+    double lastusrtime = GetCPUUserTime();
+    if( !lastusrtime ) return;
+
+    static double minimal_waittime = WAITTIME,
+                  maximal_waittime = WAITTIME * 10;
+    double usrtime;
+    double waittime;
+    tbb::tick_count stamp = tbb::tick_count::now();
+    // wait for GetCPUUserTime update
+    while( (usrtime=GetCPUUserTime())-lastusrtime < THRESHOLD ) {
+        volatile intptr_t k = (intptr_t)&usrtime;
+        for ( int i = 0; i < 1000; ++i ) ++k;
+        if ( (waittime = (tbb::tick_count::now()-stamp).seconds()) > maximal_waittime ) {
+            REPORT( "Warning: %.2f sec elapsed but user mode time is still below its threshold (%g < %g)\n", 
+                    waittime, usrtime - lastusrtime, THRESHOLD );
+            break;
+        }
+    }
+    lastusrtime = usrtime;
+    
+    // Wait for the workers to go to sleep
+    stamp = tbb::tick_count::now();
+    while( ((waittime=(tbb::tick_count::now()-stamp).seconds()) < minimal_waittime) 
+            || ((usrtime=GetCPUUserTime()-lastusrtime) < THRESHOLD) )
+    {
+        if ( waittime > maximal_waittime ) {
+            REPORT( "Warning: %.2f sec elapsed but GetCPUUserTime reported only %g sec\n", waittime, usrtime );
+            break;
+        }
+    }
+
+    // Test that all workers sleep when there is no work.
+    while( nactive>1 && usrtime-nactive*waittime<0 ) {
+        // probably the number of active threads was mispredicted
+        --nactive; ++nworkers;
+    }
+    double avg_worker_usrtime = (usrtime-nactive*waittime)/nworkers;
+
+    if( avg_worker_usrtime > waittime/2 )
+        REPORT( "ERROR: %d worker threads are spinning; waittime: %g; usrtime: %g; avg worker usrtime: %g\n",
+                nworkers, waittime, usrtime, avg_worker_usrtime);
+    else
+        REMARK("%d worker threads; waittime: %g; usrtime: %g; avg worker usrtime: %g\n",
+                        nworkers, waittime, usrtime, avg_worker_usrtime);
+}
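A short sketch of the intended call pattern: after the parallel algorithm under test finishes, a test that expects only one thread to stay busy calls TestCPUUserTime to catch workers that spin instead of sleeping.

    void AfterParallelWork( int nthreads ) {
        // ... the parallel algorithm under test has just completed ...
        TestCPUUserTime( nthreads );        // nactive defaults to 1 busy thread
        // TestCPUUserTime( nthreads, 2 );  // variant if two threads are legitimately active
    }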
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_eh.h b/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_eh.h
new file mode 100644 (file)
index 0000000..48f4b2e
--- /dev/null
@@ -0,0 +1,244 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include <typeinfo>
+#include "tbb/atomic.h"
+#include "harness.h"
+#include "harness_concurrency_tracker.h"
+
+namespace Harness {
+#if _WIN32 || _WIN64
+    typedef DWORD tid_t;
+    tid_t CurrentTid () { return GetCurrentThreadId(); }
+#else /* !WIN */
+    typedef pthread_t tid_t;
+    tid_t CurrentTid () { return pthread_self(); }
+#endif /* !WIN */
+} // namespace Harness
+
+int g_NumThreads = 0;
+Harness::tid_t  g_Master = 0;
+
+tbb::atomic<intptr_t> g_CurExecuted,
+                      g_ExecutedAtCatch,
+                      g_ExceptionsThrown;
+volatile bool g_ExceptionCaught = false,
+              g_UnknownException = false;
+
+volatile bool g_ThrowException = true,
+              g_Flog = false;
+
+bool    g_ExceptionInMaster = false;
+bool    g_SolitaryException = false;
+
+//! Number of exceptions propagated into the user code (i.e. intercepted by the tests)
+tbb::atomic<intptr_t> g_Exceptions;
+
+inline void ResetEhGlobals ( bool throwException = true, bool flog = false ) {
+    Harness::ConcurrencyTracker::Reset();
+    g_CurExecuted = g_ExecutedAtCatch = 0;
+    g_ExceptionCaught = false;
+    g_UnknownException = false;
+    g_ThrowException = throwException;
+    g_Flog = flog;
+    g_ExceptionsThrown = g_Exceptions = 0;
+}
+
+#if TBB_USE_EXCEPTIONS
+class test_exception : public std::exception {
+    const char* my_description;
+public:
+    test_exception ( const char* description ) : my_description(description) {}
+
+    const char* what() const throw() { return my_description; }
+};
+
+class solitary_test_exception : public test_exception {
+public:
+    solitary_test_exception ( const char* description ) : test_exception(description) {}
+};
+
+#if TBB_USE_CAPTURED_EXCEPTION
+    typedef tbb::captured_exception PropagatedException;
+    #define EXCEPTION_NAME(e) e.name()
+#else
+    typedef test_exception PropagatedException;
+    #define EXCEPTION_NAME(e) typeid(e).name()
+#endif
+
+#define EXCEPTION_DESCR "Test exception"
+
+#if HARNESS_EH_SIMPLE_MODE
+
+static void ThrowTestException () { 
+    ++g_ExceptionsThrown;
+    throw test_exception(EXCEPTION_DESCR);
+}
+
+#else /* !HARNESS_EH_SIMPLE_MODE */
+
+static void ThrowTestException ( intptr_t threshold ) {
+    if ( !g_ThrowException || (!g_Flog && (g_ExceptionInMaster ^ (Harness::CurrentTid() == g_Master))) )
+        return; 
+    while ( Existed() < threshold )
+        __TBB_Yield();
+    if ( !g_SolitaryException ) {
+        ++g_ExceptionsThrown;
+        throw test_exception(EXCEPTION_DESCR);
+    }
+    if ( g_ExceptionsThrown.compare_and_swap(1, 0) == 0 )
+        throw solitary_test_exception(EXCEPTION_DESCR);
+}
+#endif /* !HARNESS_EH_SIMPLE_MODE */
+
+#define CATCH()     \
+    } catch ( PropagatedException& e ) { \
+        g_ExecutedAtCatch = g_CurExecuted; \
+        ASSERT( e.what(), "Empty what() string" );  \
+        ASSERT (__TBB_EXCEPTION_TYPE_INFO_BROKEN || strcmp(EXCEPTION_NAME(e), (g_SolitaryException ? typeid(solitary_test_exception) : typeid(test_exception)).name() ) == 0, "Unexpected original exception name"); \
+        ASSERT (__TBB_EXCEPTION_TYPE_INFO_BROKEN || strcmp(e.what(), EXCEPTION_DESCR) == 0, "Unexpected original exception info"); \
+        g_ExceptionCaught = exceptionCaught = true; \
+        ++g_Exceptions; \
+    } catch ( tbb::tbb_exception& e ) { \
+        REPORT("Unexpected %s\n", e.name()); \
+        ASSERT (g_UnknownException && !g_UnknownException, "Unexpected tbb::tbb_exception" ); \
+    } catch ( std::exception& e ) { \
+        REPORT("Unexpected %s\n", typeid(e).name()); \
+        ASSERT (g_UnknownException && !g_UnknownException, "Unexpected std::exception" ); \
+    } catch ( ... ) { \
+        g_ExceptionCaught = exceptionCaught = true; \
+        g_UnknownException = unknownException = true; \
+    } \
+    if ( !g_SolitaryException ) \
+        REMARK_ONCE ("Multiple exceptions mode: %d throws", (intptr_t)g_ExceptionsThrown);
+
+#define ASSERT_EXCEPTION() \
+    ASSERT (g_ExceptionsThrown ? g_ExceptionCaught : true, "throw without catch"); \
+    ASSERT (!g_ExceptionsThrown ? !g_ExceptionCaught : true, "catch without throw"); \
+    ASSERT (g_ExceptionCaught, "no exception occurred"); \
+    ASSERT (__TBB_EXCEPTION_TYPE_INFO_BROKEN || !g_UnknownException, "unknown exception was caught")
+
+#define CATCH_AND_ASSERT() \
+    CATCH() \
+    ASSERT_EXCEPTION()
+
+#else /* !TBB_USE_EXCEPTIONS */
+
+inline void ThrowTestException ( intptr_t ) {}
+
+#endif /* !TBB_USE_EXCEPTIONS */
+
+#define TRY()   \
+    bool exceptionCaught = false, unknownException = false;    \
+    __TBB_TRY {
+
+// "exceptionCaught || unknownException" is used only to "touch" otherwise unused local variables
+#define CATCH_AND_FAIL() } __TBB_CATCH(...) { \
+        ASSERT (false, "Canceling tasks must not cause any exceptions");    \
+        (void)(exceptionCaught && unknownException);                        \
+    }
+
+const int c_Timeout = 1000000;
+
+void WaitUntilConcurrencyPeaks ( int expected_peak ) {
+    if ( g_Flog )
+        return;
+    int n = 0;
+retry:
+    while ( ++n < c_Timeout && (int)Harness::ConcurrencyTracker::PeakParallelism() < expected_peak )
+        __TBB_Yield();
+    ASSERT_WARNING( n < c_Timeout, "Missed wakeup or machine is overloaded?" );
+    // Workaround in case a missed wakeup takes place
+    if ( n == c_Timeout ) {
+        tbb::task &r = *new( tbb::task::allocate_root() ) tbb::empty_task();
+        r.spawn(r);
+        n = 0;
+        goto retry;
+    }
+}
+
+inline void WaitUntilConcurrencyPeaks () { WaitUntilConcurrencyPeaks(g_NumThreads); }
+
+inline bool IsMaster() {
+    return Harness::CurrentTid() == g_Master;
+}
+
+inline bool IsThrowingThread() {
+    return g_ExceptionInMaster ^ IsMaster() ? true : false;
+}
+
+class CancellatorTask : public tbb::task {
+    static volatile bool s_Ready;
+    tbb::task_group_context &m_groupToCancel;
+    intptr_t m_cancellationThreshold;
+
+    tbb::task* execute () {
+        Harness::ConcurrencyTracker ct;
+        s_Ready = true;
+        while ( g_CurExecuted < m_cancellationThreshold )
+            __TBB_Yield();
+        m_groupToCancel.cancel_group_execution();
+        g_ExecutedAtCatch = g_CurExecuted;
+        return NULL;
+    }
+public:
+    CancellatorTask ( tbb::task_group_context& ctx, intptr_t threshold )
+        : m_groupToCancel(ctx), m_cancellationThreshold(threshold)
+    {
+        s_Ready = false;
+    }
+
+    static void Reset () { s_Ready = false; }
+
+    static bool WaitUntilReady () {
+        const intptr_t limit = 10000000;
+        intptr_t n = 0;
+        do {
+            __TBB_Yield();
+        } while( !s_Ready && ++n < limit );
+        ASSERT( s_Ready || n == limit, NULL );
+        return s_Ready;
+    }
+};
+
+volatile bool CancellatorTask::s_Ready = false;
+
+template<class LauncherTaskT, class CancellatorTaskT>
+void RunCancellationTest ( intptr_t threshold = 1 )
+{
+    tbb::task_group_context  ctx;
+    tbb::empty_task &r = *new( tbb::task::allocate_root(ctx) ) tbb::empty_task;
+    r.set_ref_count(3);
+    r.spawn( *new( r.allocate_child() ) CancellatorTaskT(ctx, threshold) );
+    __TBB_Yield();
+    r.spawn( *new( r.allocate_child() ) LauncherTaskT(ctx) );
+    TRY();
+        r.wait_for_all();
+    CATCH_AND_FAIL();
+    r.destroy(r);
+}
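A sketch of the standard exception-propagation pattern built from the pieces above; SomeParallelCallThatThrows() is a made-up placeholder whose body is expected to call ThrowTestException():

    #if TBB_USE_EXCEPTIONS
    void TestExceptionPropagation() {
        g_Master = Harness::CurrentTid();
        ResetEhGlobals();
        TRY();
            SomeParallelCallThatThrows();   // its body calls ThrowTestException(1)
        CATCH_AND_ASSERT();                 // records the exception and validates its type/message
        ASSERT( g_ExceptionCaught, "exception was not propagated to the caller" );
    }
    #endif /* TBB_USE_EXCEPTIONS */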
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_inject_scheduler.h b/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_inject_scheduler.h
new file mode 100644 (file)
index 0000000..3c7ac61
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Used in tests that work with TBB scheduler but do not link to the TBB library.
+// In other words it embeds the TBB library core into the test executable.
+
+#ifndef harness_inject_scheduler_H
+#define harness_inject_scheduler_H
+
+// Suppress usage of #pragma comment
+#define __TBB_NO_IMPLICIT_LINKAGE 1
+
+#define __TBB_TASK_CPP_DIRECTLY_INCLUDED 1
+#include "../tbb/tbb_main.cpp"
+
+// Tasking subsystem files
+#include "../tbb/governor.cpp"
+#if __TBB_ARENA_PER_MASTER
+#include "../tbb/market.cpp"
+#endif /* __TBB_ARENA_PER_MASTER */
+#include "../tbb/arena.cpp"
+#include "../tbb/scheduler.cpp"
+#include "../tbb/observer_proxy.cpp"
+#include "../tbb/task.cpp"
+#include "../tbb/task_group_context.cpp"
+
+// Other dependencies
+#include "../tbb/cache_aligned_allocator.cpp"
+#include "../tbb/dynamic_link.cpp"
+#include "../tbb/tbb_thread.cpp"
+#include "../tbb/mutex.cpp"
+#include "../tbb/spin_rw_mutex.cpp"
+#include "../tbb/spin_mutex.cpp"
+#include "../tbb/private_server.cpp"
+#include "../rml/client/rml_tbb.cpp"
+
+#endif /* harness_inject_scheduler_H */
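A sketch of a whitebox test translation unit that embeds the scheduler this way instead of linking against the TBB shared library (the include order shown is an assumption based on how such tests are usually arranged):

    #include "harness_inject_scheduler.h"   // compiles ../tbb/*.cpp into this test
    #include "harness.h"

    int TestMain () {
        // ... exercise internal scheduler state that the public library does not expose ...
        return Harness::Done;
    }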
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_iterator.h b/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_iterator.h
new file mode 100644 (file)
index 0000000..c3dbd02
--- /dev/null
@@ -0,0 +1,130 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef harness_iterator_H
+#define harness_iterator_H
+
+#include <iterator>
+#include <memory>
+
+namespace Harness {
+
+template <class T>
+class InputIterator {
+    T * my_ptr;
+public:
+#if HARNESS_EXTENDED_STD_COMPLIANCE
+    typedef std::input_iterator_tag iterator_category;
+    typedef T value_type;
+    typedef typename std::allocator<T>::difference_type difference_type;
+    typedef typename std::allocator<T>::pointer pointer;
+    typedef typename std::allocator<T>::reference reference;
+#endif /* HARNESS_EXTENDED_STD_COMPLIANCE */
+   
+    explicit InputIterator( T * ptr): my_ptr(ptr){}
+    
+    T& operator* () { return *my_ptr; }
+    
+    InputIterator& operator++ () { ++my_ptr; return *this; }
+
+    bool operator== ( const InputIterator& r ) { return my_ptr == r.my_ptr; }
+};
+
+template <class T>
+class ForwardIterator {
+    T * my_ptr;
+public:
+#if HARNESS_EXTENDED_STD_COMPLIANCE
+    typedef std::forward_iterator_tag iterator_category;
+    typedef T value_type;
+    typedef typename std::allocator<T>::difference_type difference_type;
+    typedef typename std::allocator<T>::pointer pointer;
+    typedef typename std::allocator<T>::reference reference;
+#endif /* HARNESS_EXTENDED_STD_COMPLIANCE */
+   
+    explicit ForwardIterator ( T * ptr ) : my_ptr(ptr){}
+    
+    ForwardIterator ( const ForwardIterator& r ) : my_ptr(r.my_ptr){}
+    
+    T& operator* () { return *my_ptr; }
+    
+    ForwardIterator& operator++ () { ++my_ptr; return *this; }
+
+    bool operator== ( const ForwardIterator& r ) { return my_ptr == r.my_ptr; }
+};
+
+template <class T>
+class RandomIterator {
+    T * my_ptr;
+#if !HARNESS_EXTENDED_STD_COMPLIANCE
+    typedef typename std::allocator<T>::difference_type difference_type;
+#endif
+
+public:
+#if HARNESS_EXTENDED_STD_COMPLIANCE
+    typedef std::random_access_iterator_tag iterator_category;
+    typedef T value_type;
+    typedef typename std::allocator<T>::pointer pointer;
+    typedef typename std::allocator<T>::reference reference;
+    typedef typename std::allocator<T>::difference_type difference_type;
+#endif /* HARNESS_EXTENDED_STD_COMPLIANCE */
+
+    explicit RandomIterator ( T * ptr ) : my_ptr(ptr){}
+    RandomIterator ( const RandomIterator& r ) : my_ptr(r.my_ptr){}
+    T& operator* () { return *my_ptr; }
+    RandomIterator& operator++ () { ++my_ptr; return *this; }
+    bool operator== ( const RandomIterator& r ) { return my_ptr == r.my_ptr; }
+    difference_type operator- (const RandomIterator &r) {return my_ptr - r.my_ptr;}
+    RandomIterator operator+ (difference_type n) {return RandomIterator(my_ptr + n);}
+};
+
+} // namespace Harness
+
+#if !HARNESS_EXTENDED_STD_COMPLIANCE
+namespace std {
+    template<typename T>
+    struct iterator_traits< Harness::InputIterator<T> > {
+        typedef std::input_iterator_tag iterator_category;
+        typedef T value_type;
+    };
+
+    template<typename T>
+    struct iterator_traits< Harness::ForwardIterator<T> > {
+        typedef std::forward_iterator_tag iterator_category;
+        typedef T value_type;
+    };
+
+    template<typename T>
+    struct iterator_traits< Harness::RandomIterator<T> > {
+        typedef std::random_access_iterator_tag iterator_category;
+        typedef T value_type;
+    };
+} // namespace std
+#endif /* !HARNESS_EXTENDED_STD_COMPLIANCE */
+
+#endif //harness_iterator_H
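A small sketch showing the minimal interface these iterators deliberately expose; only *, prefix ++ and == are available on InputIterator, so consuming code must be written accordingly (ASSERT is assumed from the harness):

    #include "harness_iterator.h"

    void FeedInputIterators() {
        int data[4] = { 1, 2, 3, 4 };
        Harness::InputIterator<int> first( data ), last( data + 4 );
        int sum = 0;
        while( !(first == last) ) {   // no operator!= is provided
            sum += *first;
            ++first;
        }
        ASSERT( sum == 10, NULL );
    }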
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_m128.h b/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_m128.h
new file mode 100644 (file)
index 0000000..88cd15c
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Header that sets HAVE_m128 if we have type __m128
+
+#if (__SSE__||_M_IX86) && !defined(__sun)
+#include <xmmintrin.h>
+#define HAVE_m128 1
+
+//! Class for testing safety of using __m128
+/** Uses circuitous logic that forces the compiler to put __m128 objects on the stack
+    while executing various methods, and thus tempts it to use aligned loads and stores
+    on the stack. */
+//  Do not create file-scope objects of the class, because MinGW (as of May 2010)
+//  did not always provide proper stack alignment in destructors of such objects.
+class ClassWithSSE {
+    static const int n = 16;
+    __m128 field[n];
+    void init( int start );
+public:
+    ClassWithSSE() {init(-n);} 
+    ClassWithSSE( int i ) {init(i);}
+    void operator=( const ClassWithSSE& src ) {
+        __m128 stack[n];
+        for( int i=0; i<n; ++i )
+            stack[i^5] = src.field[i];
+        for( int i=0; i<n; ++i )
+            field[i^5] = stack[i];
+    }
+    ~ClassWithSSE() {init(-2*n);}
+    friend bool operator==( const ClassWithSSE& x, const ClassWithSSE& y ) {
+        for( int i=0; i<4*n; ++i )
+            if( ((const float*)x.field)[i]!=((const float*)y.field)[i] )
+                return false;
+        return true;
+    }
+    friend bool operator!=( const ClassWithSSE& x, const ClassWithSSE& y ) {
+        return !(x==y);
+    }
+};
+
+void ClassWithSSE::init( int start ) {
+    __m128 stack[n];
+    for( int i=0; i<n; ++i ) {
+        // Declaring value as a one-element array instead of a scalar quiets
+        // gratuitous warnings about possible use of "value" before it was set.
+        __m128 value[1];
+        for( int j=0; j<4; ++j )
+            ((float*)value)[j] = float(n*start+4*i+j);
+        stack[i^5] = value[0];
+    }
+    for( int i=0; i<n; ++i )
+        field[i^5] = stack[i];
+}
+
+#endif /* __SSE__||_M_IX86 */
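A usage sketch: ClassWithSSE objects are created as automatic variables (per the note above about file-scope objects) so that aligned SSE loads and stores hit the stack; ASSERT is assumed from the harness.

    #if HAVE_m128
    void ExerciseClassWithSSE() {
        ClassWithSSE a(1), b(1), c(2);
        ASSERT( a == b, "identically constructed objects must compare equal" );
        ASSERT( a != c, NULL );
        c = a;                        // operator= round-trips every field through the stack
        ASSERT( c == a, NULL );
    }
    #endif /* HAVE_m128 */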
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_memory.h b/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_memory.h
new file mode 100644 (file)
index 0000000..dca372f
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Declarations for a simple estimate of the memory being used by a program.
+// Returns 0 on platforms where it is not implemented.
+// This header is an optional part of the test harness.
+// It assumes that "harness_assert.h" has already been included.
+
+#if __linux__ || __sun
+#include <sys/resource.h>
+#include <unistd.h>
+
+#elif __APPLE__
+#include <unistd.h>
+#include <mach/mach.h>
+#include <AvailabilityMacros.h>
+#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1060
+#include <mach/shared_region.h>
+#else
+#include <mach/shared_memory_server.h>
+#endif
+#if SHARED_TEXT_REGION_SIZE || SHARED_DATA_REGION_SIZE
+const size_t shared_size = SHARED_TEXT_REGION_SIZE+SHARED_DATA_REGION_SIZE;
+#else
+const size_t shared_size = 0;
+#endif
+
+#elif _WIN32 && !_XBOX
+#include <windows.h>
+#include <psapi.h>
+#if _MSC_VER
+#pragma comment(lib, "psapi")
+#endif
+
+#endif /* OS selection */
+
+//! Return estimate of number of bytes of memory that this program is currently using.
+/* Returns 0 if not implemented on platform. */
+size_t GetMemoryUsage() { 
+#if _XBOX
+    return 0;
+#elif _WIN32
+    PROCESS_MEMORY_COUNTERS mem;
+    bool status = GetProcessMemoryInfo(GetCurrentProcess(), &mem, sizeof(mem))!=0;
+    ASSERT(status, NULL);
+    return mem.PagefileUsage;
+#elif __linux__
+    FILE* statsfile = fopen("/proc/self/statm","r");
+    size_t pagesize = getpagesize();
+    ASSERT(statsfile, NULL);
+    unsigned long total_mem;   // matches the %lu conversion below
+    int n = fscanf(statsfile,"%lu",&total_mem);
+    if( n!=1 ) {
+        REPORT("Warning: memory usage statistics wasn't obtained\n");
+        return 0;
+    }
+    fclose(statsfile);
+    return total_mem*pagesize;
+#elif __APPLE__
+    kern_return_t status;
+    task_basic_info info;
+    mach_msg_type_number_t msg_type = TASK_BASIC_INFO_COUNT;
+    status = task_info(mach_task_self(), TASK_BASIC_INFO, reinterpret_cast<task_info_t>(&info), &msg_type);
+    ASSERT(status==KERN_SUCCESS, NULL);
+    return info.virtual_size - shared_size;
+#else
+    return 0;
+#endif
+}
+
+//! Use approximately a specified amount of stack space.
+/** Recursion is used here instead of alloca because some implementations of alloca do not use the stack. */
+void UseStackSpace( size_t amount, char* top=0 ) {
+    char x[1000];
+    memset( x, -1, sizeof(x) );
+    if( !top ) 
+        top = x;
+    ASSERT( x<=top, "test assumes that stacks grow downwards" );
+    if( size_t(top-x)<amount )
+        UseStackSpace( amount, top );
+}
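A sketch of the usual leak-check pattern built on GetMemoryUsage: measure before and after many iterations and allow generous slack, since the value is only an estimate and is 0 on unsupported platforms (the 2 MB margin below is an arbitrary choice; ASSERT is assumed from the harness).

    void CheckForLeaks() {
        size_t before = GetMemoryUsage();
        if( !before ) return;                    // platform not supported; nothing to check
        for( int i = 0; i < 1000; ++i ) {
            // ... allocate and free the objects under test ...
        }
        size_t after = GetMemoryUsage();
        ASSERT( after < before + 2u*1024u*1024u, "memory usage grew suspiciously" );
    }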
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_report.h b/deal.II/contrib/tbb/tbb30_104oss/src/test/harness_report.h
new file mode 100644 (file)
index 0000000..455c44b
--- /dev/null
@@ -0,0 +1,174 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Just the tracing portion of the harness.
+//
+// This header defines the TRACE and TRACENL macros, which use REPORT-like syntax and
+// are useful for duplicating trace output to the standard debug output on Windows.
+// It would be possible to extend the messages automatically with additional
+// info (file, line, function, time, thread ID, ...).
+//
+// The macros output nothing when the test app runs in non-verbose mode (the default).
+//
+// The full "harness.h" must be included before this header.
+
+#ifndef tbb_tests_harness_report_H
+#define tbb_tests_harness_report_H
+
+#if defined(MAX_TRACE_SIZE) && MAX_TRACE_SIZE < 1024
+    #undef MAX_TRACE_SIZE
+#endif
+#ifndef MAX_TRACE_SIZE
+    #define MAX_TRACE_SIZE  1024
+#endif
+
+#if __SUNPRO_CC
+#include <stdio.h>
+#else
+#include <cstdio>
+#endif
+
+#include <cstdarg>
+
+
+#ifdef HARNESS_INCOMPLETE_SOURCES
+#error Source files are not complete. Check the build environment
+#endif
+
+#if _MSC_VER
+    #define snprintf _snprintf
+#if _MSC_VER<=1400
+    #define vsnprintf _vsnprintf
+#endif
+#endif
+
+namespace Harness {
+    namespace internal {
+
+#ifndef TbbHarnessReporter
+    struct TbbHarnessReporter {
+        void Report ( const char* msg ) {
+            printf( "%s", msg );
+            fflush(stdout);
+#ifdef _WINDOWS_
+            OutputDebugStringA(msg);
+#endif
+        }
+    }; // struct TbbHarnessReporter
+#endif /* !TbbHarnessReporter */
+
+    class Tracer {
+        int         m_flags;
+        const char  *m_file;
+        const char  *m_func;
+        size_t      m_line;
+
+        TbbHarnessReporter m_reporter;
+
+    public:
+        enum  { 
+            prefix = 1,
+            need_lf = 2
+        };
+
+        Tracer*  set_trace_info ( int flags, const char *file, size_t line, const char *func ) {
+            m_flags = flags;
+            m_line = line;
+            m_file = file;
+            m_func = func;
+            return  this;
+        }
+
+        void  trace ( const char* fmt, ... ) {
+            char    msg[MAX_TRACE_SIZE];
+            char    msg_fmt_buf[MAX_TRACE_SIZE];
+            const char  *msg_fmt = fmt;
+            if ( m_flags & prefix ) {
+                snprintf (msg_fmt_buf, MAX_TRACE_SIZE, "[%s] %s", m_func, fmt);
+                msg_fmt = msg_fmt_buf;
+            }
+            std::va_list argptr;
+            va_start (argptr, fmt);
+            int len = vsnprintf (msg, MAX_TRACE_SIZE, msg_fmt, argptr);
+            va_end (argptr);
+            if ( m_flags & need_lf &&  
+                 len < MAX_TRACE_SIZE - 1  &&  msg_fmt[len-1] != '\n' )
+            {
+                msg[len] = '\n';
+                msg[len + 1] = 0;
+            }
+            m_reporter.Report(msg);
+        }
+    }; // class Tracer
+
+    static Tracer tracer;
+
+    template<int>
+    bool not_the_first_call () {
+        static bool first_call = false;
+        bool res = first_call;
+        first_call = true;
+        return res;
+    }
+
+    } // namespace internal
+} // namespace Harness
+
+#if defined(_MSC_VER)  &&  _MSC_VER >= 1300  ||  defined(__GNUC__)  ||  defined(__GNUG__)
+    #define HARNESS_TRACE_ORIG_INFO __FILE__, __LINE__, __FUNCTION__
+#else
+    #define HARNESS_TRACE_ORIG_INFO __FILE__, __LINE__, ""
+    #define __FUNCTION__ ""
+#endif
+
+
+//! printf style tracing macro
+/** This variant of TRACE adds trailing line-feed (new line) character, if it is absent. **/
+#define TRACE Harness::internal::tracer.set_trace_info(Harness::internal::Tracer::need_lf, HARNESS_TRACE_ORIG_INFO)->trace
+
+//! printf style tracing macro without automatic new line character adding
+#define TRACENL Harness::internal::tracer.set_trace_info(0, HARNESS_TRACE_ORIG_INFO)->trace
+
+//! printf style tracing macro with additional information prefix (e.g. current function name)
+#define TRACEP Harness::internal::tracer.set_trace_info(Harness::internal::Tracer::prefix | \
+                                    Harness::internal::Tracer::need_lf, HARNESS_TRACE_ORIG_INFO)->trace
+
+//! printf style remark macro
+/** Produces output only when the test is run with the -v (verbose) option. **/
+#define REMARK  !Verbose ? (void)0 : TRACENL
+
+//! printf style remark macro
+/** Produces output only when invoked first time. 
+    Only one instance of this macro is allowed per source code line. **/
+#define REMARK_ONCE (!Verbose || Harness::internal::not_the_first_call<__LINE__>()) ? (void)0 : TRACE
+
+//! printf style reporting macro
+/** On heterogeneous platforms redirects its output to the host side. **/
+#define REPORT TRACENL
+
+#endif /* tbb_tests_harness_report_H */
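A short sketch of the intended use (assumes the full harness.h, which defines Verbose, is included first): REPORT always prints, REMARK prints only under -v, and REMARK_ONCE prints at most once per source line.

    void ReportProgress( int iteration, double seconds ) {
        REMARK( "iteration %d took %g s\n", iteration, seconds );   // verbose mode only
        REMARK_ONCE( "timing loop started\n" );                     // at most once from this line
        if( seconds > 60.0 )
            REPORT( "Warning: iteration %d is unexpectedly slow (%g s)\n", iteration, seconds );
    }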
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_ScalableAllocator.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_ScalableAllocator.cpp
new file mode 100644 (file)
index 0000000..b0c5904
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Test whether scalable_allocator complies with the requirements in 20.1.5 of ISO C++ Standard (1998).
+
+#define HARNESS_NO_PARSE_COMMAND_LINE 1
+
+#include "tbb/scalable_allocator.h"
+
+// the actual body of the test is there:
+#include "test_allocator.h"
+
+#if _MSC_VER
+#include "tbb/machine/windows_api.h"
+#endif /* _MSC_VER */
+
+int TestMain () {
+#if _MSC_VER && !__TBBMALLOC_NO_IMPLICIT_LINKAGE
+    #ifdef _DEBUG
+        ASSERT(!GetModuleHandle("tbbmalloc.dll") && GetModuleHandle("tbbmalloc_debug.dll"),
+            "test linked with wrong (non-debug) tbbmalloc library");
+    #else
+        ASSERT(!GetModuleHandle("tbbmalloc_debug.dll") && GetModuleHandle("tbbmalloc.dll"),
+            "test linked with wrong (debug) tbbmalloc library");
+    #endif
+#endif /* _MSC_VER && !__TBBMALLOC_NO_IMPLICIT_LINKAGE */
+    int result = TestMain<tbb::scalable_allocator<void> >();
+    ASSERT( !result, NULL );
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_ScalableAllocator_STL.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_ScalableAllocator_STL.cpp
new file mode 100644 (file)
index 0000000..34c6d3c
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Test whether scalable_allocator works with some of the host's STL containers.
+
+#define HARNESS_NO_PARSE_COMMAND_LINE 1
+#include "tbb/scalable_allocator.h"
+
+// The actual body of the test is there:
+#include "test_allocator_STL.h"
+
+int TestMain () {
+    TestAllocatorWithSTL<tbb::scalable_allocator<void> >();
+    return Harness::Done;
+}
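For context, a minimal sketch (not part of the TBB sources) of the pattern this test covers: plugging tbb::scalable_allocator into a standard container so that the container's storage comes from the scalable memory pool:

    #include <vector>
    #include "tbb/scalable_allocator.h"

    int main() {
        // A standard container whose storage is managed by the TBB scalable allocator.
        std::vector<int, tbb::scalable_allocator<int> > v;
        for( int i = 0; i < 1000; ++i )
            v.push_back(i * i);
        return v[10] == 100 ? 0 : 1;
    }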
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_aligned_space.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_aligned_space.cpp
new file mode 100644 (file)
index 0000000..95d6c8e
--- /dev/null
@@ -0,0 +1,119 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/tbb_config.h"
+
+#if __TBB_GCC_WARNING_SUPPRESSION_ENABLED
+#pragma GCC diagnostic ignored "-Wstrict-aliasing"
+#endif
+
+//! Wrapper around T where all members are private.
+/** Used to prove that aligned_space<T,N> never calls member of T. */
+template<typename T>
+class Minimal {
+    Minimal();
+    Minimal( Minimal& min );
+    ~Minimal();
+    void operator=( const Minimal& );
+    T pad;
+    template<typename U>
+    friend void AssignToCheckAlignment( Minimal<U>& dst, const Minimal<U>& src ) ;
+};
+
+template<typename T>
+void AssignToCheckAlignment( Minimal<T>& dst, const Minimal<T>& src ) {
+    dst.pad = src.pad;
+}
+
+#include "tbb/aligned_space.h"
+#include "harness_assert.h"
+
+static bool SpaceWasted;
+
+template<typename U, size_t N>
+void TestAlignedSpaceN() {
+    typedef Minimal<U> T;
+    struct {
+        //! Pad byte increases chance that subsequent member will be misaligned if there is a problem.
+        char pad;
+        tbb::aligned_space<T ,N> space;
+    } x;
+    AssertSameType( static_cast< T *>(0), x.space.begin() );
+    AssertSameType( static_cast< T *>(0), x.space.end() );
+    ASSERT( reinterpret_cast<void *>(x.space.begin())==reinterpret_cast< void *>(&x.space), NULL );
+    ASSERT( x.space.end()-x.space.begin()==N, NULL );
+    ASSERT( reinterpret_cast<void *>(x.space.begin())>=reinterpret_cast< void *>(&x.space), NULL );
+    ASSERT( x.space.end()<=reinterpret_cast< T *>(&x.space+1), NULL );
+    // Though not required, a good implementation of aligned_space<T,N> does not use any more space than a T[N].
+    SpaceWasted |= sizeof(x.space)!=sizeof(T)*N;
+    for( size_t k=1; k<N; ++k )
+        AssignToCheckAlignment( x.space.begin()[k-1], x.space.begin()[k] );
+}
+
+static void PrintSpaceWastingWarning( const char* type_name );
+
+#include <typeinfo>
+
+template<typename T>
+void TestAlignedSpace() {
+    SpaceWasted = false;
+    TestAlignedSpaceN<T,1>();
+    TestAlignedSpaceN<T,2>();
+    TestAlignedSpaceN<T,3>();
+    TestAlignedSpaceN<T,4>();
+    TestAlignedSpaceN<T,5>();
+    TestAlignedSpaceN<T,6>();
+    TestAlignedSpaceN<T,7>();
+    TestAlignedSpaceN<T,8>();
+    if( SpaceWasted )
+        PrintSpaceWastingWarning( typeid(T).name() );
+}
+
+#define HARNESS_NO_PARSE_COMMAND_LINE 1
+#include "harness.h"
+
+#include "harness_m128.h"
+
+int TestMain () {
+    TestAlignedSpace<char>();
+    TestAlignedSpace<short>();
+    TestAlignedSpace<int>();
+    TestAlignedSpace<float>();
+    TestAlignedSpace<double>();
+    TestAlignedSpace<long double>();
+    TestAlignedSpace<size_t>();
+#if HAVE_m128
+    TestAlignedSpace<__m128>();
+#endif /* HAVE_m128 */
+    return Harness::Done;
+}
+
+static void PrintSpaceWastingWarning( const char* type_name ) {
+    REPORT("Consider rewriting aligned_space<%s,N> to waste less space\n", type_name ); 
+}
+
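For context, a minimal sketch (not part of the TBB sources; Widget is a made-up example type) of the intended use of tbb::aligned_space: reserving correctly aligned, uninitialized storage for objects that are constructed lazily with placement new:

    #include <new>
    #include "tbb/aligned_space.h"

    struct Widget {
        int value;
        Widget( int v ) : value(v) {}
    };

    int main() {
        tbb::aligned_space<Widget,1> storage;           // raw storage, suitably aligned for Widget
        Widget* w = new( storage.begin() ) Widget(7);   // construct on demand
        int v = w->value;
        w->~Widget();                                   // destroy explicitly; the storage itself needs no cleanup
        return v == 7 ? 0 : 1;
    }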
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_allocator.h b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_allocator.h
new file mode 100644 (file)
index 0000000..d8a10e2
--- /dev/null
@@ -0,0 +1,226 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Basic testing of an allocator
+// Tests against requirements in 20.1.5 of ISO C++ Standard (1998).
+// Does not check for thread safety or false sharing issues.
+//
+// Tests for compatibility with the host's STL are in
+// test_allocator_STL.h.  Those tests are in a separate file
+// because they bring in lots of STL headers, and the tests here
+// are supposed to work in the absence of STL.
+
+#include "harness.h"
+
+template<typename A>
+struct is_zero_filling {
+    static const bool value = false;
+};
+
+int NumberOfFoo;
+
+template<typename T, size_t N>
+struct Foo {
+    T foo_array[N];
+    Foo() {
+        zero_fill<T>(foo_array, N);
+        ++NumberOfFoo;
+    }
+    Foo( const Foo& x ) {
+        *this = x;
+        ++NumberOfFoo;
+    }
+    ~Foo() {
+        --NumberOfFoo;
+    }
+};
+
+inline char PseudoRandomValue( size_t j, size_t k ) {
+    return char(j*3 ^ j>>4 ^ k);
+}
+
+//! T is a type and A is an allocator for that type
+template<typename T, typename A>
+void TestBasic( A& a ) {
+    T x;
+    const T cx = T();
+
+    // See Table 32 in the ISO C++ Standard
+    typename A::pointer px = &x;
+    typename A::const_pointer pcx = &cx;
+
+    typename A::reference rx = x;
+    ASSERT( &rx==&x, NULL );
+
+    typename A::const_reference rcx = cx;
+    ASSERT( &rcx==&cx, NULL );
+
+    typename A::value_type v = x;
+
+    typename A::size_type size;
+    size = 0;
+    --size;
+    ASSERT( size>0, "not an unsigned integral type?" );
+
+    typename A::difference_type difference;
+    difference = 0;
+    --difference;
+    ASSERT( difference<0, "not a signed integral type?" );
+
+    // "rebind" tested by our caller 
+
+    ASSERT( a.address(rx)==px, NULL );
+
+    ASSERT( a.address(rcx)==pcx, NULL );
+
+    typename A::pointer array[100];
+    size_t sizeof_T = sizeof(T);
+    for( size_t k=0; k<100; ++k ) {
+        array[k] = k&1 ? a.allocate(k,array[0]) : a.allocate(k);
+        char* s = reinterpret_cast<char*>(reinterpret_cast<void*>(array[k]));
+        for( size_t j=0; j<k*sizeof_T; ++j )
+            s[j] = PseudoRandomValue(j,k);
+    }
+
+    // Test the hint argument. This would not compile if the hint parameter were plain void*; it must be const void*.
+    typename A::pointer a_ptr;
+    const void * const_hint = NULL;    
+    a_ptr = a.allocate (1, const_hint);    
+    a.deallocate(a_ptr, 1);
+
+    // Test "a.deallocate(p,n)
+    for( size_t k=0; k<100; ++k ) {
+        char* s = reinterpret_cast<char*>(reinterpret_cast<void*>(array[k]));
+        for( size_t j=0; j<k*sizeof_T; ++j )
+            ASSERT( s[j] == PseudoRandomValue(j,k), NULL );
+        a.deallocate(array[k],k);
+    }
+
+    // Test "a.max_size()"
+    AssertSameType( a.max_size(), typename A::size_type(0) );
+    // The following assertion catches the case where max_size() is so large that computing
+    // the number of bytes for such an allocation would overflow size_type.
+    ASSERT( a.max_size()*typename A::size_type(sizeof(T))>=a.max_size(), "max_size larger than reasonable" ); 
+
+    // Test "a1==a2"
+    A a1, a2;
+    ASSERT( a1==a2, NULL );
+
+    // Test "a1!=a2"
+    ASSERT( !(a1!=a2), NULL );
+
+    // Test "a.construct(p,t)"
+    int n = NumberOfFoo;
+    typename A::pointer p = a.allocate(1);
+    a.construct( p, cx );
+    ASSERT( NumberOfFoo==n+1, "constructor for Foo not called?" );
+
+    // Test "a.destroy(p)"
+    a.destroy( p );
+    ASSERT( NumberOfFoo==n, "destructor for Foo not called?" );
+    a.deallocate(p,1);
+}
+
+#include "tbb/blocked_range.h"
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // Workaround for erroneous "conditional expression is constant" warning in method check_allocate.
+    #pragma warning (disable: 4127)
+#endif
+
+// A is an allocator for some type
+template<typename A>
+struct Body: NoAssign {
+    static const size_t max_k = 100000;
+    A &a;
+    Body(A &a_) : a(a_) {}
+    void check_allocate( typename A::pointer array[], size_t i, size_t t ) const
+    {
+        ASSERT(array[i] == 0, NULL);
+        size_t size = i * (i&3);
+        array[i] = i&1 ? a.allocate(size, array[i>>3]) : a.allocate(size);
+        char* s = reinterpret_cast<char*>(reinterpret_cast<void*>(array[i]));
+        for( size_t j=0; j<size*sizeof(A); ++j ) {
+            if(is_zero_filling<typename A::template rebind<void>::other>::value)
+                ASSERT( !s[j], NULL);
+            s[j] = PseudoRandomValue(i, t);
+        }
+    }
+
+    void check_deallocate( typename A::pointer array[], size_t i, size_t t ) const
+    {
+        ASSERT(array[i] != 0, NULL);
+        size_t size = i * (i&3);
+        char* s = reinterpret_cast<char*>(reinterpret_cast<void*>(array[i]));
+        for( size_t j=0; j<size*sizeof(A); ++j )
+            ASSERT( s[j] == PseudoRandomValue(i, t), "Thread safety test failed" );
+        a.deallocate(array[i], size);
+        array[i] = 0;
+    }
+
+    void operator()( size_t thread_id ) const {
+        typename A::pointer array[256];
+
+        for( size_t k=0; k<256; ++k )
+            array[k] = 0;
+        for( size_t k=0; k<max_k; ++k ) {
+            size_t i = static_cast<unsigned char>(PseudoRandomValue(k,thread_id));
+            if(!array[i]) check_allocate(array, i, thread_id);
+            else check_deallocate(array, i, thread_id);
+        }
+        for( size_t k=0; k<256; ++k )
+            if(array[k])
+                check_deallocate(array, k, thread_id);
+    }
+};
+
+// A is an allocator for some type, and U is another type
+template<typename A, typename U>
+void Test() {
+    typename A::template rebind<U>::other b;
+    TestBasic<U>(b);
+
+    A a(b);
+    TestBasic<typename A::value_type>(a);
+
+    // thread safety
+    int n = NumberOfFoo;
+    NativeParallelFor( 4, Body<A>(a) );
+    ASSERT( NumberOfFoo==n, "Allocate/deallocate count mismatched" );
+    ASSERT( a==b, NULL );
+    ASSERT( !(a!=b), NULL );
+}
+
+template<typename Allocator>
+int TestMain() {
+    Test<typename Allocator::template rebind<Foo<char,1> >::other, Foo<int,17> >();
+    Test<typename Allocator::template rebind<Foo<double,1> >::other, Foo<float,23> >();
+    return 0;
+}
+
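For context, a minimal sketch (not part of the TBB sources) of the rebind mechanism that Test() relies on above; tbb::scalable_allocator is used here only as one example of an allocator that provides it:

    #include "tbb/scalable_allocator.h"

    int main() {
        // An allocator for double can be "rebound" to yield an allocator for int;
        // node-based containers use this to obtain allocators for their node types.
        typedef tbb::scalable_allocator<double> A;
        typedef A::rebind<int>::other AInt;
        AInt a;
        int* p = a.allocate(4);
        a.deallocate(p, 4);
        return 0;
    }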
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_allocator_STL.h b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_allocator_STL.h
new file mode 100644 (file)
index 0000000..54820c0
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Tests for compatibility with the host's STL.
+
+#include "harness.h"
+
+template<typename Container>
+void TestSequence() {
+    Container c;
+    for( int i=0; i<1000; ++i )
+        c.push_back(i*i);    
+    typename Container::const_iterator p = c.begin();
+    for( int i=0; i<1000; ++i ) {
+        ASSERT( *p==i*i, NULL );
+        ++p;
+    }
+}
+
+template<typename Set>
+void TestSet() {
+    Set s;
+    typedef typename Set::value_type value_type;
+    for( int i=0; i<100; ++i ) 
+        s.insert(value_type(3*i));
+    for( int i=0; i<300; ++i ) {
+        ASSERT( s.erase(i)==size_t(i%3==0), NULL );
+    }
+}
+
+template<typename Map>
+void TestMap() {
+    Map m;
+    typedef typename Map::value_type value_type;
+    for( int i=0; i<100; ++i ) 
+        m.insert(value_type(i,i*i));
+    for( int i=0; i<100; ++i )
+        ASSERT( m.find(i)->second==i*i, NULL );
+}
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <deque>
+#include <list>
+#include <map>
+#include <set>
+#include <vector>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+template<typename Allocator>
+void TestAllocatorWithSTL() {
+    typedef typename Allocator::template rebind<int>::other Ai;
+    typedef typename Allocator::template rebind<const int>::other Aci;
+    typedef typename Allocator::template rebind<std::pair<const int, int> >::other Acii;
+    typedef typename Allocator::template rebind<std::pair<int, int> >::other Aii;
+
+    // Sequenced containers
+    TestSequence<std::deque <int,Ai> >();
+    TestSequence<std::list  <int,Ai> >();
+    TestSequence<std::vector<int,Ai> >();
+
+    // Associative containers
+    TestSet<std::set     <int, std::less<int>, Ai> >();
+    TestSet<std::multiset<int, std::less<int>, Ai> >();
+    TestMap<std::map     <int, int, std::less<int>, Acii> >();
+    TestMap<std::multimap<int, int, std::less<int>, Acii> >();
+
+#if _MSC_VER
+    // Test compatibility with Microsoft's implementation of std::allocator for some cases that
+    // are undefined according to the ISO standard but permitted by Microsoft.
+    TestSequence<std::deque <const int,Aci> >();
+#if _CPPLIB_VER>=500
+    TestSequence<std::list  <const int,Aci> >();
+#endif
+    TestSequence<std::vector<const int,Aci> >();
+    TestSet<std::set<const int, std::less<int>, Aci> >();
+    TestMap<std::map<int, int, std::less<int>, Aii> >();
+    TestMap<std::map<const int, int, std::less<int>, Acii> >();
+    TestMap<std::multimap<int, int, std::less<int>, Aii> >();
+    TestMap<std::multimap<const int, int, std::less<int>, Acii> >();
+#endif /* _MSC_VER */
+}
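For context, a minimal sketch (not part of the TBB sources) mirroring TestAllocatorWithSTL above: associative containers must be handed an allocator for their value_type, which for maps is std::pair<const Key, T>:

    #include <map>
    #include "tbb/scalable_allocator.h"

    int main() {
        typedef std::pair<const int, int> value_type;
        typedef tbb::scalable_allocator<value_type> A;
        std::map<int, int, std::less<int>, A> m;
        for( int i = 0; i < 100; ++i )
            m.insert(value_type(i, i * i));
        return m.find(10)->second == 100 ? 0 : 1;
    }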
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_assembly.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_assembly.cpp
new file mode 100644 (file)
index 0000000..0496a13
--- /dev/null
@@ -0,0 +1,140 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Program for basic correctness testing of assembly-language routines.
+
+#include "tbb/task.h"
+
+#include <new>
+#include "harness.h"
+
+using tbb::internal::reference_count;
+
+//! Test __TBB_CompareAndSwapW
+static void TestCompareExchange() {
+    ASSERT( intptr_t(-10)<10, "intptr_t not a signed integral type?" ); 
+    REMARK("testing __TBB_CompareAndSwapW\n");
+    for( intptr_t a=-10; a<10; ++a )
+        for( intptr_t b=-10; b<10; ++b )
+            for( intptr_t c=-10; c<10; ++c ) {
+// Workaround for a bug in GCC 4.3.0; and one more is below.
+#if __GNUC__==4&&__GNUC_MINOR__==3&&__GNUC_PATCHLEVEL__==0
+                intptr_t x;
+                __TBB_store_with_release( x, a );
+#else
+                intptr_t x = a;
+#endif
+                intptr_t y = __TBB_CompareAndSwapW(&x,b,c);
+                ASSERT( y==a, NULL ); 
+                if( a==c ) 
+                    ASSERT( x==b, NULL );
+                else
+                    ASSERT( x==a, NULL );
+            }
+}
+
+//! Test __TBB_FetchAndIncrement and __TBB_FetchAndDecrement
+static void TestAtomicCounter() {
+    // "canary" is a value used to detect illegal overwrites.
+    const reference_count canary = ~(uintptr_t)0/3;
+    REMARK("testing __TBB_FetchAndIncrement\n");
+    struct {
+        reference_count prefix, i, suffix;
+    } x;
+    x.prefix = canary;
+    x.i = 0;
+    x.suffix = canary;
+    for( int k=0; k<10; ++k ) {
+        reference_count j = __TBB_FetchAndIncrementWacquire((volatile void *)&x.i);
+        ASSERT( x.prefix==canary, NULL );
+        ASSERT( x.suffix==canary, NULL );
+        ASSERT( x.i==k+1, NULL );
+        ASSERT( j==k, NULL );
+    }
+    REMARK("testing __TBB_FetchAndDecrement\n");
+    x.i = 10;
+    for( int k=10; k>0; --k ) {
+        reference_count j = __TBB_FetchAndDecrementWrelease((volatile void *)&x.i);
+        ASSERT( j==k, NULL );
+        ASSERT( x.i==k-1, NULL );
+        ASSERT( x.prefix==canary, NULL );
+        ASSERT( x.suffix==canary, NULL );
+    }
+}
+
+static void TestTinyLock() {
+    REMARK("testing __TBB_LockByte\n");
+    unsigned char flags[16];
+    for( int i=0; i<16; ++i )
+        flags[i] = (unsigned char)i;
+#if __GNUC__==4&&__GNUC_MINOR__==3&&__GNUC_PATCHLEVEL__==0
+    __TBB_store_with_release( flags[8], 0 );
+#else
+    flags[8] = 0;
+#endif
+    __TBB_LockByte(flags[8]);
+    for( int i=0; i<16; ++i )
+       #ifdef __sparc
+        ASSERT( flags[i]==(i==8?0xff:i), NULL );
+       #else
+        ASSERT( flags[i]==(i==8?1:i), NULL );
+       #endif
+}
+
+static void TestLog2() {
+    REMARK("testing __TBB_Log2\n");
+    for( uintptr_t i=1; i; i<<=1 ) {
+        for( uintptr_t j=1; j<1<<16; ++j ) {
+            if( uintptr_t k = i*j ) {
+                uintptr_t actual = __TBB_Log2(k);
+                const uintptr_t ONE = 1; // warning suppression again
+                ASSERT( k >= ONE<<actual, NULL );          
+                ASSERT( k>>1 < ONE<<actual, NULL );        
+            }
+        }
+    }
+}
+
+static void TestPause() {
+    REMARK("testing __TBB_Pause\n");
+    __TBB_Pause(1);
+}
+
+
+int TestMain () {
+    __TBB_TRY {
+        TestLog2();
+        TestTinyLock();
+        TestCompareExchange();
+        TestAtomicCounter();
+        TestPause();
+    } __TBB_CATCH(...) {
+        ASSERT(0,"unexpected exception");
+    }
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_atomic.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_atomic.cpp
new file mode 100644 (file)
index 0000000..792ce2a
--- /dev/null
@@ -0,0 +1,810 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Put tbb/atomic.h first, so if it is missing a prerequisite header, we find out about it.
+// The tests here do *not* test for atomicity, just serial correctness.
+
+#include "tbb/atomic.h"
+#include "harness_assert.h"
+#include <string.h> // memcmp
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // unary minus operator applied to unsigned type, result still unsigned
+    #pragma warning( push )
+    #pragma warning( disable: 4310 )
+#endif
+
+//! Structure that holds an atomic<T> and some guard bytes around it.
+template<typename T>
+struct TestStruct {
+    typedef unsigned char byte_type;
+    T prefix;
+    tbb::atomic<T> counter;
+    T suffix;
+    TestStruct( T i ) {
+        ASSERT( sizeof(*this)==3*sizeof(T), NULL );
+        for (size_t j = 0; j < sizeof(T); ++j) {
+            reinterpret_cast<byte_type*>(&prefix)[j]             = byte_type(0x11*(j+1));
+            reinterpret_cast<byte_type*>(&suffix)[sizeof(T)-j-1] = byte_type(0x11*(j+1));
+        }
+        counter = i;
+    }
+    ~TestStruct() {
+        // Check for writes outside the counter.
+        for (size_t j = 0; j < sizeof(T); ++j) {
+            ASSERT( reinterpret_cast<byte_type*>(&prefix)[j]             == byte_type(0x11*(j+1)), NULL );
+            ASSERT( reinterpret_cast<byte_type*>(&suffix)[sizeof(T)-j-1] == byte_type(0x11*(j+1)), NULL );
+        }
+    }
+};
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    #pragma warning( pop )
+#endif
+
+#if defined(__INTEL_COMPILER)
+    // reference to EBX in a function requiring stack alignment
+    #pragma warning( disable: 998 )
+#endif
+
+//! Test compare_and_swap template members of class atomic<T> for memory_semantics=M
+template<typename T,tbb::memory_semantics M>
+void TestCompareAndSwapAcquireRelease( T i, T j, T k ) {
+    ASSERT( i!=k, "values must be distinct" ); 
+    // Test compare_and_swap that should fail
+    TestStruct<T> x(i);
+    T old = x.counter.template compare_and_swap<M>( j, k );
+    ASSERT( old==i, NULL );
+    ASSERT( x.counter==i, "old value not retained" );
+    // Test compare and swap that should succeed
+    old = x.counter.template compare_and_swap<M>( j, i );
+    ASSERT( old==i, NULL );
+    ASSERT( x.counter==j, "value not updated?" );
+}
+
+//! i, j, k must be different values
+template<typename T>
+void TestCompareAndSwap( T i, T j, T k ) {
+    ASSERT( i!=k, "values must be distinct" ); 
+    // Test compare_and_swap that should fail
+    TestStruct<T> x(i);
+    T old = x.counter.compare_and_swap( j, k );
+    ASSERT( old==i, NULL );
+    ASSERT( x.counter==i, "old value not retained" );
+    // Test compare and swap that should succeed
+    old = x.counter.compare_and_swap( j, i );
+    ASSERT( old==i, NULL );
+    if( x.counter==i ) {
+        ASSERT( x.counter==j, "value not updated?" );
+    } else {    
+        ASSERT( x.counter==j, "value trashed" );
+    }
+    TestCompareAndSwapAcquireRelease<T,tbb::acquire>(i,j,k);
+    TestCompareAndSwapAcquireRelease<T,tbb::release>(i,j,k);
+}
+
+//! memory_semantics variation on TestFetchAndStore
+template<typename T, tbb::memory_semantics M>
+void TestFetchAndStoreAcquireRelease( T i, T j ) {
+    ASSERT( i!=j, "values must be distinct" ); 
+    TestStruct<T> x(i);
+    T old = x.counter.template fetch_and_store<M>( j );
+    ASSERT( old==i, NULL );
+    ASSERT( x.counter==j, NULL );
+}
+
+//! i and j must be different values
+template<typename T>
+void TestFetchAndStore( T i, T j ) {
+    ASSERT( i!=j, "values must be distinct" ); 
+    TestStruct<T> x(i);
+    T old = x.counter.fetch_and_store( j );
+    ASSERT( old==i, NULL );
+    ASSERT( x.counter==j, NULL );
+    TestFetchAndStoreAcquireRelease<T,tbb::acquire>(i,j);
+    TestFetchAndStoreAcquireRelease<T,tbb::release>(i,j);
+}
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // conversion from <bigger integer> to <smaller integer>, possible loss of data
+    // The warning seems complete nonsense when issued for, e.g., short += short.
+    #pragma warning( push )
+    #pragma warning( disable: 4244 )
+#endif
+
+//! Test fetch_and_add members of class atomic<T> for memory_semantics=M
+template<typename T,tbb::memory_semantics M>
+void TestFetchAndAddAcquireRelease( T i ) {
+    TestStruct<T> x(i);
+    T actual;
+    T expected = i;
+
+    // Test fetch_and_add member template
+    for( int j=0; j<10; ++j ) {
+        actual = x.counter.fetch_and_add(j);
+        ASSERT( actual==expected, NULL );
+        expected += j;
+    }
+    for( int j=0; j<10; ++j ) {
+        actual = x.counter.fetch_and_add(-j);
+        ASSERT( actual==expected, NULL );
+        expected -= j;
+    }
+
+    // Test fetch_and_increment member template
+    ASSERT( x.counter==i, NULL );
+    actual = x.counter.template fetch_and_increment<M>();
+    ASSERT( actual==i, NULL );
+    ASSERT( x.counter==T(i+1), NULL );
+
+    // Test fetch_and_decrement member template
+    actual = x.counter.template fetch_and_decrement<M>();
+    ASSERT( actual==T(i+1), NULL );
+    ASSERT( x.counter==i, NULL );
+}
+
+//! Test fetch_and_add and related operators
+template<typename T>
+void TestFetchAndAdd( T i ) {
+    TestStruct<T> x(i);
+    T value;
+    value = ++x.counter;
+    ASSERT( value==T(i+1), NULL );
+    value = x.counter++;
+    ASSERT( value==T(i+1), NULL );
+    value = x.counter--;
+    ASSERT( value==T(i+2), NULL );
+    value = --x.counter;
+    ASSERT( value==i, NULL );
+    T actual;
+    T expected = i;
+    for( int j=-100; j<=100; ++j ) {
+        expected += j;
+        actual = x.counter += j;
+        ASSERT( actual==expected, NULL );
+    }
+    for( int j=-100; j<=100; ++j ) {
+        expected -= j;
+        actual = x.counter -= j;
+        ASSERT( actual==expected, NULL );
+    }
+    // Test fetch_and_increment
+    ASSERT( x.counter==i, NULL );
+    actual = x.counter.fetch_and_increment();
+    ASSERT( actual==i, NULL );
+    ASSERT( x.counter==T(i+1), NULL );
+
+    // Test fetch_and_decrement
+    actual = x.counter.fetch_and_decrement();
+    ASSERT( actual==T(i+1), NULL );
+    ASSERT( x.counter==i, NULL );
+    x.counter = i;
+    ASSERT( x.counter==i, NULL );
+
+    TestFetchAndAddAcquireRelease<T,tbb::acquire>(i);
+    TestFetchAndAddAcquireRelease<T,tbb::release>(i);
+}
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    #pragma warning( pop )
+#endif // warning 4244 is back
+
+//! A type with unknown size.
+class IncompleteType;
+
+void TestFetchAndAdd( IncompleteType* ) {
+    // There are no fetch-and-add operations on a IncompleteType*.
+}
+void TestFetchAndAdd( void* ) {
+    // There are no fetch-and-add operations on a void*.
+}
+
+void TestFetchAndAdd( bool ) {
+    // There are no fetch-and-add operations on a bool.
+}
+
+template<typename T>
+void TestConst( T i ) { 
+    // Try const 
+    const TestStruct<T> x(i);
+    ASSERT( memcmp( &i, &x.counter, sizeof(T) )==0, "write to atomic<T> broken?" );
+    ASSERT( x.counter==i, "read of atomic<T> broken?" );
+}
+
+template<typename T>
+void TestOperations( T i, T j, T k ) {
+    TestConst(i);
+    TestCompareAndSwap(i,j,k);
+    TestFetchAndStore(i,k);    // Pass i,k instead of i,j, because callee requires two distinct values.
+}
+
+template<typename T>
+void TestParallel( const char* name );
+
+bool ParallelError;
+
+template<typename T>
+struct AlignmentChecker {
+    char c;
+    tbb::atomic<T> i;
+};
+
+#include "harness.h"
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // unary minus operator applied to unsigned type, result still unsigned
+    #pragma warning( push )
+    #pragma warning( disable: 4146 )
+#endif
+
+/** T is an integral type. */
+template<typename T>
+void TestAtomicInteger( const char* name ) {
+    REMARK("testing atomic<%s> (size=%d)\n",name,sizeof(tbb::atomic<T>));
+#if ( __linux__ && __TBB_x86_32 && __GNUC__==3 && __GNUC_MINOR__==3 ) || defined(__SUNPRO_CC)
+    // gcc 3.3 has a known problem on 32-bit Linux, so only warn if there is a problem.
+    // SUNPRO_CC has this problem as well.
+    if( sizeof(T)==8 ) {
+        if( sizeof(AlignmentChecker<T>)!=2*sizeof(tbb::atomic<T>) ) {
+            REPORT("Known issue: alignment for atomic<%s> is wrong with gcc 3.3 and sunCC 5.9 2008/01/28 for IA32\n",name);
+        }
+    } else
+#endif /* ( __linux__ && __TBB_x86_32 && __GNUC__==3 && __GNUC_MINOR__==3 ) || defined(__SUNPRO_CC) */
+    ASSERT( sizeof(AlignmentChecker<T>)==2*sizeof(tbb::atomic<T>), NULL );
+    TestOperations<T>(0L,T(-T(1)),T(1));
+    for( int k=0; k<int(sizeof(long))*8-1; ++k ) {
+        TestOperations<T>(T(1L<<k),T(~(1L<<k)),T(1-(1L<<k)));
+        TestOperations<T>(T(-1L<<k),T(~(-1L<<k)),T(1-(-1L<<k)));
+        TestFetchAndAdd<T>(T(-1L<<k));
+    }
+    TestParallel<T>( name );
+}
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    #pragma warning( pop )
+#endif
+
+
+template<typename T>
+struct Foo {
+    T x, y, z;
+};
+
+
+template<typename T>
+void TestIndirection() {
+    Foo<T> item;
+    tbb::atomic<Foo<T>*> pointer;
+    pointer = &item;
+    for( int k=-10; k<=10; ++k ) {
+        // Test various syntaxes for indirection to fields with non-zero offset.   
+        T value1=T(), value2=T();
+        for( size_t j=0; j<sizeof(T); ++j ) {
+            *(char*)&value1 = char(k^j);
+            *(char*)&value2 = char(k^j*j);
+        }
+        pointer->y = value1;
+        (*pointer).z = value2;
+        T result1 = (*pointer).y;
+        T result2 = pointer->z;
+        ASSERT( memcmp(&value1,&result1,sizeof(T))==0, NULL );
+        ASSERT( memcmp(&value2,&result2,sizeof(T))==0, NULL );
+    }
+}
+
+//! Test atomic<T*>
+template<typename T>
+void TestAtomicPointer() {
+    REMARK("testing atomic pointer (%d)\n",int(sizeof(T)));
+    T array[1000];
+    TestOperations<T*>(&array[500],&array[250],&array[750]);
+    TestFetchAndAdd<T*>(&array[500]);
+    TestIndirection<T>();
+    TestParallel<T*>( "pointer" );
+}
+
+//! Test atomic<Ptr> where Ptr is a pointer to a type of unknown size
+template<typename Ptr>
+void TestAtomicPointerToTypeOfUnknownSize( const char* name ) {
+    REMARK("testing atomic<%s>\n",name);
+    char array[1000];
+    TestOperations<Ptr>((Ptr)(void*)&array[500],(Ptr)(void*)&array[250],(Ptr)(void*)&array[750]);
+    TestParallel<Ptr>( name );
+}
+
+void TestAtomicBool() {
+    REMARK("testing atomic<bool>\n");
+    TestOperations<bool>(true,true,false);
+    TestOperations<bool>(false,false,true);
+    TestParallel<bool>( "bool" );
+}
+
+enum Color {Red=0,Green=1,Blue=-1};
+
+void TestAtomicEnum() {
+    REMARK("testing atomic<Color>\n");
+    TestOperations<Color>(Red,Green,Blue);
+    TestParallel<Color>( "Color" );
+}
+
+template<typename T>
+void TestAtomicFloat( const char* name ) {
+    REMARK("testing atomic<%s>\n", name );
+    TestOperations<T>(0.5,3.25,10.75);
+    TestParallel<T>( name );
+}
+
+const int numMaskedOperations = 100000;
+const int testSpaceSize = 8;
+int prime[testSpaceSize] = {3,5,7,11,13,17,19,23};
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // "possible loss of data" warning suppressed again
+    #pragma warning( push )
+    #pragma warning( disable: 4244 )
+#endif
+
+template<typename T>
+class TestMaskedCAS_Body: NoAssign {
+    T*  test_space_uncontended;
+    T*  test_space_contended;
+public:   
+    TestMaskedCAS_Body( T* _space1, T* _space2 ) : test_space_uncontended(_space1), test_space_contended(_space2) {}
+    void operator()( int my_idx ) const {
+        using tbb::internal::__TBB_MaskedCompareAndSwap;
+        const T my_prime = T(prime[my_idx]);
+        T* const my_ptr = test_space_uncontended+my_idx;
+        T old_value=0;
+        for( int i=0; i<numMaskedOperations; ++i, old_value+=my_prime ){
+            T result;
+        // Test uncontended case
+            T new_value = old_value + my_prime;
+            // The following CAS should always fail
+            result = __TBB_MaskedCompareAndSwap<sizeof(T),T>(my_ptr,new_value,old_value-1);
+            ASSERT(result!=old_value-1, "masked CAS succeeded while it should fail");
+            ASSERT(result==*my_ptr, "masked CAS result mismatch with real value");
+            // The following one should succeed
+            result = __TBB_MaskedCompareAndSwap<sizeof(T),T>(my_ptr,new_value,old_value);
+            ASSERT(result==old_value && *my_ptr==new_value, "masked CAS failed while it should succeed");
+            // The following one should fail again
+            result = __TBB_MaskedCompareAndSwap<sizeof(T),T>(my_ptr,new_value,old_value);
+            ASSERT(result!=old_value, "masked CAS succeeded while it should fail");
+            ASSERT(result==*my_ptr, "masked CAS result mismatch with real value");
+        // Test contended case
+            for( int j=0; j<testSpaceSize; ++j ){
+                // try adding my_prime until success
+                T value;
+                do {
+                    value = test_space_contended[j];
+                    result = __TBB_MaskedCompareAndSwap<sizeof(T),T>(test_space_contended+j,value+my_prime,value);
+                } while( result!=value );
+            }
+        }
+    }
+};
+
+template<typename T>
+struct intptr_as_array_of
+{
+    static const int how_many_Ts = sizeof(intptr_t)/sizeof(T);
+    union {
+        intptr_t result;
+        T space[ how_many_Ts ];
+    };
+};
+
+template<typename T>
+intptr_t getCorrectUncontendedValue(int slot_idx) {
+    intptr_as_array_of<T> slot;
+    slot.result = 0;
+    for( int i=0; i<slot.how_many_Ts; ++i ) {
+        const T my_prime = T(prime[slot_idx*slot.how_many_Ts + i]);
+        for( int j=0; j<numMaskedOperations; ++j )
+            slot.space[i] += my_prime;
+    }
+    return slot.result;
+}
+
+template<typename T>
+intptr_t getCorrectContendedValue() {
+    intptr_as_array_of<T>  slot;
+    slot.result = 0;
+    for( int i=0; i<slot.how_many_Ts; ++i )
+        for( int primes=0; primes<testSpaceSize; ++primes )
+            for( int j=0; j<numMaskedOperations; ++j )
+                slot.space[i] += prime[primes];
+    return slot.result;
+}
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    #pragma warning( pop )
+#endif // warning 4244 is back again
+
+template<typename T>
+void TestMaskedCAS() {
+    REMARK("testing masked CAS<%d>\n",int(sizeof(T)));
+
+    const int num_slots = sizeof(T)*testSpaceSize/sizeof(intptr_t);
+    intptr_t arr1[num_slots+2]; // two more "canary" slots at boundaries
+    intptr_t arr2[num_slots+2];
+    for(int i=0; i<num_slots+2; ++i)
+        arr2[i] = arr1[i] = 0;
+    T* test_space_uncontended = (T*)(arr1+1);
+    T* test_space_contended = (T*)(arr2+1);
+
+    NativeParallelFor( testSpaceSize, TestMaskedCAS_Body<T>(test_space_uncontended, test_space_contended) );
+
+    ASSERT( arr1[0]==0 && arr1[num_slots+1]==0 && arr2[0]==0 && arr2[num_slots+1]==0 , "adjacent memory was overwritten" );
+    const intptr_t correctContendedValue = getCorrectContendedValue<T>();
+    for(int i=0; i<num_slots; ++i) {
+        ASSERT( arr1[i+1]==getCorrectUncontendedValue<T>(i), "unexpected value in an uncontended slot" );
+        ASSERT( arr2[i+1]==correctContendedValue, "unexpected value in a contended slot" );
+    }
+}
+
+template<unsigned N>
+class ArrayElement {
+    char item[N];
+};
+
+int TestMain () {
+    TestAtomicInteger<unsigned long long>("unsigned long long");
+    TestAtomicInteger<long long>("long long");
+    TestAtomicInteger<unsigned long>("unsigned long");
+    TestAtomicInteger<long>("long");
+    TestAtomicInteger<unsigned int>("unsigned int");
+    TestAtomicInteger<int>("int");
+    TestAtomicInteger<unsigned short>("unsigned short");
+    TestAtomicInteger<short>("short");
+    TestAtomicInteger<signed char>("signed char");
+    TestAtomicInteger<unsigned char>("unsigned char");
+    TestAtomicInteger<char>("char");
+    TestAtomicInteger<wchar_t>("wchar_t");
+    TestAtomicInteger<size_t>("size_t");
+    TestAtomicInteger<ptrdiff_t>("ptrdiff_t");
+    TestAtomicPointer<ArrayElement<1> >();
+    TestAtomicPointer<ArrayElement<2> >();
+    TestAtomicPointer<ArrayElement<3> >();
+    TestAtomicPointer<ArrayElement<4> >();
+    TestAtomicPointer<ArrayElement<5> >();
+    TestAtomicPointer<ArrayElement<6> >();
+    TestAtomicPointer<ArrayElement<7> >();
+    TestAtomicPointer<ArrayElement<8> >();
+    TestAtomicPointerToTypeOfUnknownSize<IncompleteType*>( "IncompleteType*" );
+    TestAtomicPointerToTypeOfUnknownSize<void*>( "void*" );
+    TestAtomicBool();
+    TestAtomicEnum();
+    TestAtomicFloat<float>("float");
+    TestAtomicFloat<double>("double");
+    ASSERT( !ParallelError, NULL );
+    TestMaskedCAS<unsigned char>();
+    TestMaskedCAS<unsigned short>();
+    return Harness::Done;
+}
+
+template<typename T>
+struct FlagAndMessage {
+    //! 0 if message not set yet, 1 if message is set.
+    tbb::atomic<T> flag;
+    /** Force flag and message to be on distinct cache lines for machines with cache line size <= 4096 bytes */
+    char pad[4096/sizeof(T)];
+    //! Non-zero if message is ready
+    T message;    
+};
+
+// A special template function used for summation.
+// It is only necessary because of its specializations for pointer and bool types below.
+template<typename T>
+T special_sum(intptr_t arg1, intptr_t arg2) {
+    return (T)((T)arg1 + arg2);
+}
+
+// The specialization for IncompleteType* is required
+// because pointer arithmetic (+) is impossible with IncompleteType*
+template<>
+IncompleteType* special_sum<IncompleteType*>(intptr_t arg1, intptr_t arg2) {
+    return (IncompleteType*)(arg1 + arg2);
+}
+
+// The specialization for void* is required
+// because pointer arithmetic (+) is impossible with void*
+template<>
+void* special_sum<void*>(intptr_t arg1, intptr_t arg2) {
+    return (void*)(arg1 + arg2);
+}
+
+// The specialization for bool is required to shut up gratuitous compiler warnings,
+// because some compilers warn about casting int to bool.
+template<>
+bool special_sum<bool>(intptr_t arg1, intptr_t arg2) {
+    return ((arg1!=0) + arg2)!=0;
+}
+
+volatile int One = 1;
+template<typename T>
+class HammerLoadAndStoreFence: NoAssign {
+    FlagAndMessage<T>* fam;
+    const int n;
+    const int p;
+    const int trial;
+    const char* name;
+    mutable T accum;
+public:
+    HammerLoadAndStoreFence( FlagAndMessage<T>* fam_, int n_, int p_, const char* name_, int trial_ ) : fam(fam_), n(n_), p(p_), trial(trial_), name(name_) {}
+    void operator()( int k ) const {
+        int one = One;
+        FlagAndMessage<T>* s = fam+k;
+        FlagAndMessage<T>* s_next = fam + (k+1)%p;
+        for( int i=0; i<n; ++i ) {
+            // The inner for loop is a spin-wait loop, which is normally considered very bad style. 
+            // But we must use it here because we are interested in examining subtle hardware effects.
+            for(unsigned short cnt=1; ; ++cnt) {
+                if( !cnt ) // to help 1-core systems complete the test, yield every 2^16 iterations
+                    __TBB_Yield();
+                // Compilers typically generate non-trivial sequence for division by a constant.
+                // The expression here is dependent on the loop index i, so it cannot be hoisted.
+#define COMPLICATED_ZERO (i*(one-1)/100)
+                // Read flag and then the message
+                T flag, message;
+                if( trial&1 ) { 
+                    // COMPLICATED_ZERO here tempts compiler to hoist load of message above reading of flag.
+                    flag = (s+COMPLICATED_ZERO)->flag;
+                    message = s->message;
+                } else {
+                    flag = s->flag;
+                    message = s->message;
+                }
+                if( flag ) {
+                    if( flag!=(T)-1 ) {
+                        REPORT("ERROR: flag!=(T)-1 k=%d i=%d trial=%x type=%s (atomicity problem?)\n", k, i, trial, name );
+                        ParallelError = true;
+                    } 
+                    if( message!=(T)-1 ) {
+                        REPORT("ERROR: message!=(T)-1 k=%d i=%d trial=%x type=%s (memory fence problem?)\n", k, i, trial, name );
+                        ParallelError = true;
+                    }
+                    s->message = T(0); 
+                    s->flag = T(0);
+                    // Set message and then the flag
+                    if( trial&2 ) {
+                        // COMPLICATED_ZERO here tempts compiler to sink store below setting of flag
+                        s_next->message = special_sum<T>(-1, COMPLICATED_ZERO);
+                        s_next->flag = (T)-1;
+                    } else {
+                        s_next->message = (T)-1;
+                        s_next->flag = (T)-1;
+                    }
+                    break;
+                } else {
+                    // Force compiler to use message anyway, so it cannot sink read of s->message below the if.
+                    accum = message;
+                }
+            }
+        }
+    }
+};
+
+//! Test that atomic<T> has acquire semantics for loads and release semantics for stores.
+/** Test performs round-robin passing of message among p processors, 
+    where p goes from MinThread to MaxThread. */
+template<typename T>
+void TestLoadAndStoreFences( const char* name ) {
+    for( int p=MinThread<2 ? 2 : MinThread; p<=MaxThread; ++p ) {
+        FlagAndMessage<T>* fam = new FlagAndMessage<T>[p];
+        // Each of the four trials exercises a slightly different expression pattern within the test.
+        // See occurrences of COMPLICATED_ZERO for details. 
+        for( int trial=0; trial<4; ++trial ) {
+            memset( fam, 0, p*sizeof(FlagAndMessage<T>) );
+            fam->message = (T)-1;
+            fam->flag = (T)-1;
+            NativeParallelFor( p, HammerLoadAndStoreFence<T>( fam, 100, p, name, trial ) );
+            for( int k=0; k<p; ++k ) {
+                ASSERT( fam[k].message==(k==0 ? (T)-1 : 0), "incomplete round-robin?" ); 
+                ASSERT( fam[k].flag==(k==0 ? (T)-1 : 0), "incomplete round-robin?" ); 
+            }
+        }
+        delete[] fam;
+    }
+}
+
+//! Sparse set of values of integral type T.
+/** The set is designed so that if a value is read or written non-atomically,
+    the resulting intermediate value is unlikely to be a member of the set. */
+template<typename T>
+class SparseValueSet {
+    T factor;
+public:
+    SparseValueSet() {
+        // Compute factor such that:
+        // 1. It has at least one 1 in most of its bytes.
+        // 2. The bytes are typically different.
+        // 3. When multiplied by any value <=127, the product does not overflow.
+        factor = T(0);
+        for( unsigned i=0; i<sizeof(T)*8-7; i+=7 ) 
+            factor = T(factor | T(1)<<i);
+     }
+     //! Get ith member of set
+     T get( int i ) const {
+         // Create multiple of factor.  The & prevents overflow of the product.
+         return T((i&0x7F)*factor);
+     }        
+     //! True if set contains x
+     bool contains( T x ) const {
+         // True if x is a multiple of factor
+         return (x%factor)==0;
+     }
+};
+
+//! Specialization for pointer types.  The pointers are random and should not be dereferenced.
+template<typename T>
+class SparseValueSet<T*> {
+    SparseValueSet<ptrdiff_t> my_set;
+public:
+    T* get( int i ) const {return reinterpret_cast<T*>(my_set.get(i));} 
+    bool contains( T* x ) const {return my_set.contains(reinterpret_cast<ptrdiff_t>(x));}
+};
+
+//! Specialization for bool.  
+/** Checking bool for atomic read/write is pointless in practice, because 
+    there is no way to *not* atomically read or write a bool value. */
+template<>
+class SparseValueSet<bool> {
+public:
+    bool get( int i ) const {return i&1;}
+    bool contains( bool ) const {return true;}
+};
+
+#if _MSC_VER==1500 && !defined(__INTEL_COMPILER)
+    // VS2008/VC9 seems to have an issue: <limits> pulls in math.h
+    #pragma warning( push )
+    #pragma warning( disable: 4985 )
+#endif
+#include <limits> /* Need std::numeric_limits */
+#if _MSC_VER==1500 && !defined(__INTEL_COMPILER)
+    #pragma warning( pop )
+#endif
+
+//! Commonality inherited by specializations for floating-point types.
+template<typename T>
+class SparseFloatSet: NoAssign {
+    const T epsilon;
+public:
+    SparseFloatSet() : epsilon(std::numeric_limits<T>::epsilon()) {}
+    T get( int i ) const {
+        return i==0 ? T(0) : 1/T((i&0x7F)+1);
+    }
+    bool contains( T x ) const {
+        if( x==T(0) ) {
+            return true;
+        } else {
+            int j = int(1/x+T(0.5));
+            if( 0<j && j<=128 ) {
+                T error = x*T(j)-T(1);
+                // In the calculation above, if x was indeed generated by method get, the error should be 
+                // at most epsilon, because x is off by at most 1/2 ulp from its infinitely precise value, 
+                // j is exact, and the multiplication incurs at most another 1/2 ulp of round-off error.
+                if( -epsilon<=error && error<=epsilon ) {
+                    return true;
+                } else {
+                    REPORT("Warning: excessive floating-point error encountered j=%d x=%.15g error=%.15g\n",j,x,error);
+                }
+            }
+            return false;
+        }
+    };
+};
+
+template<> 
+class SparseValueSet<float>: public SparseFloatSet<float> {};
+
+template<> 
+class SparseValueSet<double>: public SparseFloatSet<double> {};
+
+template<typename T>
+class HammerAssignment: NoAssign {
+    tbb::atomic<T>& x;
+    const char* name;
+    SparseValueSet<T> set;
+public:   
+    HammerAssignment( tbb::atomic<T>& x_, const char* name_ ) : x(x_), name(name_) {}
+    void operator()( int k ) const {
+        const int n = 1000000;
+        if( k ) {
+            tbb::atomic<T> z;
+            AssertSameType( z=x, z );    // Check that return type from assignment is correct
+            for( int i=0; i<n; ++i ) {
+                // Read x atomically into z.
+                z = x;
+                if( !set.contains(z) ) {
+                    REPORT("ERROR: assignment of atomic<%s> is not atomic\n", name);
+                    ParallelError = true;
+                    return;
+                }
+            }
+        } else {
+            tbb::atomic<T> y;
+            for( int i=0; i<n; ++i ) {
+                // Get pseudo-random value. 
+                y = set.get(i);
+                // Write y atomically into x.
+                x = y;
+            }
+        }
+    }
+};
+
+// Compile-time check that a class method has the required signature.
+// Intended to check the assignment operator of tbb::atomic.
+template<typename T> void TestAssignmentSignature( T& (T::*)(const T&) ) {}
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // Suppress "conditional expression is constant" warning.
+    #pragma warning( push )
+    #pragma warning( disable: 4127 )
+#endif
+
+template<typename T>
+void TestAssignment( const char* name ) {
+    TestAssignmentSignature( &tbb::atomic<T>::operator= );
+    tbb::atomic<T> x;
+    x = T(0);
+    NativeParallelFor( 2, HammerAssignment<T>( x, name ) );
+#if __TBB_x86_32 && (__linux__ || __FreeBSD__ || _WIN32)
+    if( sizeof(T)==8 ) {
+        // Some compilers for IA-32 fail to provide 8-byte alignment of objects on the stack, 
+        // even if the object specifies 8-byte alignment.  On such platforms, the IA-32 implementation 
+        // of atomic<long long> and atomic<unsigned long long> use different tactics depending upon 
+        // whether the object is properly aligned or not.  The following abusive test ensures that we
+        // cover both the proper and improper alignment cases, one with the x above and the other with 
+        // the y below, perhaps not respectively.
+
+        // Allocate space big enough to always contain 8-byte locations that are aligned and misaligned.
+        char raw_space[15];
+        // Set delta to 0 if x is aligned, 4 otherwise.
+        uintptr_t delta = ((reinterpret_cast<uintptr_t>(&x)&7) ? 0 : 4); 
+        // y crosses 8-byte boundary if and only if x does not cross.
+        tbb::atomic<T>& y = *reinterpret_cast<tbb::atomic<T>*>((reinterpret_cast<uintptr_t>(&raw_space[7+delta])&~7u) - delta);
+        // Assertion checks that y really did end up somewhere inside "raw_space".
+        ASSERT( raw_space<=reinterpret_cast<char*>(&y), "y starts before raw_space" );
+        ASSERT( reinterpret_cast<char*>(&y+1) <= raw_space+sizeof(raw_space), "y starts after raw_space" );
+        y = T(0);
+        NativeParallelFor( 2, HammerAssignment<T>( y, name ) );
+    }
+#endif /* __TBB_x86_32 && (__linux__ || __FreeBSD__ || _WIN32) */
+}
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    #pragma warning( pop )
+#endif
+
+template<typename T>
+void TestParallel( const char* name ) {
+    TestLoadAndStoreFences<T>(name);
+    TestAssignment<T>(name);
+}
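For context, a minimal sketch (not part of the TBB sources) of the public atomic<T> operations that the serial parts of this test exercise:

    #include "tbb/atomic.h"

    int main() {
        tbb::atomic<int> counter;
        counter = 5;
        // compare_and_swap(new_value, comparand) stores new_value only if the current
        // value equals comparand, and always returns the previous value.
        int old1 = counter.compare_and_swap(9, 5);   // succeeds: counter becomes 9
        int old2 = counter.compare_and_swap(1, 5);   // fails: counter stays 9
        int old3 = counter.fetch_and_add(3);         // returns 9, counter becomes 12
        return (old1 == 5 && old2 == 9 && old3 == 9 && counter == 12) ? 0 : 1;
    }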
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_blocked_range.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_blocked_range.cpp
new file mode 100644 (file)
index 0000000..2ee2c88
--- /dev/null
@@ -0,0 +1,128 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/blocked_range.h"
+#include "harness_assert.h"
+
+// First test as much as we can without including other headers.
+// Doing so should catch problems caused by blocked_range.h failing to include headers it depends on.
+
+class AbstractValueType {
+    AbstractValueType() {}
+    int value;
+public: 
+    friend AbstractValueType MakeAbstractValueType( int i );
+    friend int GetValueOf( const AbstractValueType& v ) {return v.value;}
+};
+
+AbstractValueType MakeAbstractValueType( int i ) {
+    AbstractValueType x;
+    x.value = i;
+    return x;
+}
+
+std::size_t operator-( const AbstractValueType& u, const AbstractValueType& v ) {
+    return GetValueOf(u)-GetValueOf(v);
+}
+
+bool operator<( const AbstractValueType& u, const AbstractValueType& v ) {
+    return GetValueOf(u)<GetValueOf(v);
+}
+
+AbstractValueType operator+( const AbstractValueType& u, std::size_t offset ) {
+    return MakeAbstractValueType(GetValueOf(u)+int(offset));
+}
+
+static void SerialTest() {
+    for( int x=-10; x<10; ++x )
+        for( int y=-10; y<10; ++y ) {
+            AbstractValueType i = MakeAbstractValueType(x);
+            AbstractValueType j = MakeAbstractValueType(y);
+            for( std::size_t k=1; k<10; ++k ) {
+                typedef tbb::blocked_range<AbstractValueType> range_type;
+                range_type r( i, j, k );
+                AssertSameType( r.empty(), true );
+                AssertSameType( range_type::size_type(), std::size_t() );
+                AssertSameType( static_cast<range_type::const_iterator*>(0), static_cast<AbstractValueType*>(0) );
+                AssertSameType( r.begin(), MakeAbstractValueType(0) );
+                AssertSameType( r.end(), MakeAbstractValueType(0) );
+                ASSERT( r.empty()==(y<=x), NULL );
+                ASSERT( r.grainsize()==k, NULL );
+                if( x<=y ) {
+                    AssertSameType( r.is_divisible(), true );
+                    ASSERT( r.is_divisible()==(std::size_t(y-x)>k), NULL );
+                    ASSERT( r.size()==std::size_t(y-x), NULL );
+                    if( r.is_divisible() ) {
+                        tbb::blocked_range<AbstractValueType> r2(r,tbb::split());
+                        ASSERT( GetValueOf(r.begin())==x, NULL );
+                        ASSERT( GetValueOf(r.end())==GetValueOf(r2.begin()), NULL );
+                        ASSERT( GetValueOf(r2.end())==y, NULL );
+                        ASSERT( r.grainsize()==k, NULL );
+                        ASSERT( r2.grainsize()==k, NULL );
+                    }
+                }
+            }
+        }
+}
+
+#include "tbb/parallel_for.h"
+#include "harness.h"
+
+const int N = 1<<22;
+
+unsigned char Array[N];
+
+struct Striker {
+    // Note: we use <int> here instead of <long> in order to test for Quad 407676
+    void operator()( const tbb::blocked_range<int>& r ) const {
+        for( tbb::blocked_range<int>::const_iterator i=r.begin(); i!=r.end(); ++i )
+            ++Array[i];
+    }
+};
+
+void ParallelTest() {
+    for( int i=0; i<N; i=i<3 ? i+1 : i*3 ) {
+        const tbb::blocked_range<int> r( 0, i, 10 );
+        tbb::parallel_for( r, Striker() );
+        for( int k=0; k<N; ++k ) {
+            ASSERT( Array[k]==(k<i), NULL );
+            Array[k] = 0;
+        }
+    }
+}
+
+#include "tbb/task_scheduler_init.h"
+
+int TestMain () {
+    SerialTest();
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        tbb::task_scheduler_init init(p);
+        ParallelTest();
+    }
+    return Harness::Done;
+}
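
A minimal illustrative sketch of the pattern this test exercises: tbb::parallel_for recursively splits a blocked_range until each piece is no larger than its grainsize, then applies the body to the pieces in parallel. The Scale functor and ScaleAll function are hypothetical names, not part of TBB.

#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"

struct Scale {
    double* a;
    double factor;
    void operator()( const tbb::blocked_range<int>& r ) const {
        for( int i=r.begin(); i!=r.end(); ++i )
            a[i] *= factor;                  // each subrange is handled by one worker at a time
    }
};

void ScaleAll( double* a, int n, double factor ) {
    Scale body;
    body.a = a;
    body.factor = factor;
    tbb::parallel_for( tbb::blocked_range<int>( 0, n, 1024 ), body );   // grainsize 1024
}
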
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_blocked_range2d.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_blocked_range2d.cpp
new file mode 100644 (file)
index 0000000..5eb109c
--- /dev/null
@@ -0,0 +1,156 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/blocked_range2d.h"
+#include "harness_assert.h"
+
+// First test as much as we can without including other headers.
+// Doing so should catch problems arising from failing to include headers.
+
+template<typename Tag>
+class AbstractValueType {
+    AbstractValueType() {}
+    int value;
+public: 
+    template<typename OtherTag>
+    friend AbstractValueType<OtherTag> MakeAbstractValueType( int i );
+
+    template<typename OtherTag>
+    friend int GetValueOf( const AbstractValueType<OtherTag>& v ) ;
+};
+
+template<typename Tag>
+AbstractValueType<Tag> MakeAbstractValueType( int i ) {
+    AbstractValueType<Tag> x;
+    x.value = i;
+    return x;
+}
+
+template<typename Tag>
+int GetValueOf( const AbstractValueType<Tag>& v ) {return v.value;}
+
+template<typename Tag>
+bool operator<( const AbstractValueType<Tag>& u, const AbstractValueType<Tag>& v ) {
+    return GetValueOf(u)<GetValueOf(v);
+}
+
+template<typename Tag>
+std::size_t operator-( const AbstractValueType<Tag>& u, const AbstractValueType<Tag>& v ) {
+    return GetValueOf(u)-GetValueOf(v);
+}
+
+template<typename Tag>
+AbstractValueType<Tag> operator+( const AbstractValueType<Tag>& u, std::size_t offset ) {
+    return MakeAbstractValueType<Tag>(GetValueOf(u)+int(offset));
+}
+
+struct RowTag {};
+struct ColTag {};
+
+static void SerialTest() {
+    typedef AbstractValueType<RowTag> row_type;
+    typedef AbstractValueType<ColTag> col_type;
+    typedef tbb::blocked_range2d<row_type,col_type> range_type;
+    for( int rowx=-10; rowx<10; ++rowx ) {
+        for( int rowy=rowx; rowy<10; ++rowy ) {
+            row_type rowi = MakeAbstractValueType<RowTag>(rowx);
+            row_type rowj = MakeAbstractValueType<RowTag>(rowy);
+            for( int rowg=1; rowg<10; ++rowg ) {
+                for( int colx=-10; colx<10; ++colx ) {
+                    for( int coly=colx; coly<10; ++coly ) {
+                        col_type coli = MakeAbstractValueType<ColTag>(colx);
+                        col_type colj = MakeAbstractValueType<ColTag>(coly);
+                        for( int colg=1; colg<10; ++colg ) {
+                            range_type r( rowi, rowj, rowg, coli, colj, colg );
+                            AssertSameType( r.is_divisible(), true );
+                            AssertSameType( r.empty(), true );
+                            AssertSameType( static_cast<range_type::row_range_type::const_iterator*>(0), static_cast<row_type*>(0) );
+                            AssertSameType( static_cast<range_type::col_range_type::const_iterator*>(0), static_cast<col_type*>(0) );
+                            AssertSameType( r.rows(), tbb::blocked_range<row_type>( rowi, rowj, 1 ));
+                            AssertSameType( r.cols(), tbb::blocked_range<col_type>( coli, colj, 1 ));
+                            ASSERT( r.empty()==(rowx==rowy||colx==coly), NULL );
+                            ASSERT( r.is_divisible()==(rowy-rowx>rowg||coly-colx>colg), NULL );
+                            if( r.is_divisible() ) {
+                                range_type r2(r,tbb::split());
+                                if( GetValueOf(r2.rows().begin())==GetValueOf(r.rows().begin()) ) {
+                                    ASSERT( GetValueOf(r2.rows().end())==GetValueOf(r.rows().end()), NULL );
+                                    ASSERT( GetValueOf(r2.cols().begin())==GetValueOf(r.cols().end()), NULL );
+                                } else {
+                                    ASSERT( GetValueOf(r2.cols().end())==GetValueOf(r.cols().end()), NULL );
+                                    ASSERT( GetValueOf(r2.rows().begin())==GetValueOf(r.rows().end()), NULL );
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+#include "tbb/parallel_for.h"
+#include "harness.h"
+
+const int N = 1<<10;
+
+unsigned char Array[N][N];
+
+struct Striker {
+   // Note: we use <int> here instead of <long> in order to test for problems similar to Quad 407676
+    void operator()( const tbb::blocked_range2d<int>& r ) const {
+        for( tbb::blocked_range<int>::const_iterator i=r.rows().begin(); i!=r.rows().end(); ++i )
+            for( tbb::blocked_range<int>::const_iterator j=r.cols().begin(); j!=r.cols().end(); ++j )
+                ++Array[i][j];
+    }
+};
+
+void ParallelTest() {
+    for( int i=0; i<N; i=i<3 ? i+1 : i*3 ) {
+        for( int j=0; j<N; j=j<3 ? j+1 : j*3 ) {
+            const tbb::blocked_range2d<int> r( 0, i, 7, 0, j, 5 );
+            tbb::parallel_for( r, Striker() );
+            for( int k=0; k<N; ++k ) {
+                for( int l=0; l<N; ++l ) {
+                    ASSERT( Array[k][l]==(k<i && l<j), NULL );
+                    Array[k][l] = 0;
+                }
+            }   
+        }
+    }
+}
+
+#include "tbb/task_scheduler_init.h"
+
+int TestMain () {
+    SerialTest();
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        tbb::task_scheduler_init init(p);
+        ParallelTest();
+    }
+    return Harness::Done;
+}
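
The two-dimensional analogue, sketched under the assumption of a hypothetical row-major matrix: a split halves whichever axis is proportionally larger relative to its grainsize, and the body iterates rows() and cols() of the piece it receives.

#include "tbb/blocked_range2d.h"
#include "tbb/parallel_for.h"

struct Fill2D {
    unsigned char* m;                        // row-major matrix with ncols columns
    int ncols;
    void operator()( const tbb::blocked_range2d<int>& r ) const {
        for( int i=r.rows().begin(); i!=r.rows().end(); ++i )
            for( int j=r.cols().begin(); j!=r.cols().end(); ++j )
                m[i*ncols+j] = 1;
    }
};

void FillMatrix( unsigned char* m, int nrows, int ncols ) {
    Fill2D body;
    body.m = m;
    body.ncols = ncols;
    // rows: [0,nrows) with grainsize 16; cols: [0,ncols) with grainsize 16
    tbb::parallel_for( tbb::blocked_range2d<int>( 0, nrows, 16, 0, ncols, 16 ), body );
}
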
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_blocked_range3d.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_blocked_range3d.cpp
new file mode 100644 (file)
index 0000000..259cce7
--- /dev/null
@@ -0,0 +1,187 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/blocked_range3d.h"
+#include "harness_assert.h"
+
+// First test as much as we can without including other headers.
+// Doing so should catch problems arising from failing to include headers.
+
+template<typename Tag>
+class AbstractValueType {
+    AbstractValueType() {}
+    int value;
+public: 
+    template<typename OtherTag>
+    friend AbstractValueType<OtherTag> MakeAbstractValueType( int i );
+
+    template<typename OtherTag>
+    friend int GetValueOf( const AbstractValueType<OtherTag>& v ) ;
+};
+
+template<typename Tag>
+AbstractValueType<Tag> MakeAbstractValueType( int i ) {
+    AbstractValueType<Tag> x;
+    x.value = i;
+    return x;
+}
+
+template<typename Tag>
+int GetValueOf( const AbstractValueType<Tag>& v ) {return v.value;}
+
+template<typename Tag>
+bool operator<( const AbstractValueType<Tag>& u, const AbstractValueType<Tag>& v ) {
+    return GetValueOf(u)<GetValueOf(v);
+}
+
+template<typename Tag>
+std::size_t operator-( const AbstractValueType<Tag>& u, const AbstractValueType<Tag>& v ) {
+    return GetValueOf(u)-GetValueOf(v);
+}
+
+template<typename Tag>
+AbstractValueType<Tag> operator+( const AbstractValueType<Tag>& u, std::size_t offset ) {
+    return MakeAbstractValueType<Tag>(GetValueOf(u)+int(offset));
+}
+
+struct PageTag {};
+struct RowTag {};
+struct ColTag {};
+
+static void SerialTest() {
+    typedef AbstractValueType<PageTag> page_type;
+    typedef AbstractValueType<RowTag> row_type;
+    typedef AbstractValueType<ColTag> col_type;
+    typedef tbb::blocked_range3d<page_type,row_type,col_type> range_type;
+    for( int pagex=-4; pagex<4; ++pagex ) {
+        for( int pagey=pagex; pagey<4; ++pagey ) {
+            page_type pagei = MakeAbstractValueType<PageTag>(pagex);
+            page_type pagej = MakeAbstractValueType<PageTag>(pagey);
+            for( int pageg=1; pageg<4; ++pageg ) {
+                for( int rowx=-4; rowx<4; ++rowx ) {
+                    for( int rowy=rowx; rowy<4; ++rowy ) {
+                        row_type rowi = MakeAbstractValueType<RowTag>(rowx);
+                        row_type rowj = MakeAbstractValueType<RowTag>(rowy);
+                        for( int rowg=1; rowg<4; ++rowg ) {
+                            for( int colx=-4; colx<4; ++colx ) {
+                                for( int coly=colx; coly<4; ++coly ) {
+                                    col_type coli = MakeAbstractValueType<ColTag>(colx);
+                                    col_type colj = MakeAbstractValueType<ColTag>(coly);
+                                    for( int colg=1; colg<4; ++colg ) {
+                                        range_type r( pagei, pagej, pageg, rowi, rowj, rowg, coli, colj, colg );
+                                        AssertSameType( r.is_divisible(), true );
+
+                                        AssertSameType( r.empty(), true );
+
+                                        AssertSameType( static_cast<range_type::page_range_type::const_iterator*>(0), static_cast<page_type*>(0) );
+                                        AssertSameType( static_cast<range_type::row_range_type::const_iterator*>(0), static_cast<row_type*>(0) );
+                                        AssertSameType( static_cast<range_type::col_range_type::const_iterator*>(0), static_cast<col_type*>(0) );
+
+                                        AssertSameType( r.pages(), tbb::blocked_range<page_type>( pagei, pagej, 1 ));
+                                        AssertSameType( r.rows(), tbb::blocked_range<row_type>( rowi, rowj, 1 ));
+                                        AssertSameType( r.cols(), tbb::blocked_range<col_type>( coli, colj, 1 ));
+
+                                        ASSERT( r.empty()==(pagex==pagey||rowx==rowy||colx==coly), NULL );
+
+                                        ASSERT( r.is_divisible()==(pagey-pagex>pageg||rowy-rowx>rowg||coly-colx>colg), NULL );
+
+                                        if( r.is_divisible() ) {
+                                            range_type r2(r,tbb::split());
+                                            if( (GetValueOf(r2.pages().begin())==GetValueOf(r.pages().begin())) && (GetValueOf(r2.rows().begin())==GetValueOf(r.rows().begin())) ) {
+                                                ASSERT( GetValueOf(r2.pages().end())==GetValueOf(r.pages().end()), NULL );
+                                                ASSERT( GetValueOf(r2.rows().end())==GetValueOf(r.rows().end()), NULL );
+                                                ASSERT( GetValueOf(r2.cols().begin())==GetValueOf(r.cols().end()), NULL );
+                                            } else {
+                                                if ( (GetValueOf(r2.pages().begin())==GetValueOf(r.pages().begin())) && (GetValueOf(r2.cols().begin())==GetValueOf(r.cols().begin())) ) {
+                                                    ASSERT( GetValueOf(r2.pages().end())==GetValueOf(r.pages().end()), NULL );
+                                                    ASSERT( GetValueOf(r2.cols().end())==GetValueOf(r.cols().end()), NULL );
+                                                    ASSERT( GetValueOf(r2.rows().begin())==GetValueOf(r.rows().end()), NULL );
+                                                } else {
+                                                   ASSERT( GetValueOf(r2.rows().end())==GetValueOf(r.rows().end()), NULL );
+                                                   ASSERT( GetValueOf(r2.cols().end())==GetValueOf(r.cols().end()), NULL );
+                                                   ASSERT( GetValueOf(r2.pages().begin())==GetValueOf(r.pages().end()), NULL );
+                                                }
+                                            }
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+#include "tbb/parallel_for.h"
+#include "harness.h"
+
+const int N = 1<<5;
+
+unsigned char Array[N][N][N];
+
+struct Striker {
+   // Note: we use <int> here instead of <long> in order to test for problems similar to Quad 407676
+    void operator()( const tbb::blocked_range3d<int>& r ) const {
+        for( tbb::blocked_range<int>::const_iterator i=r.pages().begin(); i!=r.pages().end(); ++i )
+            for( tbb::blocked_range<int>::const_iterator j=r.rows().begin(); j!=r.rows().end(); ++j )
+                for( tbb::blocked_range<int>::const_iterator k=r.cols().begin(); k!=r.cols().end(); ++k )
+                    ++Array[i][j][k];
+    }
+};
+
+void ParallelTest() {
+    for( int i=0; i<N; i=i<3 ? i+1 : i*3 ) {
+        for( int j=0; j<N; j=j<3 ? j+1 : j*3 ) {
+            for( int k=0; k<N; k=k<3 ? k+1 : k*3 ) {
+                const tbb::blocked_range3d<int> r( 0, i, 5, 0, j, 3, 0, k, 1 );
+                tbb::parallel_for( r, Striker() );
+                for( int l=0; l<N; ++l ) {
+                    for( int m=0; m<N; ++m ) {
+                        for( int n=0; n<N; ++n ) {
+                             ASSERT( Array[l][m][n]==(l<i && m<j && n<k), NULL );
+                             Array[l][m][n] = 0;
+                        }
+                    }
+                }   
+            }
+        }
+    }
+}
+
+#include "tbb/task_scheduler_init.h"
+
+int TestMain () {
+    SerialTest(); 
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        tbb::task_scheduler_init init(p);
+        ParallelTest();
+    }
+    return Harness::Done;
+}
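
The three-dimensional range adds a pages() axis in front of rows() and cols(); a brief hypothetical sketch of the nine-argument constructor and the traversal order used above.

#include "tbb/blocked_range3d.h"
#include "tbb/parallel_for.h"

struct Visit3D {
    void operator()( const tbb::blocked_range3d<int>& r ) const {
        for( int p=r.pages().begin(); p!=r.pages().end(); ++p )
            for( int i=r.rows().begin(); i!=r.rows().end(); ++i )
                for( int j=r.cols().begin(); j!=r.cols().end(); ++j )
                    ; // touch element (p,i,j) here
    }
};

void VisitAll( int npages, int nrows, int ncols ) {
    // arguments: page begin/end/grainsize, row begin/end/grainsize, col begin/end/grainsize
    tbb::parallel_for( tbb::blocked_range3d<int>( 0, npages, 4, 0, nrows, 16, 0, ncols, 16 ), Visit3D() );
}
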
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_cache_aligned_allocator.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_cache_aligned_allocator.cpp
new file mode 100644 (file)
index 0000000..cd207ca
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Test whether cache_aligned_allocator, tbb_allocator, and zero_allocator satisfy the allocator requirements checked in test_allocator.h.
+
+#include "tbb/cache_aligned_allocator.h"
+#include "tbb/tbb_allocator.h"
+
+#define HARNESS_NO_PARSE_COMMAND_LINE 1
+// The real body of the test is in test_allocator.h, included next:
+#include "test_allocator.h"
+
+template<>
+struct is_zero_filling<tbb::zero_allocator<void> > {
+    static const bool value = true;
+};
+
+int TestMain () {
+    int result = TestMain<tbb::cache_aligned_allocator<void> >();
+    result += TestMain<tbb::tbb_allocator<void> >();
+    result += TestMain<tbb::zero_allocator<void> >();
+    ASSERT( !result, NULL );
+    return Harness::Done;
+}
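
A short sketch of what distinguishes the three allocators exercised here (the typedef names are hypothetical): cache_aligned_allocator aligns and pads every allocation to whole cache lines so separate allocations cannot falsely share a line, tbb_allocator forwards to the scalable allocator when tbbmalloc is available and to malloc/free otherwise, and zero_allocator additionally zero-fills the memory it hands out.

#include <vector>
#include "tbb/cache_aligned_allocator.h"
#include "tbb/tbb_allocator.h"

// Buffer starts on a cache-line boundary and occupies whole cache lines.
typedef std::vector<float, tbb::cache_aligned_allocator<float> > AlignedVector;

// Buffer comes from the scalable allocator if tbbmalloc is loaded, else from malloc.
typedef std::vector<float, tbb::tbb_allocator<float> > ScalableVector;

// Like tbb_allocator, but allocate() returns zero-filled memory.
typedef std::vector<float, tbb::zero_allocator<float> > ZeroedVector;

void AllocatorSketch() {
    AlignedVector a( 1000, 1.0f );
    ScalableVector s( 1000, 2.0f );
    ZeroedVector z( 1000 );
    a[0] += s[0] + z[0];
}
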
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_cache_aligned_allocator_STL.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_cache_aligned_allocator_STL.cpp
new file mode 100644 (file)
index 0000000..7e9d167
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Test whether cache_aligned_allocator works with some of the host's STL containers.
+
+#include "tbb/cache_aligned_allocator.h"
+#include "tbb/tbb_allocator.h"
+
+#define HARNESS_NO_PARSE_COMMAND_LINE 1
+#include "test_allocator_STL.h"
+
+int TestMain () {
+    TestAllocatorWithSTL<tbb::cache_aligned_allocator<void> >();
+    TestAllocatorWithSTL<tbb::tbb_allocator<void> >();
+    TestAllocatorWithSTL<tbb::zero_allocator<void> >();
+    return Harness::Done;
+}
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_cilk_interop.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_cilk_interop.cpp
new file mode 100644 (file)
index 0000000..382f519
--- /dev/null
@@ -0,0 +1,216 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/tbb_config.h"
+#include "harness.h"
+
+#if __TBB_SURVIVE_THREAD_SWITCH && __INTEL_COMPILER >= 1200
+
+static const int N = 14;
+static const int P_outer = 4;
+static const int P_nested = 2;
+
+#include <cilk/cilk.h>
+#define private public
+#include "tbb/task.h"
+#undef private
+#include "tbb/task_scheduler_init.h"
+#include <cstdio>
+#include <cassert>
+
+enum tbb_sched_injection_mode_t {
+    tbbsched_none = 0,
+    tbbsched_explicit_only = 1,
+    tbbsched_auto_only = 2,
+    tbbsched_mixed = 3
+};
+
+tbb_sched_injection_mode_t g_sim = tbbsched_none;
+
+bool g_sandwich = false;
+
+// A time delay routine
+void Delay( int n ) {
+    static volatile int Global;
+    for( int k=0; k<10000; ++k )
+        for( int i=0; i<n; ++i )
+            ++Global;
+}
+
+int SerialFib( int n ) {
+    int a=0, b=1;
+    for( int i=0; i<n; ++i ) {
+        b += a;
+        a = b-a;
+    }
+    return a;
+}
+
+int F = SerialFib(N);
+
+int Fib ( int n ) {
+    if( n < 2 ) {
+        if ( g_sim ) {
+            tbb::task_scheduler_init tsi(P_nested);
+        }
+        return n;
+    } else {
+        tbb::task_scheduler_init *tsi = NULL;
+        tbb::task *cur = NULL;
+        if ( g_sim ) {
+            if ( n % 2 == 0 ) {
+                if ( g_sim == tbbsched_auto_only || (g_sim == tbbsched_mixed && n % 4 == 0) ) {
+                    // Trigger TBB scheduler auto-initialization
+                    cur = &tbb::task::self();
+                }
+                else {
+                    ASSERT ( g_sim == tbbsched_explicit_only || (g_sim == tbbsched_mixed && n % 4 != 0), NULL );
+                    // Initialize TBB scheduler explicitly
+                    tsi = new tbb::task_scheduler_init(P_nested);
+                }
+            }
+        }
+        int x, y;
+        x = cilk_spawn Fib(n-2);
+        y = cilk_spawn Fib(n-1);
+        cilk_sync;
+        if ( tsi )
+            delete tsi;
+        return x+y;
+    }
+}
+
+int TBB_Fib( int n );
+
+class FibCilkSubtask: public tbb::task {
+    int n;
+    int& result;
+    /*override*/ task* execute() {
+        if( n<2 ) {
+            result = n;
+        } else {
+            int x, y;
+            x = cilk_spawn TBB_Fib(n-2);
+            y = cilk_spawn TBB_Fib(n-1);
+            cilk_sync;
+            result = x+y;
+        }
+        return NULL;
+    }
+public:
+    FibCilkSubtask( int& result_, int n_ ) : result(result_), n(n_) {}
+};
+
+class FibTask: public tbb::task {
+    int n;
+    int& result;
+    /*override*/ task* execute() {
+        if( !g_sandwich && n<2 ) {
+            result = n;
+        } else {
+            int x,y;
+            tbb::task_scheduler_init init(P_nested);
+            task* self0 = &task::self();
+            set_ref_count( 3 );
+            if ( g_sandwich ) {
+                spawn (*new( allocate_child() ) FibCilkSubtask(x,n-1));
+                spawn (*new( allocate_child() ) FibCilkSubtask(y,n-2));
+            }
+            else {
+                spawn (*new( allocate_child() ) FibTask(x,n-1));
+                spawn (*new( allocate_child() ) FibTask(y,n-2));
+            }
+            wait_for_all(); 
+            task* self1 = &task::self();
+            ASSERT( self0 == self1, "failed to preserve TBB TLS" );
+            result = x+y;
+        }
+        return NULL;
+    }
+public:
+    FibTask( int& result_, int n_ ) : result(result_), n(n_) {}
+};
+
+int TBB_Fib( int n ) {
+    if( n<2 ) {
+        return n;
+    } else {
+        int result;
+        tbb::task_scheduler_init init(P_nested);
+        tbb::task::spawn_root_and_wait(*new( tbb::task::allocate_root()) FibTask(result,n) );
+        return result;
+    }
+}
+
+void RunCilkOnly ( tbb_sched_injection_mode_t sim ) {
+    g_sim = sim;
+    int m = Fib(N);
+    ASSERT( m == F, NULL );
+}
+
+struct FibBody : NoAssign, Harness::NoAfterlife {
+    void operator() ( int ) const {
+        int m = Fib(N);
+        ASSERT( m == F, NULL );
+    }
+};
+
+void RunCilkOnlyConcurrently ( tbb_sched_injection_mode_t sim ) {
+    g_sim = sim;
+    NativeParallelFor( P_outer, FibBody() );
+}
+
+void RunSandwich( bool sandwich ) { 
+    g_sandwich = sandwich;
+    tbb::task_scheduler_init init(P_outer);
+    int m = TBB_Fib(N);
+    ASSERT( g_sandwich == sandwich, "Memory corruption detected" );
+    ASSERT( m == F, NULL );
+}
+
+int TestMain () {
+    for ( int i = 0; i < 100; ++i )
+        RunCilkOnlyConcurrently( tbbsched_none );
+    RunCilkOnly( tbbsched_none );
+    RunCilkOnly( tbbsched_explicit_only );
+    RunCilkOnly( tbbsched_auto_only );
+    RunCilkOnly( tbbsched_mixed );
+    RunSandwich( false );
+    for ( int i = 0; i < 10; ++i )
+        RunSandwich( true );
+    __cilkrts_end_cilk();
+    return Harness::Done;
+}
+
+#else /* No Cilk interop */
+
+int TestMain () {
+    return Harness::Skipped;
+}
+
+#endif /* No Cilk interop */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_combinable.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_combinable.cpp
new file mode 100644 (file)
index 0000000..b395f04
--- /dev/null
@@ -0,0 +1,460 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#define __TBB_EXTRA_DEBUG 1 // for concurrent_hash_map
+#include "tbb/combinable.h"
+#include "tbb/task_scheduler_init.h"
+#include "tbb/parallel_for.h"
+#include "tbb/parallel_reduce.h"
+#include "tbb/blocked_range.h"
+#include "tbb/tick_count.h"
+#include "tbb/tbb_allocator.h"
+#include "tbb/tbb_thread.h"
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <cstring>
+#include <vector>
+#include <utility>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+#include "harness_assert.h"
+#include "harness.h"
+
+#if __TBB_GCC_WARNING_SUPPRESSION_ENABLED
+#pragma GCC diagnostic ignored "-Wuninitialized"
+#endif
+
+static tbb::atomic<int> construction_counter;
+static tbb::atomic<int> destruction_counter;
+
+const int REPETITIONS = 10;
+const int N = 100000;
+const int VALID_NUMBER_OF_KEYS = 100;
+const double EXPECTED_SUM = (REPETITIONS + 1) * N;
+
+//
+// A minimal class
+// Defines a default constructor, a copy constructor, and operator=,
+// and allows implicit operator&.
+//
+
+class minimal {
+private:
+    int my_value;
+public:
+    minimal(int val=0) : my_value(val) { ++construction_counter; }
+    minimal( const minimal &m ) : my_value(m.my_value) { ++construction_counter; }
+    minimal& operator=(const minimal& other) { my_value = other.my_value; return *this; }
+    minimal& operator+=(const minimal& other) { my_value += other.my_value; return *this; }
+    operator int() const { return my_value; }
+    ~minimal() { ++destruction_counter; }
+    void set_value( const int i ) { my_value = i; }
+    int value( ) const { return my_value; }
+};
+
+//// functors for initialization and combine
+
+// Addition
+template <typename T>
+struct FunctorAddFinit {
+    T operator()() { return 0; }
+};
+
+template <typename T>
+struct FunctorAddFinit7 {
+    T operator()() { return 7; }
+};
+
+template <typename T>
+struct FunctorAddCombine {
+    T operator()(T left, T right ) const {
+        return left + right;
+    }
+};
+
+template <typename T>
+struct FunctorAddCombineRef {
+    T operator()(const T& left, const T& right ) const {
+        return left + right;
+    }
+};
+
+template <typename T>
+T my_finit( ) { return 0; }
+
+template <typename T>
+T my_combine( T left, T right) { return left + right; }
+
+template <typename T>
+T my_combine_ref( const T &left, const T &right) { return left + right; }
+
+template <typename T>
+class CombineEachHelper {
+public:
+    CombineEachHelper(T& _result) : my_result(_result) {}
+    void operator()(const T& new_bit) { my_result +=  new_bit; }
+    CombineEachHelper& operator=(const CombineEachHelper& other) { 
+        my_result = other.my_result; 
+        return *this; 
+    }
+private:
+    T& my_result;
+};
+
+template <typename T>
+class CombineEachHelperCnt {
+public:
+    CombineEachHelperCnt(T& _result, int& _nbuckets) : my_result(_result), nBuckets(_nbuckets) {}
+    void operator()(const T& new_bit) { my_result +=  new_bit; ++nBuckets; }
+    CombineEachHelperCnt& operator=(const CombineEachHelperCnt& other) { 
+        my_result =  other.my_result; 
+        nBuckets = other.nBuckets;
+        return *this; 
+    }
+private:
+    T& my_result;
+    int& nBuckets;
+};
+
+template <typename T>
+class CombineEachVectorHelper {
+public:
+    typedef std::vector<T, tbb::tbb_allocator<T> > ContainerType;
+    CombineEachVectorHelper(T& _result) : my_result(_result) { }
+    void operator()(const ContainerType& new_bit) { 
+        for(typename ContainerType::const_iterator ci = new_bit.begin(); ci != new_bit.end(); ++ci) {
+            my_result +=  *ci;
+        }
+    }
+    CombineEachVectorHelper& operator=(const CombineEachVectorHelper& other) { my_result=other.my_result; return *this;}
+private:
+    T& my_result;
+};
+
+
+
+//// end functors
+
+template< typename T >
+void run_serial_scalar_tests(const char *test_name) {
+    tbb::tick_count t0;
+    T sum = 0;
+
+    REMARK("Testing serial %s... ", test_name);
+    for (int t = -1; t < REPETITIONS; ++t) {
+        if (Verbose && t == 0) t0 = tbb::tick_count::now(); 
+        for (int i = 0; i < N; ++i) {
+            sum += 1; 
+        }
+    }
+    double ResultValue = sum;
+    ASSERT( EXPECTED_SUM == ResultValue, NULL);
+    REMARK("done\nserial %s, 0, %g, %g\n", test_name, ResultValue, ( tbb::tick_count::now() - t0).seconds());
+}
+
+
+template <typename T>
+class ParallelScalarBody: NoAssign {
+    
+    tbb::combinable<T> &sums;
+public:
+
+    ParallelScalarBody ( tbb::combinable<T> &_sums ) : sums(_sums) { }
+
+    void operator()( const tbb::blocked_range<int> &r ) const {
+        for (int i = r.begin(); i != r.end(); ++i) { 
+            bool was_there;
+            T& my_local = sums.local(was_there);
+            if(!was_there) my_local = 0;
+             my_local +=  1 ;
+        }
+    }
+   
+};
+
+// parallel body with no test for first access.
+template <typename T>
+class ParallelScalarBodyNoInit: NoAssign {
+    
+    tbb::combinable<T> &sums;
+public:
+
+    ParallelScalarBodyNoInit ( tbb::combinable<T> &_sums ) : sums(_sums) { }
+
+    void operator()( const tbb::blocked_range<int> &r ) const {
+        for (int i = r.begin(); i != r.end(); ++i) { 
+             sums.local() +=  1 ;
+        }
+    }
+   
+};
+
+template< typename T >
+void RunParallelScalarTests(const char *test_name) {
+
+    tbb::task_scheduler_init init(tbb::task_scheduler_init::deferred);
+
+    for (int p = MinThread; p <= MaxThread; ++p) { 
+
+
+        if (p == 0) continue;
+
+        REMARK("Testing parallel %s on %d thread(s)... ", test_name, p); 
+        init.initialize(p);
+
+        tbb::tick_count t0;
+
+        T assign_sum(0);
+
+        T combine_sum(0);
+
+        T combine_ref_sum(0);
+
+        T combine_each_sum(0);
+
+        T combine_finit_sum(0);
+
+        for (int t = -1; t < REPETITIONS; ++t) {
+            if (Verbose && t == 0) t0 = tbb::tick_count::now(); 
+
+            tbb::combinable<T> sums;
+            FunctorAddFinit<T> my_finit_decl;
+            tbb::combinable<T> finit_combinable(my_finit_decl);
+                                    
+
+            tbb::parallel_for( tbb::blocked_range<int>( 0, N, 10000 ), ParallelScalarBodyNoInit<T>( finit_combinable ) );
+            tbb::parallel_for( tbb::blocked_range<int>( 0, N, 10000 ), ParallelScalarBody<T>( sums ) );
+
+            // Use combine
+            combine_sum +=  sums.combine(my_combine<T>);
+            combine_ref_sum +=  sums.combine(my_combine_ref<T>);
+
+            CombineEachHelper<T> my_helper(combine_each_sum);
+            sums.combine_each(my_helper);
+           
+            // test assignment
+            tbb::combinable<T> assigned;
+            assigned = sums;
+
+            assign_sum +=  assigned.combine(my_combine<T>);
+
+            combine_finit_sum += finit_combinable.combine(my_combine<T>);
+        }
+
+        ASSERT( EXPECTED_SUM == combine_sum, NULL);
+        ASSERT( EXPECTED_SUM == combine_ref_sum, NULL);
+        ASSERT( EXPECTED_SUM == assign_sum, NULL);
+        ASSERT( EXPECTED_SUM == combine_finit_sum, NULL);
+
+        REMARK("done\nparallel %s, %d, %g, %g\n", test_name, p, static_cast<double>(combine_sum), 
+                                                      ( tbb::tick_count::now() - t0).seconds());
+        init.terminate();
+    }
+}
+
+
+template <typename T>
+class ParallelVectorForBody: NoAssign {
+    
+    tbb::combinable< std::vector<T, tbb::tbb_allocator<T> > > &locals;
+public:
+
+    ParallelVectorForBody ( tbb::combinable< std::vector<T, tbb::tbb_allocator<T> > > &_locals ) : locals(_locals) { }
+
+    void operator()( const tbb::blocked_range<int> &r ) const {
+        T one = 1;
+
+        for (int i = r.begin(); i < r.end(); ++i) {
+            locals.local().push_back( one );
+        }
+    }
+   
+};
+
+template< typename T >
+void RunParallelVectorTests(const char *test_name) {
+    tbb::tick_count t0;
+    tbb::task_scheduler_init init(tbb::task_scheduler_init::deferred);
+    typedef std::vector<T, tbb::tbb_allocator<T> > ContainerType;
+
+    for (int p = MinThread; p <= MaxThread; ++p) { 
+
+        if (p == 0) continue;
+        REMARK("Testing parallel %s on %d thread(s)... ", test_name, p);
+        init.initialize(p);
+
+        T sum = 0;
+        T sum2 = 0;
+        T sum3 = 0;
+
+        for (int t = -1; t < REPETITIONS; ++t) {
+            if (Verbose && t == 0) t0 = tbb::tick_count::now(); 
+            typedef typename tbb::combinable< ContainerType > CombinableType;
+            CombinableType vs;
+
+            tbb::parallel_for ( tbb::blocked_range<int> (0, N, 10000), ParallelVectorForBody<T>( vs ) );
+
+            // copy construct
+            CombinableType vs2(vs); // this causes an assertion failure, related to allocators...
+
+            // assign
+            CombinableType vs3;
+            vs3 = vs;
+
+            CombineEachVectorHelper<T> MyCombineEach(sum);
+            vs.combine_each(MyCombineEach);
+
+            CombineEachVectorHelper<T> MyCombineEach2(sum2);
+            vs2.combine_each(MyCombineEach2);
+
+            CombineEachVectorHelper<T> MyCombineEach3(sum3);
+            vs3.combine_each(MyCombineEach3);
+            // combine_each sums all elements of each vector into the result.
+        }
+
+        double ResultValue = sum;
+        ASSERT( EXPECTED_SUM == ResultValue, NULL);
+        ResultValue = sum2;
+        ASSERT( EXPECTED_SUM == ResultValue, NULL);
+        ResultValue = sum3;
+        ASSERT( EXPECTED_SUM == ResultValue, NULL);
+        REMARK("done\nparallel %s, %d, %g, %g\n", test_name, p, ResultValue, ( tbb::tick_count::now() - t0).seconds());
+        init.terminate();
+    }
+}
+
+#include "harness_barrier.h"
+
+Harness::SpinBarrier sBarrier;
+
+struct Body : NoAssign {
+    tbb::combinable<int>* locals;
+    const int nthread;
+    const int nIters;
+    Body( int nthread_, int niters_ ) : nthread(nthread_), nIters(niters_) { sBarrier.initialize(nthread_); }
+
+
+    void operator()(int thread_id ) const {
+        bool existed;
+        sBarrier.wait();
+        for(int i = 0; i < nIters; ++i ) {
+            existed = thread_id & 1;
+            int oldval = locals->local(existed);
+            ASSERT(existed == (i > 0), "Error on first reference");
+            ASSERT(!existed || (oldval == thread_id), "Error on fetched value");
+            existed = thread_id & 1;
+            locals->local(existed) = thread_id;
+            ASSERT(existed, "Error on assignment");
+        }
+    }
+};
+
+void
+TestLocalAllocations( int nthread ) {
+    ASSERT(nthread > 0, "nthread must be positive");
+#define NITERATIONS 1000
+    Body myBody(nthread, NITERATIONS);
+    tbb::combinable<int> myCombinable;
+    myBody.locals = &myCombinable;
+
+    NativeParallelFor( nthread, myBody );
+
+    int mySum = 0;
+    int mySlots = 0;
+    CombineEachHelperCnt<int> myCountCombine(mySum, mySlots);
+    myCombinable.combine_each(myCountCombine);
+
+    ASSERT(nthread == mySlots, "Incorrect number of slots");
+    ASSERT(mySum == (nthread - 1) * nthread / 2, "Incorrect values in result");
+}
+
+
+void 
+RunParallelTests() {
+    RunParallelScalarTests<int>("int");
+    RunParallelScalarTests<double>("double");
+    RunParallelScalarTests<minimal>("minimal");
+    RunParallelVectorTests<int>("std::vector<int, tbb::tbb_allocator<int> >");
+    RunParallelVectorTests<double>("std::vector<double, tbb::tbb_allocator<double> >");
+}
+
+template <typename T>
+void
+RunAssignmentAndCopyConstructorTest(const char *test_name) {
+    REMARK("Testing assignment and copy construction for %s\n", test_name);
+
+    // test creation with finit function (combine returns finit return value if no threads have created locals)
+    FunctorAddFinit7<T> my_finit7_decl;
+    tbb::combinable<T> create2(my_finit7_decl);
+    ASSERT(7 == create2.combine(my_combine<T>), NULL);
+
+    // test copy construction with function initializer
+    tbb::combinable<T> copy2(create2);
+    ASSERT(7 == copy2.combine(my_combine<T>), NULL);
+
+    // test copy assignment with function initializer
+    FunctorAddFinit<T> my_finit_decl;
+    tbb::combinable<T> assign2(my_finit_decl);
+    assign2 = create2;
+    ASSERT(7 == assign2.combine(my_combine<T>), NULL);
+}
+
+void
+RunAssignmentAndCopyConstructorTests() {
+    REMARK("Running assignment and copy constructor tests\n");
+    RunAssignmentAndCopyConstructorTest<int>("int");
+    RunAssignmentAndCopyConstructorTest<double>("double");
+    RunAssignmentAndCopyConstructorTest<minimal>("minimal");
+}
+
+int TestMain () {
+    if (MaxThread > 0) {
+        RunParallelTests();
+    }
+    RunAssignmentAndCopyConstructorTests();
+    for(int i = 1 <= MinThread ? MinThread : 1; i <= MaxThread; ++i) {
+        REMARK("Testing local() allocation with nthreads=%d\n", i);
+        for(int j = 0; j < 100; ++j) {
+            TestLocalAllocations(i);
+        }
+    }
+    return Harness::Done;
+}
+
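A minimal sketch of the tbb::combinable pattern these tests exercise, with hypothetical names ZeroInt, CountBody, and ParallelCount: local() returns a lazily created, thread-private slot (initialized by the optional finit functor), so the body needs no locking, and combine() folds all slots into one result afterwards.

#include <functional>
#include "tbb/combinable.h"
#include "tbb/parallel_for.h"
#include "tbb/blocked_range.h"

int ZeroInt() { return 0; }                  // finit: initial value for each thread's slot

struct CountBody {
    tbb::combinable<int>& partial;
    CountBody( tbb::combinable<int>& p ) : partial(p) {}
    void operator()( const tbb::blocked_range<int>& r ) const {
        partial.local() += int( r.end() - r.begin() );   // thread-private, so no locking needed
    }
};

int ParallelCount( int n ) {
    tbb::combinable<int> partial( ZeroInt );             // each new slot starts at 0
    tbb::parallel_for( tbb::blocked_range<int>( 0, n ), CountBody( partial ) );
    return partial.combine( std::plus<int>() );          // fold the per-thread partial counts
}
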
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_concurrent_hash_map.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_concurrent_hash_map.cpp
new file mode 100644 (file)
index 0000000..79dc351
--- /dev/null
@@ -0,0 +1,918 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifndef TBB_USE_PERFORMANCE_WARNINGS
+#define TBB_USE_PERFORMANCE_WARNINGS 1
+#endif
+
+// Our tests usually include the header under test first.  But this test needs
+// to use the preprocessor to edit the identifier runtime_warning in concurrent_hash_map.h.
+// Hence we include a few other headers before doing the abusive edit.
+#include "tbb/tbb_stddef.h" /* Defines runtime_warning */
+#include "harness_assert.h" /* Prerequisite for defining hooked_warning */
+
+// The symbol internal::runtime_warning is normally an entry point into the TBB library.
+// Here, for the sake of testing, we define it to be hooked_warning, a routine peculiar to this unit test.
+#define runtime_warning hooked_warning
+
+static bool bad_hashing = false;
+
+namespace tbb { 
+    namespace internal {
+        static void hooked_warning( const char* /*format*/, ... ) {
+            ASSERT(bad_hashing, "unexpected runtime_warning: bad hashing");
+        }
+    } // namespace internal
+} // namespace tbb
+#define __TBB_EXTRA_DEBUG 1 // enables additional checks
+#include "tbb/concurrent_hash_map.h"
+
+// Restore runtime_warning as an entry point into the TBB library.
+#undef runtime_warning
+
+namespace Jungle {
+    struct Tiger {};
+    size_t tbb_hasher( const Tiger& ) {return 0;}
+}
+
+#if !defined(_MSC_VER) || _MSC_VER>=1400 || __INTEL_COMPILER
+void test_ADL() {
+    tbb::tbb_hash_compare<Jungle::Tiger>::hash(Jungle::Tiger()); // Instantiation chain finds tbb_hasher via Argument Dependent Lookup
+}
+#endif
+
+struct UserDefinedKeyType {
+};
+
+namespace tbb {
+    // Test whether tbb_hash_compare can be partially specialized as stated in Reference manual.
+    template<> struct tbb_hash_compare<UserDefinedKeyType> {
+        size_t hash( UserDefinedKeyType ) const {return 0;}
+        bool equal( UserDefinedKeyType /*x*/, UserDefinedKeyType /*y*/ ) {return true;}
+    };
+}
+
+tbb::concurrent_hash_map<UserDefinedKeyType,int> TestInstantiationWithUserDefinedKeyType;
+
+// Test whether a sufficient set of headers was included to instantiate a concurrent_hash_map. OSS Bug #120 (& #130):
+// http://www.threadingbuildingblocks.org/bug_desc.php?id=120
+tbb::concurrent_hash_map<std::pair<std::pair<int,std::string>,const char*>,int> TestInstantiation;
+
+#include "tbb/parallel_for.h"
+#include "tbb/blocked_range.h"
+#include "tbb/atomic.h"
+#include "tbb/tick_count.h"
+#include "harness.h"
+#include "harness_allocator.h"
+
+class MyException : public std::bad_alloc {
+public:
+    virtual const char *what() const throw() { return "out of items limit"; }
+    virtual ~MyException() throw() {}
+};
+
+/** Has tightly controlled interface so that we can verify
+    that concurrent_hash_map uses only the required interface. */
+class MyKey {
+private:
+    void operator=( const MyKey&  );    // Deny access
+    int key;
+    friend class MyHashCompare;
+    friend class YourHashCompare;
+public:
+    static MyKey make( int i ) {
+        MyKey result;
+        result.key = i;
+        return result;
+    }
+    int value_of() const {return key;}
+};
+
+tbb::atomic<long> MyDataCount;
+long MyDataCountLimit = 0;
+
+class MyData {
+protected:
+    friend class MyData2;
+    int data;
+    enum state_t {
+        LIVE=0x1234,
+        DEAD=0x5678
+    } my_state;
+    void operator=( const MyData& );    // Deny access
+public:
+    MyData(int i = 0) {
+        my_state = LIVE;
+        data = i;
+        if(MyDataCountLimit && MyDataCount + 1 >= MyDataCountLimit)
+            __TBB_THROW( MyException() );
+        ++MyDataCount;
+    }
+    MyData( const MyData& other ) {
+        ASSERT( other.my_state==LIVE, NULL );
+        my_state = LIVE;
+        data = other.data;
+        if(MyDataCountLimit && MyDataCount + 1 >= MyDataCountLimit)
+            __TBB_THROW( MyException() );
+        ++MyDataCount;
+    }
+    ~MyData() {
+        --MyDataCount;
+        my_state = DEAD;
+    }
+    static MyData make( int i ) {   
+        MyData result;
+        result.data = i;
+        return result;
+    }
+    int value_of() const {
+        ASSERT( my_state==LIVE, NULL );
+        return data;
+    }
+    void set_value( int i ) {
+        ASSERT( my_state==LIVE, NULL );
+        data = i;
+    }
+    bool operator==( const MyData& other ) const {
+        ASSERT( other.my_state==LIVE, NULL );
+        ASSERT( my_state==LIVE, NULL );
+        return data == other.data;
+    }
+};
+
+class MyData2 : public MyData {
+public:
+    MyData2( ) {}
+    MyData2( const MyData& other ) {
+        ASSERT( other.my_state==LIVE, NULL );
+        ASSERT( my_state==LIVE, NULL );
+        data = other.data;
+    }
+    void operator=( const MyData& other ) {
+        ASSERT( other.my_state==LIVE, NULL );
+        ASSERT( my_state==LIVE, NULL );
+        data = other.data;
+    }
+    void operator=( const MyData2& other ) {
+        ASSERT( other.my_state==LIVE, NULL );
+        ASSERT( my_state==LIVE, NULL );
+        data = other.data;
+    }
+    bool operator==( const MyData2& other ) const {
+        ASSERT( other.my_state==LIVE, NULL );
+        ASSERT( my_state==LIVE, NULL );
+        return data == other.data;
+    }
+};
+
+class MyHashCompare {
+public:
+    bool equal( const MyKey& j, const MyKey& k ) const {
+        return j.key==k.key;
+    }
+    unsigned long hash( const MyKey& k ) const {
+        return k.key;
+    }   
+};
+
+class YourHashCompare {
+public:
+    bool equal( const MyKey& j, const MyKey& k ) const {
+        return j.key==k.key;
+    }
+    unsigned long hash( const MyKey& ) const {
+        return 1;
+    }   
+};
+
+typedef local_counting_allocator<std::allocator<MyData> > MyAllocator;
+typedef tbb::concurrent_hash_map<MyKey,MyData,MyHashCompare,MyAllocator> MyTable;
+typedef tbb::concurrent_hash_map<MyKey,MyData2,MyHashCompare> MyTable2;
+typedef tbb::concurrent_hash_map<MyKey,MyData,YourHashCompare> YourTable;
+
+template<typename MyTable>
+inline void CheckAllocator(MyTable &table, size_t expected_allocs, size_t expected_frees, bool exact = true) {
+    size_t items_allocated = table.get_allocator().items_allocated, items_freed = table.get_allocator().items_freed;
+    size_t allocations = table.get_allocator().allocations, frees = table.get_allocator().frees;
+    REMARK("checking allocators: items %u/%u, allocs %u/%u\n",
+            unsigned(items_allocated), unsigned(items_freed), unsigned(allocations), unsigned(frees) );
+    ASSERT( items_allocated == allocations, NULL); ASSERT( items_freed == frees, NULL);
+    if(exact) {
+        ASSERT( allocations == expected_allocs, NULL); ASSERT( frees == expected_frees, NULL);
+    } else {
+        ASSERT( allocations >= expected_allocs, NULL); ASSERT( frees >= expected_frees, NULL);
+        ASSERT( allocations - frees == expected_allocs - expected_frees, NULL );
+    }
+}
+
+inline bool UseKey( size_t i ) {
+    return (i&3)!=3;
+}
+
+struct Insert {
+    static void apply( MyTable& table, int i ) {
+        if( UseKey(i) ) {
+            if( i&4 ) {
+                MyTable::accessor a;
+                table.insert( a, MyKey::make(i) );
+                if( i&1 )
+                    (*a).second.set_value(i*i);
+                else
+                    a->second.set_value(i*i);
+            } else
+                if( i&1 ) {
+                    MyTable::accessor a;
+                    table.insert( a, std::make_pair(MyKey::make(i), MyData(i*i)) );
+                    ASSERT( (*a).second.value_of()==i*i, NULL );
+                } else {
+                    MyTable::const_accessor ca;
+                    table.insert( ca, std::make_pair(MyKey::make(i), MyData(i*i)) );
+                    ASSERT( ca->second.value_of()==i*i, NULL );
+                }
+        }
+    }
+};
+
+struct Find {
+    static void apply( MyTable& table, int i ) {
+        MyTable::accessor a;
+        const MyTable::accessor& ca = a;
+        bool b = table.find( a, MyKey::make(i) );
+        ASSERT( b==!a.empty(), NULL );
+        if( b ) {
+            if( !UseKey(i) )
+                REPORT("Line %d: unexpected key %d present\n",__LINE__,i);
+            AssertSameType( &*a, static_cast<MyTable::value_type*>(0) );
+            ASSERT( ca->second.value_of()==i*i, NULL );
+            ASSERT( (*ca).second.value_of()==i*i, NULL );
+            if( i&1 )
+                ca->second.set_value( ~ca->second.value_of() );
+            else
+                (*ca).second.set_value( ~ca->second.value_of() );
+        } else {
+            if( UseKey(i) ) 
+                REPORT("Line %d: key %d missing\n",__LINE__,i);
+        }
+    }
+};
+
+struct FindConst {
+    static void apply( const MyTable& table, int i ) {
+        MyTable::const_accessor a;
+        const MyTable::const_accessor& ca = a;
+        bool b = table.find( a, MyKey::make(i) );
+        ASSERT( b==(table.count(MyKey::make(i))>0), NULL );
+        ASSERT( b==!a.empty(), NULL );
+        ASSERT( b==UseKey(i), NULL );
+        if( b ) {
+            AssertSameType( &*ca, static_cast<const MyTable::value_type*>(0) );
+            ASSERT( ca->second.value_of()==~(i*i), NULL );
+            ASSERT( (*ca).second.value_of()==~(i*i), NULL );
+        }
+    }
+};
+
+tbb::atomic<int> EraseCount;
+
+struct Erase {
+    static void apply( MyTable& table, int i ) {
+        bool b;
+        if(i&4) {
+            if(i&8) {
+                MyTable::const_accessor a;
+                b = table.find( a, MyKey::make(i) ) && table.erase( a );
+            } else {
+                MyTable::accessor a;
+                b = table.find( a, MyKey::make(i) ) && table.erase( a );
+            }
+        } else
+            b = table.erase( MyKey::make(i) );
+        if( b ) ++EraseCount;
+        ASSERT( table.count(MyKey::make(i)) == 0, NULL );
+    }
+};
+
+static const int IE_SIZE = 2;
+tbb::atomic<YourTable::size_type> InsertEraseCount[IE_SIZE];
+
+struct InsertErase  {
+    static void apply( YourTable& table, int i ) {
+        if ( i%3 ) {
+            int key = i%IE_SIZE;
+            if ( table.insert( std::make_pair(MyKey::make(key), MyData2()) ) ) 
+                ++InsertEraseCount[key];
+        } else {
+            int key = i%IE_SIZE;
+            if( i&1 ) {
+                YourTable::accessor res;
+                if(table.find( res, MyKey::make(key) ) && table.erase( res ) )
+                    --InsertEraseCount[key];
+            } else {
+                YourTable::const_accessor res;
+                if(table.find( res, MyKey::make(key) ) && table.erase( res ) )
+                    --InsertEraseCount[key];
+            }
+        }
+    }
+};
+
+// Test for the deadlock discussed at:
+// http://softwarecommunity.intel.com/isn/Community/en-US/forums/permalink/30253302/30253302/ShowThread.aspx#30253302
+struct InnerInsert {
+    static void apply( YourTable& table, int i ) {
+        YourTable::accessor a1, a2;
+        if(i&1) __TBB_Yield();
+        table.insert( a1, MyKey::make(1) );
+        __TBB_Yield();
+        table.insert( a2, MyKey::make(1 + (1<<30)) ); // the same chain
+        table.erase( a2 ); // erasing by key instead would deadlock even for a single thread
+    }
+};
+
+template<typename Op, typename MyTable>
+class TableOperation: NoAssign {
+    MyTable& my_table;
+public:
+    void operator()( const tbb::blocked_range<int>& range ) const {
+        for( int i=range.begin(); i!=range.end(); ++i ) 
+            Op::apply(my_table,i);
+    }
+    TableOperation( MyTable& table ) : my_table(table) {}
+};
+
+template<typename Op, typename TableType>
+void DoConcurrentOperations( TableType& table, int n, const char* what, int nthread ) {
+    REMARK("testing %s with %d threads\n",what,nthread);
+    tbb::tick_count t0 = tbb::tick_count::now();
+    tbb::parallel_for( tbb::blocked_range<int>(0,n,100), TableOperation<Op,TableType>(table) );
+    tbb::tick_count t1 = tbb::tick_count::now();
+    REMARK("time for %s = %g with %d threads\n",what,(t1-t0).seconds(),nthread);
+}
+
+//! Test traversing the table with an iterator.
+void TraverseTable( MyTable& table, size_t n, size_t expected_size ) {
+    REMARK("testing traversal\n");
+    size_t actual_size = table.size();
+    ASSERT( actual_size==expected_size, NULL );
+    size_t count = 0;
+    bool* array = new bool[n];
+    memset( array, 0, n*sizeof(bool) );
+    const MyTable& const_table = table;
+    MyTable::const_iterator ci = const_table.begin();
+    for( MyTable::iterator i = table.begin(); i!=table.end(); ++i ) {
+        // Check iterator
+        int k = i->first.value_of();
+        ASSERT( UseKey(k), NULL );
+        ASSERT( (*i).first.value_of()==k, NULL );
+        ASSERT( 0<=k && size_t(k)<n, "out of bounds key" );
+        ASSERT( !array[k], "duplicate key" );
+        array[k] = true;
+        ++count;
+
+        // Check lower/upper bounds
+        std::pair<MyTable::iterator, MyTable::iterator> er = table.equal_range(i->first);
+        std::pair<MyTable::const_iterator, MyTable::const_iterator> cer = const_table.equal_range(i->first);
+        ASSERT(cer.first == er.first && cer.second == er.second, NULL);
+        ASSERT(cer.first == i, NULL);
+        ASSERT(std::distance(cer.first, cer.second) == 1, NULL);
+
+        // Check const_iterator
+        MyTable::const_iterator cic = ci++;
+        ASSERT( cic->first.value_of()==k, NULL );
+        ASSERT( (*cic).first.value_of()==k, NULL );
+    }
+    ASSERT( ci==const_table.end(), NULL );
+    delete[] array;
+    if( count!=expected_size ) {
+        REPORT("Line %d: count=%ld but should be %ld\n",__LINE__,long(count),long(expected_size));
+    }
+}
+
+typedef tbb::atomic<unsigned char> AtomicByte;
+
+template<typename RangeType>
+struct ParallelTraverseBody: NoAssign {
+    const size_t n;
+    AtomicByte* const array;
+    ParallelTraverseBody( AtomicByte array_[], size_t n_ ) : 
+        n(n_), 
+        array(array_)
+    {}
+    void operator()( const RangeType& range ) const {
+        for( typename RangeType::iterator i = range.begin(); i!=range.end(); ++i ) {
+            int k = i->first.value_of();
+            ASSERT( 0<=k && size_t(k)<n, NULL ); 
+            ++array[k];
+        }
+    }
+};
+
+void Check( AtomicByte array[], size_t n, size_t expected_size ) {
+    if( expected_size )
+        for( size_t k=0; k<n; ++k ) {
+            if( array[k] != int(UseKey(k)) ) {
+                REPORT("array[%d]=%d != %d=UseKey(%d)\n",
+                       int(k), int(array[k]), int(UseKey(k)), int(k));
+                ASSERT(false,NULL);
+            }
+        }
+}
+
+//! Test traversing the table with a parallel range.
+void ParallelTraverseTable( MyTable& table, size_t n, size_t expected_size ) {
+    REMARK("testing parallel traversal\n");
+    ASSERT( table.size()==expected_size, NULL );
+    AtomicByte* array = new AtomicByte[n];
+
+    memset( array, 0, n*sizeof(AtomicByte) );
+    MyTable::range_type r = table.range(10);
+    tbb::parallel_for( r, ParallelTraverseBody<MyTable::range_type>( array, n ));
+    Check( array, n, expected_size );
+
+    const MyTable& const_table = table;
+    memset( array, 0, n*sizeof(AtomicByte) );
+    MyTable::const_range_type cr = const_table.range(10);
+    tbb::parallel_for( cr, ParallelTraverseBody<MyTable::const_range_type>( array, n ));
+    Check( array, n, expected_size );
+
+    delete[] array;
+}
+
+void TestInsertFindErase( int nthread ) {
+    int n=250000; 
+
+    // compute m = number of unique keys
+    int m = 0;       
+    for( int i=0; i<n; ++i )
+        m += UseKey(i);
+    MyAllocator a; a.items_freed = a.frees = 100;
+    ASSERT( MyDataCount==0, NULL );
+    MyTable table(a);
+    TraverseTable(table,n,0);
+    ParallelTraverseTable(table,n,0);
+    CheckAllocator(table, 0, 100);
+
+    DoConcurrentOperations<Insert,MyTable>(table,n,"insert",nthread);
+    ASSERT( MyDataCount==m, NULL );
+    TraverseTable(table,n,m);
+    ParallelTraverseTable(table,n,m);
+    CheckAllocator(table, m, 100);
+
+    DoConcurrentOperations<Find,MyTable>(table,n,"find",nthread);
+    ASSERT( MyDataCount==m, NULL );
+    CheckAllocator(table, m, 100);
+
+    DoConcurrentOperations<FindConst,MyTable>(table,n,"find(const)",nthread);
+    ASSERT( MyDataCount==m, NULL );
+    CheckAllocator(table, m, 100);
+
+    EraseCount=0;
+    DoConcurrentOperations<Erase,MyTable>(table,n,"erase",nthread);
+    ASSERT( EraseCount==m, NULL );
+    ASSERT( MyDataCount==0, NULL );
+    TraverseTable(table,n,0);
+    CheckAllocator(table, m, m+100);
+
+    bad_hashing = true;
+    table.clear();
+    bad_hashing = false;
+
+    if(nthread > 1) {
+        YourTable ie_table;
+        for( int i=0; i<IE_SIZE; ++i )
+            InsertEraseCount[i] = 0;        
+        DoConcurrentOperations<InsertErase,YourTable>(ie_table,n/2,"insert_erase",nthread);
+        for( int i=0; i<IE_SIZE; ++i )
+            ASSERT( InsertEraseCount[i]==ie_table.count(MyKey::make(i)), NULL );
+
+        DoConcurrentOperations<InnerInsert,YourTable>(ie_table,2000,"inner insert",nthread);
+    }
+}
+
+volatile int Counter;
+
+class AddToTable: NoAssign {
+    MyTable& my_table;
+    const int my_nthread;
+    const int my_m;
+public:
+    AddToTable( MyTable& table, int nthread, int m ) : my_table(table), my_nthread(nthread), my_m(m) {}
+    void operator()( int ) const {
+        for( int i=0; i<my_m; ++i ) {
+            // Busy wait to synchronize threads
+            int j = 0;
+            while( Counter<i ) {
+                if( ++j==1000000 ) {
+                    // If Counter<i after a million iterations, then we almost surely have
+                    // more logical threads than physical threads, and should yield in 
+                    // order to let suspended logical threads make progress.
+                    j = 0;
+                    __TBB_Yield();
+                }
+            }
+            // Now all threads attempt to simultaneously insert a key.
+            int k;
+            {
+                MyTable::accessor a;
+                MyKey key = MyKey::make(i);
+                if( my_table.insert( a, key ) ) 
+                    a->second.set_value( 1 );
+                else 
+                    a->second.set_value( a->second.value_of()+1 );      
+                k = a->second.value_of();
+            }
+            if( k==my_nthread ) 
+                Counter=i+1;
+        }
+    }
+};
+
+class RemoveFromTable: NoAssign {
+    MyTable& my_table;
+    const int my_nthread;
+    const int my_m;
+public:
+    RemoveFromTable( MyTable& table, int nthread, int m ) : my_table(table), my_nthread(nthread), my_m(m) {}
+    void operator()(int) const {
+        for( int i=0; i<my_m; ++i ) {
+            bool b;
+            if(i&4) {
+                if(i&8) {
+                    MyTable::const_accessor a;
+                    b = my_table.find( a, MyKey::make(i) ) && my_table.erase( a );
+                } else {
+                    MyTable::accessor a;
+                    b = my_table.find( a, MyKey::make(i) ) && my_table.erase( a );
+                }
+            } else
+                b = my_table.erase( MyKey::make(i) );
+            if( b ) ++EraseCount;
+        }
+    }
+};
+
+//! Test for memory leak in concurrent_hash_map (TR #153).
+void TestConcurrency( int nthread ) {
+    REMARK("testing multiple insertions/deletions of same key with %d threads\n", nthread);
+    {
+        ASSERT( MyDataCount==0, NULL );
+        MyTable table;
+        const int m = 1000;
+        Counter = 0;
+        tbb::tick_count t0 = tbb::tick_count::now();
+        NativeParallelFor( nthread, AddToTable(table,nthread,m) );
+        tbb::tick_count t1 = tbb::tick_count::now();
+        REMARK("time for %u insertions = %g with %d threads\n",unsigned(MyDataCount),(t1-t0).seconds(),nthread);
+        ASSERT( MyDataCount==m, "memory leak detected" );
+
+        EraseCount = 0;
+        t0 = tbb::tick_count::now();
+        NativeParallelFor( nthread, RemoveFromTable(table,nthread,m) );
+        t1 = tbb::tick_count::now();
+        REMARK("time for %u deletions = %g with %d threads\n",unsigned(EraseCount),(t1-t0).seconds(),nthread);
+        ASSERT( MyDataCount==0, "memory leak detected" );
+        ASSERT( EraseCount==m, "return value of erase() is broken" );
+
+        CheckAllocator(table, m, m, /*exact*/nthread <= 1);
+    }
+    ASSERT( MyDataCount==0, "memory leak detected" );
+}
+
+void TestTypes() {
+    AssertSameType( static_cast<MyTable::key_type*>(0), static_cast<MyKey*>(0) );
+    AssertSameType( static_cast<MyTable::mapped_type*>(0), static_cast<MyData*>(0) );
+    AssertSameType( static_cast<MyTable::value_type*>(0), static_cast<std::pair<const MyKey,MyData>*>(0) );
+    AssertSameType( static_cast<MyTable::accessor::value_type*>(0), static_cast<MyTable::value_type*>(0) );
+    AssertSameType( static_cast<MyTable::const_accessor::value_type*>(0), static_cast<const MyTable::value_type*>(0) );
+    AssertSameType( static_cast<MyTable::size_type*>(0), static_cast<size_t*>(0) );
+    AssertSameType( static_cast<MyTable::difference_type*>(0), static_cast<ptrdiff_t*>(0) );
+}
+
+template<typename Iterator, typename T>
+void TestIteratorTraits() {
+    AssertSameType( static_cast<typename Iterator::difference_type*>(0), static_cast<ptrdiff_t*>(0) );
+    AssertSameType( static_cast<typename Iterator::value_type*>(0), static_cast<T*>(0) );
+    AssertSameType( static_cast<typename Iterator::pointer*>(0), static_cast<T**>(0) );
+    AssertSameType( static_cast<typename Iterator::iterator_category*>(0), static_cast<std::forward_iterator_tag*>(0) );
+    T x;
+    typename Iterator::reference xr = x;
+    typename Iterator::pointer xp = &x;
+    ASSERT( &xr==xp, NULL );
+}
+
+template<typename Iterator1, typename Iterator2>
+void TestIteratorAssignment( Iterator2 j ) {
+    Iterator1 i(j), k;
+    ASSERT( i==j, NULL ); ASSERT( !(i!=j), NULL );
+    k = j;
+    ASSERT( k==j, NULL ); ASSERT( !(k!=j), NULL );
+}
+
+template<typename Range1, typename Range2>
+void TestRangeAssignment( Range2 r2 ) {
+    Range1 r1(r2); r1 = r2;
+}
+//------------------------------------------------------------------------
+// Test for copy constructor and assignment
+//------------------------------------------------------------------------
+
+template<typename MyTable>
+static void FillTable( MyTable& x, int n ) {
+    for( int i=1; i<=n; ++i ) {
+        MyKey key( MyKey::make(-i) ); // negate the key so hash values do not simply follow the insertion order
+        typename MyTable::accessor a;
+        bool b = x.insert(a,key); 
+        ASSERT(b, NULL);
+        a->second.set_value( i*i );
+    }
+}
+
+template<typename MyTable>
+static void CheckTable( const MyTable& x, int n ) {
+    ASSERT( x.size()==size_t(n), "table is different size than expected" );
+    ASSERT( x.empty()==(n==0), NULL );
+    ASSERT( x.size()<=x.max_size(), NULL );
+    for( int i=1; i<=n; ++i ) {
+        MyKey key( MyKey::make(-i) );
+        typename MyTable::const_accessor a;
+        bool b = x.find(a,key); 
+        ASSERT( b, NULL ); 
+        ASSERT( a->second.value_of()==i*i, NULL );
+    }
+    int count = 0;
+    int key_sum = 0;
+    for( typename MyTable::const_iterator i(x.begin()); i!=x.end(); ++i ) {
+        ++count;
+        key_sum += -i->first.value_of();
+    }
+    ASSERT( count==n, NULL );
+    ASSERT( key_sum==n*(n+1)/2, NULL );
+}
+
+static void TestCopy() {
+    REMARK("testing copy\n");
+    MyTable t1;
+    for( int i=0; i<10000; i=(i<100 ? i+1 : i*3) ) {
+        MyDataCount = 0;
+
+        FillTable(t1,i);
+        // Do not call CheckTable(t1,i) before copying; it forces rehashing.
+
+        MyTable t2(t1);
+        // Check that copy constructor did not mangle source table.
+        CheckTable(t1,i);
+        swap(t1, t2);
+        CheckTable(t1,i);
+        ASSERT( !(t1 != t2), NULL );
+
+        // Clear original table
+        t2.clear();
+        swap(t2, t1);
+        CheckTable(t1,0);
+
+        // Verify that copy of t1 is correct, even after t1 is cleared.
+        CheckTable(t2,i);
+        t2.clear();
+        t1.swap( t2 );
+        CheckTable(t1,0);
+        CheckTable(t2,0);
+        ASSERT( MyDataCount==0, "data leak?" );
+    }
+}
+
+void TestAssignment() {
+    REMARK("testing assignment\n");
+    for( int i=0; i<1000; i=(i<30 ? i+1 : i*5) ) {
+        for( int j=0; j<1000; j=(j<30 ? j+1 : j*7) ) {
+            MyTable t1;
+            MyTable t2;
+            FillTable(t1,i);
+            FillTable(t2,j);
+            ASSERT( (t1 == t2) == (i == j), NULL );
+            CheckTable(t2,j);
+
+            MyTable& tref = t2=t1; 
+            ASSERT( &tref==&t2, NULL );
+            ASSERT( t1 == t2, NULL );
+            CheckTable(t1,i);
+            CheckTable(t2,i);
+
+            t1.clear();
+            CheckTable(t1,0);
+            CheckTable(t2,i);
+            ASSERT( MyDataCount==i, "data leak?" );
+
+            t2.clear();
+            CheckTable(t1,0);
+            CheckTable(t2,0);
+            ASSERT( MyDataCount==0, "data leak?" );
+        }
+    }
+}
+
+void TestIteratorsAndRanges() {
+    REMARK("testing iterators compliance\n");
+    TestIteratorTraits<MyTable::iterator,MyTable::value_type>();
+    TestIteratorTraits<MyTable::const_iterator,const MyTable::value_type>();
+
+    MyTable v;
+    MyTable const &u = v;
+
+    TestIteratorAssignment<MyTable::const_iterator>( u.begin() );
+    TestIteratorAssignment<MyTable::const_iterator>( v.begin() );
+    TestIteratorAssignment<MyTable::iterator>( v.begin() );
+    // doesn't compile as expected: TestIteratorAssignment<typename V::iterator>( u.begin() );
+
+    // check equal_range for a non-existing key
+    ASSERT(v.equal_range(MyKey::make(-1)) == std::make_pair(v.end(), v.end()), NULL);
+    ASSERT(u.equal_range(MyKey::make(-1)) == std::make_pair(u.end(), u.end()), NULL);
+
+    REMARK("testing ranges compliance\n");
+    TestRangeAssignment<MyTable::const_range_type>( u.range() );
+    TestRangeAssignment<MyTable::const_range_type>( v.range() );
+    TestRangeAssignment<MyTable::range_type>( v.range() );
+    // doesn't compile as expected: TestRangeAssignment<typename V::range_type>( u.range() );
+
+    REMARK("testing construction and insertion from iterators range\n");
+    FillTable( v, 1000 );
+    MyTable2 t(v.begin(), v.end());
+    v.rehash();
+    CheckTable(t, 1000);
+    t.insert(v.begin(), v.end()); // no-op: all keys are already present
+    CheckTable(t, 1000);
+    t.clear();
+    t.insert(v.begin(), v.end()); // restore
+    CheckTable(t, 1000);
+
+    REMARK("testing comparison\n");
+    typedef tbb::concurrent_hash_map<MyKey,MyData2,YourHashCompare,MyAllocator> YourTable1;
+    typedef tbb::concurrent_hash_map<MyKey,MyData2,YourHashCompare> YourTable2;
+    YourTable1 t1;
+    FillTable( t1, 10 );
+    CheckTable(t1, 10 );
+    YourTable2 t2(t1.begin(), t1.end());
+    MyKey key( MyKey::make(-5) ); MyData2 data;
+    ASSERT(t2.erase(key), NULL);
+    YourTable2::accessor a;
+    ASSERT(t2.insert(a, key), NULL);
+    data.set_value(0);   a->second = data;
+    ASSERT( t1 != t2, NULL);
+    data.set_value(5*5); a->second = data;
+    ASSERT( t1 == t2, NULL);
+}
+
+void TestRehash() {
+    REMARK("testing rehashing\n");
+    MyTable w;
+    w.insert( std::make_pair(MyKey::make(-5), MyData()) );
+    w.rehash(); // without this, assertion will fail
+    MyTable::iterator it = w.begin();
+    int i = 0; // check for non-rehashed buckets
+    for( ; it != w.end(); i++ )
+        w.count( (it++)->first );
+    ASSERT( i == 1, NULL );
+    for( i=0; i<1000; i=(i<29 ? i+1 : i*2) ) {
+        for( int j=max(256+i, i*2); j<10000; j*=3 ) {
+            MyTable v;
+            FillTable( v, i );
+            ASSERT(int(v.size()) == i, NULL);
+            ASSERT(int(v.bucket_count()) <= j, NULL);
+            v.rehash( j );
+            ASSERT(int(v.bucket_count()) >= j, NULL);
+            CheckTable( v, i );
+        }
+    }
+}
+
+#if TBB_USE_EXCEPTIONS
+void TestExceptions() {
+    typedef local_counting_allocator<tbb::tbb_allocator<MyData2> > allocator_t;
+    typedef tbb::concurrent_hash_map<MyKey,MyData2,MyHashCompare,allocator_t> ThrowingTable;
+    enum methods {
+        zero_method = 0,
+        ctor_copy, op_assign, op_insert,
+        all_methods
+    };
+    REMARK("testing exception-safety guarantees\n");
+    ThrowingTable src;
+    FillTable( src, 1000 );
+    ASSERT( MyDataCount==1000, NULL );
+
+    try {
+        for(int t = 0; t < 2; t++) // exception type
+        for(int m = zero_method+1; m < all_methods; m++)
+        {
+            allocator_t a;
+            if(t) MyDataCountLimit = 101;
+            else a.set_limits(101);
+            ThrowingTable victim(a);
+            MyDataCount = 0;
+
+            try {
+                switch(m) {
+                case ctor_copy: {
+                        ThrowingTable acopy(src, a);
+                    } break;
+                case op_assign: {
+                        victim = src;
+                    } break;
+                case op_insert: {
+                        FillTable( victim, 1000 );
+                    } break;
+                default:;
+                }
+                ASSERT(false, "should throw an exception");
+            } catch(std::bad_alloc &e) {
+                MyDataCountLimit = 0;
+                size_t size = victim.size();
+                switch(m) {
+                case op_assign:
+                    ASSERT( MyDataCount==100, "data leak?" );
+                    ASSERT( size>=100, NULL );
+                    CheckAllocator(victim, 100+t, t);
+                case ctor_copy:
+                    CheckTable(src, 1000);
+                    break;
+                case op_insert:
+                    ASSERT( size==size_t(100-t), NULL );
+                    ASSERT( MyDataCount==100-t, "data leak?" );
+                    CheckTable(victim, 100-t);
+                    CheckAllocator(victim, 100, t);
+                    break;
+
+                default:; // nothing to check here
+                }
+                REMARK("Exception %d: %s\t- ok ()\n", m, e.what());
+            }
+            catch ( ... ) {
+                ASSERT ( __TBB_EXCEPTION_TYPE_INFO_BROKEN, "Unrecognized exception" );
+            }
+        }
+    } catch(...) {
+        ASSERT(false, "unexpected exception");
+    }
+    src.clear(); MyDataCount = 0;
+}
+#endif /* TBB_USE_EXCEPTIONS */
+
+//------------------------------------------------------------------------
+// Test driver
+//------------------------------------------------------------------------
+
+#include "tbb/task_scheduler_init.h"
+
+int TestMain () {
+    if( MinThread<0 ) {
+        REPORT("ERROR: must use at least one thread\n");
+        exit(1);
+    }
+
+    // Do serial tests
+    TestTypes();
+    TestCopy();
+    TestRehash();
+    TestAssignment();
+    TestIteratorsAndRanges();
+#if TBB_USE_EXCEPTIONS
+    TestExceptions();
+#endif /* TBB_USE_EXCEPTIONS */
+
+    // Do concurrency tests.
+    for( int nthread=MinThread; nthread<=MaxThread; ++nthread ) {
+        tbb::task_scheduler_init init( nthread );
+        TestInsertFindErase( nthread );
+        TestConcurrency( nthread );
+    }
+    // check linking
+    if(bad_hashing) { //should be false
+        tbb::internal::runtime_warning("none\nERROR: it must not be executed");
+    }
+
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_concurrent_monitor.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_concurrent_monitor.cpp
new file mode 100644 (file)
index 0000000..71246d8
--- /dev/null
@@ -0,0 +1,244 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/concurrent_monitor.h"
+#include "tbb/atomic.h"
+#include "tbb/parallel_for.h"
+#include "tbb/blocked_range.h"
+#include "harness.h"
+#include "tbb/concurrent_monitor.cpp"
+
+using namespace tbb;
+
+//! Queuing lock with concurrent_monitor; to test concurrent_monitor::notify( Predicate p )
+class QueuingMutex {
+public:
+    //! Construct unacquired mutex.
+    QueuingMutex() { q_tail = NULL; }
+
+    //! The scoped locking pattern
+    class ScopedLock: internal::no_copy {
+        void Initialize() { mutex = NULL; }
+    public:
+        ScopedLock() {Initialize();}
+        ScopedLock( QueuingMutex& m ) { Initialize(); Acquire(m); }
+        ~ScopedLock() { if( mutex ) Release(); }
+        void Acquire( QueuingMutex& m );
+        void Release();
+        void SleepPerhaps();
+
+    private:
+        QueuingMutex* mutex;
+        ScopedLock* next;
+        uintptr_t going;
+        internal::concurrent_monitor::thread_context thr_ctx;
+    };
+
+    friend class ScopedLock;
+private:
+    //! The last competitor requesting the lock
+    atomic<ScopedLock*> q_tail;
+    internal::concurrent_monitor waitq;
+};
+
+struct PredicateEq {
+    void* p;
+    PredicateEq( void* p_ ) : p(p_) {}
+    bool operator() ( void* v ) const {return p==v;}
+};
+
+//! A method to acquire QueuingMutex lock
+void QueuingMutex::ScopedLock::Acquire( QueuingMutex& m )
+{
+    // Must set all fields before the fetch_and_store, because once the
+    // fetch_and_store executes, *this becomes accessible to other threads.
+    mutex = &m;
+    next  = NULL;
+    going = 0;
+
+    // The fetch_and_store must have release semantics, because we are
+    // "sending" the fields initialized above to other processors.
+    ScopedLock* pred = m.q_tail.fetch_and_store<tbb::release>(this);
+    if( pred ) {
+        __TBB_ASSERT( !pred->next, "the predecessor has another successor!");
+        pred->next = this;
+        for( int i=0; i<16; ++i ) {
+            if( going!=0ul ) break;
+            __TBB_Yield();
+        }
+        SleepPerhaps();
+    }
+
+    // Force acquire so that user's critical section receives correct values
+    // from processor that was previously in the user's critical section.
+    __TBB_load_with_acquire(going);
+}
+
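+// A minimal sketch (assumed helper, not called by the tests) of the release/acquire
+// hand-off described above: the releasing side publishes its data before the flag,
+// so a reader that acquires the flag is guaranteed to see that data.
+inline void ReleaseAcquireHandOffSketch( uintptr_t& flag, int& payload ) {
+    payload = 42;                        // ordinary store, published below
+    __TBB_store_with_release( flag, 1 ); // release: payload becomes visible before flag
+    // The reading side would pair this with
+    //     while( __TBB_load_with_acquire(flag)==0 ) __TBB_Yield();
+    // after which it is guaranteed to observe payload==42.
+}
+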
+//! A method to release QueuingMutex lock
+void QueuingMutex::ScopedLock::Release( )
+{
+    if( !next ) {
+        if( this == mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) {
+            // this was the only item in the queue, and the queue is now empty.
+            goto done;
+        }
+        // Someone in the queue
+        spin_wait_while_eq( next, (ScopedLock*)0 );
+    }
+    __TBB_store_with_release(next->going, 1);
+    mutex->waitq.notify( PredicateEq(next) );
+done:
+    Initialize();
+}
+
+//! Yield and block; go to sleep
+void QueuingMutex::ScopedLock::SleepPerhaps()
+{
+    bool slept = false;
+    internal::concurrent_monitor& mq = mutex->waitq;
+    mq.prepare_wait( thr_ctx, this );
+    while( going==0ul ) {
+        if( (slept=mq.commit_wait( thr_ctx ))==true )
+            break;
+        mq.prepare_wait( thr_ctx, this );
+    }
+    if( !slept )
+        mq.cancel_wait( thr_ctx );
+}
+
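+// A minimal usage sketch (assumed helper, not called by the tests) of the scoped
+// locking pattern this mutex provides; the real concurrent exercise is the AddOne
+// functor further down. Acquire() may sleep on the monitor, and ~ScopedLock()
+// releases the lock and notifies the queued successor.
+inline void QueuingMutexUsageSketch( QueuingMutex& m, volatile long& value ) {
+    QueuingMutex::ScopedLock lock( m ); // acquired here, released at end of scope
+    value = value + 1;                  // protected update
+}
+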
+//! Spin lock with concurrent_monitor; to test concurrent_monitor::notify_all() and concurrent_monitor::notify_one()
+class SpinMutex {
+public:
+    //! Construct unacquired mutex.
+    SpinMutex() : toggle(false) { flag = 0; }
+
+    //! The scoped locking pattern
+    class ScopedLock: internal::no_copy {
+        void Initialize() { mutex = NULL; }
+    public:
+        ScopedLock() {Initialize();}
+        ScopedLock( SpinMutex& m ) { Initialize(); Acquire(m); }
+        ~ScopedLock() { if( mutex ) Release(); }
+        void Acquire( SpinMutex& m );
+        void Release();
+        void SleepPerhaps();
+
+    private:
+        SpinMutex* mutex;
+        internal::concurrent_monitor::thread_context thr_ctx;
+    };
+
+    friend class ScopedLock;
+private:
+    tbb::atomic<unsigned> flag;
+    bool toggle;
+    internal::concurrent_monitor waitq;
+};
+
+//! A method to acquire SpinMutex lock
+void SpinMutex::ScopedLock::Acquire( SpinMutex& m )
+{
+    mutex = &m;
+retry:
+    if( m.flag.compare_and_swap( 1, 0 )!=0 ) {
+        SleepPerhaps();
+        goto retry;
+    }
+}
+
+//! A method to release SpinMutex lock
+void SpinMutex::ScopedLock::Release()
+{
+    bool old_toggle = mutex->toggle;
+    mutex->toggle = !mutex->toggle;
+    mutex->flag = 0;
+    if( old_toggle )
+        mutex->waitq.notify_one();
+    else
+        mutex->waitq.notify_all();
+}
+
+//! Yield and block; go to sleep
+void SpinMutex::ScopedLock::SleepPerhaps()
+{
+    bool slept = false;
+    internal::concurrent_monitor& mq = mutex->waitq;
+    mq.prepare_wait( thr_ctx, this );
+    while( mutex->flag ) {
+        if( (slept=mq.commit_wait( thr_ctx ))==true )
+            break;
+        mq.prepare_wait( thr_ctx, this );
+    }
+    if( !slept )
+        mq.cancel_wait( thr_ctx );
+}
+
+template<typename M>
+struct Counter {
+    typedef M mutex_type;
+    M mutex;
+    volatile long value;
+};
+
+//! Function object for use with parallel_for.h.
+template<typename C>
+struct AddOne: NoAssign {
+    C& counter;
+    /** Increments counter once for each iteration in the iteration space. */
+    void operator()( tbb::blocked_range<size_t>& range ) const {
+        for( size_t i=range.begin(); i!=range.end(); ++i ) {
+            typename C::mutex_type::ScopedLock lock(counter.mutex);
+            counter.value = counter.value+1;
+        }
+    }
+    AddOne( C& counter_ ) : counter(counter_) {}
+};
+
+//! Generic test of a TBB mutex type M.
+/** Does not test features specific to reader-writer locks. */
+template<typename M>
+void Test() {
+    Counter<M> counter;
+    counter.value = 0;
+    const int n = 100000;
+    tbb::parallel_for(tbb::blocked_range<size_t>(0,n,n/10),AddOne<Counter<M> >(counter));
+    if( counter.value!=n )
+        REPORT("ERROR : counter.value=%ld\n",counter.value);
+}
+
+int TestMain () {
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        REMARK( "testing with %d workers\n", static_cast<int>(p) );
+        // test the predicated notify 
+        Test<QueuingMutex>();
+        // test the notify_all and notify_one methods
+        Test<SpinMutex>();
+        REMARK( "calling destructor for task_scheduler_init\n" );
+    }
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_concurrent_queue.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_concurrent_queue.cpp
new file mode 100644 (file)
index 0000000..1704c75
--- /dev/null
@@ -0,0 +1,968 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/concurrent_queue.h"
+#include "tbb/atomic.h"
+#include "tbb/tick_count.h"
+#include "harness.h"
+#include "harness_allocator.h"
+
+static tbb::atomic<long> FooConstructed;
+static tbb::atomic<long> FooDestroyed;
+
+class Foo {
+    enum state_t{
+        LIVE=0x1234,
+        DEAD=0xDEAD
+    };
+    state_t state;
+public:
+    int thread_id;
+    int serial;
+    Foo() : state(LIVE), thread_id(0), serial(0) {
+        ++FooConstructed;
+    }
+    Foo( const Foo& item ) : state(LIVE) {
+        ASSERT( item.state==LIVE, NULL );
+        ++FooConstructed;
+        thread_id = item.thread_id;
+        serial = item.serial;
+    }
+    ~Foo() {
+        ASSERT( state==LIVE, NULL );
+        ++FooDestroyed;
+        state=DEAD;
+        thread_id=0xDEAD;
+        serial=0xDEAD;
+    }
+    void operator=( const Foo& item ) {
+        ASSERT( item.state==LIVE, NULL );
+        ASSERT( state==LIVE, NULL );
+        thread_id = item.thread_id;
+        serial = item.serial;
+    }
+    bool is_const() {return false;}
+    bool is_const() const {return true;}
+};
+
+// problem size
+static const int N = 50000;     // # of bytes
+
+#if TBB_USE_EXCEPTIONS
+//! Exception for concurrent_queue
+class Foo_exception : public std::bad_alloc {
+public:
+    virtual const char *what() const throw() { return "out of Foo limit"; }
+    virtual ~Foo_exception() throw() {}
+};
+
+static tbb::atomic<long> FooExConstructed;
+static tbb::atomic<long> FooExDestroyed;
+static tbb::atomic<long> serial_source;
+static long MaxFooCount = 0;
+static const long Threshold = 400;
+
+class FooEx {
+    enum state_t{
+        LIVE=0x1234,
+        DEAD=0xDEAD
+    };
+    state_t state;
+public:
+    int serial;
+    FooEx() : state(LIVE) {
+        ++FooExConstructed;
+        serial = serial_source++;
+    }
+
+    FooEx( const FooEx& item ) : state(LIVE) {
+        ++FooExConstructed;
+        if( MaxFooCount && (FooExConstructed-FooExDestroyed) >= MaxFooCount ) // in push()
+            throw Foo_exception();
+        serial = item.serial;
+    }
+    ~FooEx() {
+        ASSERT( state==LIVE, NULL );
+        ++FooExDestroyed;
+        state=DEAD;
+        serial=0xDEAD;
+    }
+    void operator=( FooEx& item ) {
+        ASSERT( item.state==LIVE, NULL );
+        ASSERT( state==LIVE, NULL );
+        serial = item.serial;
+        if( MaxFooCount==2*Threshold && (FooExConstructed-FooExDestroyed) <= MaxFooCount/4 ) // in pop()
+            throw Foo_exception();
+    }
+} ;
+#endif /* TBB_USE_EXCEPTIONS */
+
+const size_t MAXTHREAD = 256;
+
+static int Sum[MAXTHREAD];
+
+//! Count of various pop operations
+/** [0] = pop_if_present that failed
+    [1] = pop_if_present that succeeded
+    [2] = pop */
+static tbb::atomic<long> PopKind[3];
+
+const int M = 10000;
+
+#if TBB_DEPRECATED
+#define CALL_BLOCKING_POP(q,v) (q)->pop(v)
+#define CALL_TRY_POP(q,v,i) (((i)&0x2)?q->try_pop(v):q->pop_if_present(v))
+#define SIZE() size()
+#else
+#define CALL_BLOCKING_POP(q,v) while( !(q)->try_pop(v) ) __TBB_Yield()
+#define CALL_TRY_POP(q,v,i) q->try_pop(v)
+#define SIZE() unsafe_size()
+#endif
+
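+// The macros above bridge the deprecated and current queue interfaces. A minimal
+// sketch (assumed helper, not called by the tests) of what the non-deprecated
+// branch expands to: the current API has no blocking pop, so a blocking pop is
+// emulated by spinning on try_pop() with yields.
+inline void BlockingPopSketch( tbb::concurrent_queue<Foo>& q, Foo& dst ) {
+    while( !q.try_pop( dst ) )
+        __TBB_Yield();
+}
+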
+struct Body: NoAssign {
+    tbb::concurrent_queue<Foo>* queue;
+    const int nthread;
+    Body( int nthread_ ) : nthread(nthread_) {}
+    void operator()( int thread_id ) const {
+        long pop_kind[3] = {0,0,0};
+        int serial[MAXTHREAD+1];
+        memset( serial, 0, nthread*sizeof(int) );
+        ASSERT( thread_id<nthread, NULL );
+
+        long sum = 0;
+        for( long j=0; j<M; ++j ) {
+            Foo f;
+            f.thread_id = 0xDEAD;
+            f.serial = 0xDEAD;
+            bool prepopped = false;
+            if( j&1 ) {
+                prepopped = CALL_TRY_POP(queue,f,j);
+                ++pop_kind[prepopped];
+            }
+            Foo g;
+            g.thread_id = thread_id;
+            g.serial = j+1;
+            queue->push( g );
+            if( !prepopped ) {
+                CALL_BLOCKING_POP(queue,f);
+                ++pop_kind[2];
+            }
+            ASSERT( f.thread_id<=nthread, NULL );
+            ASSERT( f.thread_id==nthread || serial[f.thread_id]<f.serial, "partial order violation" );
+            serial[f.thread_id] = f.serial;
+            sum += f.serial-1;
+        }
+        Sum[thread_id] = sum;
+        for( int k=0; k<3; ++k )
+            PopKind[k] += pop_kind[k];
+    }
+};
+
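+// A minimal single-producer sketch (assumed helper, not called by the tests) of
+// the ordering property the "partial order violation" assertion above checks:
+// elements pushed by one thread are popped in that thread's push order.
+inline void FifoPerProducerSketch() {
+    tbb::concurrent_queue<int> q;
+    for( int v=1; v<=3; ++v )
+        q.push( v );
+    int popped = 0;
+    for( int v=1; v<=3; ++v ) {
+        bool ok = q.try_pop( popped );
+        ASSERT( ok && popped==v, "single-producer FIFO order violated" );
+    }
+}
+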
+void TestPushPop( size_t prefill, ptrdiff_t capacity, int nthread ) {
+    ASSERT( nthread>0, "nthread must be positive" );
+#if TBB_DEPRECATED
+    ptrdiff_t signed_prefill = ptrdiff_t(prefill);
+    if( signed_prefill+1>=capacity )
+        return;
+#endif
+    bool success = false;
+    for( int k=0; k<3; ++k )
+        PopKind[k] = 0;
+    for( int trial=0; !success; ++trial ) {
+        FooConstructed = 0;
+        FooDestroyed = 0;
+        Body body(nthread);
+        tbb::concurrent_queue<Foo> queue;
+#if TBB_DEPRECATED
+        queue.set_capacity( capacity );
+#endif
+        body.queue = &queue;
+        for( size_t i=0; i<prefill; ++i ) {
+            Foo f;
+            f.thread_id = nthread;
+            f.serial = 1+int(i);
+            queue.push(f);
+            ASSERT( unsigned(queue.SIZE())==i+1, NULL );
+            ASSERT( !queue.empty(), NULL );
+        }
+        tbb::tick_count t0 = tbb::tick_count::now();
+        NativeParallelFor( nthread, body );
+        tbb::tick_count t1 = tbb::tick_count::now();
+        double timing = (t1-t0).seconds();
+        REMARK("prefill=%d capacity=%d threads=%d time = %g = %g nsec/operation\n", int(prefill), int(capacity), nthread, timing, timing/(2*M*nthread)*1.E9);
+        int sum = 0;
+        for( int k=0; k<nthread; ++k )
+            sum += Sum[k];
+        int expected = int(nthread*((M-1)*M/2) + ((prefill-1)*prefill)/2);
+        for( int i=int(prefill); --i>=0; ) {
+            ASSERT( !queue.empty(), NULL );
+            Foo f;
+            bool result = queue.try_pop(f);
+            ASSERT( result, NULL );
+            ASSERT( int(queue.SIZE())==i, NULL );
+            sum += f.serial-1;
+        }
+        ASSERT( queue.empty(), NULL );
+        ASSERT( queue.SIZE()==0, NULL );
+        if( sum!=expected )
+            REPORT("sum=%d expected=%d\n",sum,expected);
+        ASSERT( FooConstructed==FooDestroyed, NULL );
+        // TODO: checks by counting allocators
+
+        success = true;
+        if( nthread>1 && prefill==0 ) {
+            // Check that pop_if_present got sufficient exercise
+            for( int k=0; k<2; ++k ) {
+#if (_WIN32||_WIN64)
+                // The TBB library on Windows seems to have a tough time generating
+                // the desired interleavings for pop_if_present, so the code tries longer, and settles
+                // for fewer desired interleavings.
+                const int max_trial = 100;
+                const int min_requirement = 20;
+#else
+                const int min_requirement = 100;
+                const int max_trial = 20;
+#endif /* _WIN32||_WIN64 */
+                if( PopKind[k]<min_requirement ) {
+                    if( trial>=max_trial ) {
+                        if( Verbose )
+                            REPORT("Warning: %d threads had only %ld pop_if_present operations %s after %d trials (expected at least %d). "
+                               "This problem may merely be unlucky scheduling. "
+                               "Investigate only if it happens repeatedly.\n",
+                               nthread, long(PopKind[k]), k==0?"failed":"succeeded", max_trial, min_requirement);
+                        else
+                            REPORT("Warning: the number of %s pop_if_present operations is less than expected for %d threads. Investigate if it happens repeatedly.\n",
+                               k==0?"failed":"succeeded", nthread ); 
+
+                    } else {
+                        success = false;
+                    }
+               }
+            }
+        }
+    }
+}
+
+class Bar {
+    enum state_t {
+        LIVE=0x1234,
+        DEAD=0xDEAD
+    };
+    state_t state;
+public:
+    ptrdiff_t my_id;
+    Bar() : state(LIVE), my_id(-1) {}
+    Bar(size_t _i) : state(LIVE), my_id(_i) {}
+    Bar( const Bar& a_bar ) : state(LIVE) {
+        ASSERT( a_bar.state==LIVE, NULL );
+        my_id = a_bar.my_id;
+    }
+    ~Bar() {
+        ASSERT( state==LIVE, NULL );
+        state = DEAD;
+        my_id = DEAD;
+    }
+    void operator=( const Bar& a_bar ) {
+        ASSERT( a_bar.state==LIVE, NULL );
+        ASSERT( state==LIVE, NULL );
+        my_id = a_bar.my_id;
+    }
+    friend bool operator==(const Bar& bar1, const Bar& bar2 ) ;
+} ;
+
+bool operator==(const Bar& bar1, const Bar& bar2) {
+    ASSERT( bar1.state==Bar::LIVE, NULL );
+    ASSERT( bar2.state==Bar::LIVE, NULL );
+    return bar1.my_id == bar2.my_id;
+}
+
+class BarIterator
+{
+    Bar* bar_ptr;
+    BarIterator(Bar* bp_) : bar_ptr(bp_) {}
+public:
+    ~BarIterator() {}
+    BarIterator& operator=( const BarIterator& other ) {
+        bar_ptr = other.bar_ptr;
+        return *this;
+    }
+    Bar& operator*() const {
+        return *bar_ptr;
+    }
+    BarIterator& operator++() {
+        ++bar_ptr;
+        return *this;
+    }
+    Bar* operator++(int) {
+        Bar* result = &operator*();
+        operator++();
+        return result;
+    }
+    friend bool operator==(const BarIterator& bia, const BarIterator& bib) ;
+    friend bool operator!=(const BarIterator& bia, const BarIterator& bib) ;
+    friend void TestConstructors ();
+} ;
+
+bool operator==(const BarIterator& bia, const BarIterator& bib) {
+    return bia.bar_ptr==bib.bar_ptr;
+}
+
+bool operator!=(const BarIterator& bia, const BarIterator& bib) {
+    return bia.bar_ptr!=bib.bar_ptr;
+}
+
+#if TBB_USE_EXCEPTIONS
+class Bar_exception : public std::bad_alloc {
+public:
+    virtual const char *what() const throw() { return "making the entry invalid"; }
+    virtual ~Bar_exception() throw() {}
+};
+
+class BarEx {
+    enum state_t {
+        LIVE=0x1234,
+        DEAD=0xDEAD
+    };
+    static int count;
+public:
+    state_t state;
+    typedef enum {
+        PREPARATION,
+        COPY_CONSTRUCT
+    } mode_t;
+    static mode_t mode;
+    ptrdiff_t my_id;
+    ptrdiff_t my_tilda_id;
+    static int button;
+    BarEx() : state(LIVE), my_id(-1), my_tilda_id(-1) {}
+    BarEx(size_t _i) : state(LIVE), my_id(_i), my_tilda_id(my_id^(-1)) {}
+    BarEx( const BarEx& a_bar ) : state(LIVE) {
+        ASSERT( a_bar.state==LIVE, NULL );
+        my_id = a_bar.my_id;
+        if( mode==PREPARATION ) 
+            if( !( ++count % 100 ) ) 
+                throw Bar_exception();
+        my_tilda_id = a_bar.my_tilda_id;
+    }
+    ~BarEx() {
+        ASSERT( state==LIVE, NULL );
+        state = DEAD;
+        my_id = DEAD;
+    }
+    static void set_mode( mode_t m ) { mode = m; }
+    void operator=( const BarEx& a_bar ) {
+        ASSERT( a_bar.state==LIVE, NULL );
+        ASSERT( state==LIVE, NULL );
+        my_id = a_bar.my_id;
+        my_tilda_id = a_bar.my_tilda_id;
+    }
+    friend bool operator==(const BarEx& bar1, const BarEx& bar2 ) ;
+} ;
+
+int    BarEx::count = 0;
+BarEx::mode_t BarEx::mode = BarEx::PREPARATION;
+
+bool operator==(const BarEx& bar1, const BarEx& bar2) {
+    ASSERT( bar1.state==BarEx::LIVE, NULL );
+    ASSERT( bar2.state==BarEx::LIVE, NULL );
+    ASSERT( (bar1.my_id ^ bar1.my_tilda_id) == -1, NULL );
+    ASSERT( (bar2.my_id ^ bar2.my_tilda_id) == -1, NULL );
+    return bar1.my_id==bar2.my_id && bar1.my_tilda_id==bar2.my_tilda_id;
+}
+#endif /* TBB_USE_EXCEPTIONS */
+
+#if TBB_DEPRECATED
+#define CALL_BEGIN(q,i) (((i)&0x1)?q.begin():q.unsafe_begin())
+#define CALL_END(q,i)   (((i)&0x1)?q.end():q.unsafe_end())
+#else
+#define CALL_BEGIN(q,i) q.unsafe_begin()
+#define CALL_END(q,i)   q.unsafe_end()
+#endif
+
+void TestConstructors ()
+{
+    tbb::concurrent_queue<Bar> src_queue;
+    tbb::concurrent_queue<Bar>::const_iterator dqb;
+    tbb::concurrent_queue<Bar>::const_iterator dqe;
+    tbb::concurrent_queue<Bar>::const_iterator iter;
+
+    for( size_t size=0; size<1001; ++size ) {
+        for( size_t i=0; i<size; ++i )
+            src_queue.push(Bar(i+(i^size)));
+        tbb::concurrent_queue<Bar>::const_iterator sqb( CALL_BEGIN(src_queue,size) );
+        tbb::concurrent_queue<Bar>::const_iterator sqe( CALL_END(src_queue,size));
+
+        tbb::concurrent_queue<Bar> dst_queue(sqb, sqe);
+
+        ASSERT(src_queue.SIZE()==dst_queue.SIZE(), "different size");
+
+        src_queue.clear();
+    }
+
+    Bar bar_array[1001];
+    for( size_t size=0; size<1001; ++size ) {
+        for( size_t i=0; i<size; ++i )
+            bar_array[i] = Bar(i+(i^size));
+
+        const BarIterator sab(bar_array+0);
+        const BarIterator sae(bar_array+size);
+
+        tbb::concurrent_queue<Bar> dst_queue2(sab, sae);
+
+        ASSERT( size==unsigned(dst_queue2.SIZE()), NULL );
+        ASSERT( sab==BarIterator(bar_array+0), NULL );
+        ASSERT( sae==BarIterator(bar_array+size), NULL );
+
+        dqb = CALL_BEGIN(dst_queue2,size);
+        dqe = CALL_END(dst_queue2,size);
+        BarIterator v_iter(sab);
+        for( ; dqb != dqe; ++dqb, ++v_iter )
+            ASSERT( *dqb == *v_iter, "unexpected element" );
+        ASSERT( v_iter==sae, "different size?" );
+    }
+
+    src_queue.clear();
+
+    tbb::concurrent_queue<Bar> dst_queue3( src_queue );
+    ASSERT( src_queue.SIZE()==dst_queue3.SIZE(), NULL );
+    ASSERT( 0==dst_queue3.SIZE(), NULL );
+
+    int k=0;
+    for( size_t i=0; i<1001; ++i ) {
+        Bar tmp_bar;
+        src_queue.push(Bar(++k));
+        src_queue.push(Bar(++k));
+        src_queue.try_pop(tmp_bar);
+
+        tbb::concurrent_queue<Bar> dst_queue4( src_queue );
+
+        ASSERT( src_queue.SIZE()==dst_queue4.SIZE(), NULL );
+
+        dqb = CALL_BEGIN(dst_queue4,i);
+        dqe = CALL_END(dst_queue4,i);
+        iter = CALL_BEGIN(src_queue,i);
+
+        for( ; dqb != dqe; ++dqb, ++iter )
+            ASSERT( *dqb == *iter, "unexpected element" );
+
+        ASSERT( iter==CALL_END(src_queue,i), "different size?" );
+    }
+
+    tbb::concurrent_queue<Bar> dst_queue5( src_queue );
+
+    ASSERT( src_queue.SIZE()==dst_queue5.SIZE(), NULL );
+    dqb = dst_queue5.unsafe_begin();
+    dqe = dst_queue5.unsafe_end();
+    iter = src_queue.unsafe_begin();
+    for( ; dqb != dqe; ++dqb, ++iter )
+        ASSERT( *dqb == *iter, "unexpected element" );
+
+    for( size_t i=0; i<100; ++i) {
+        Bar tmp_bar;
+        src_queue.push(Bar(i+1000));
+        src_queue.push(Bar(i+1000));
+        src_queue.try_pop(tmp_bar);
+
+        dst_queue5.push(Bar(i+1000));
+        dst_queue5.push(Bar(i+1000));
+        dst_queue5.try_pop(tmp_bar);
+    }
+
+    ASSERT( src_queue.SIZE()==dst_queue5.SIZE(), NULL );
+    dqb = dst_queue5.unsafe_begin();
+    dqe = dst_queue5.unsafe_end();
+    iter = src_queue.unsafe_begin();
+    for( ; dqb != dqe; ++dqb, ++iter )
+        ASSERT( *dqb == *iter, "unexpected element" );
+    ASSERT( iter==src_queue.unsafe_end(), "different size?" );
+
+#if __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN || __TBB_PLACEMENT_NEW_EXCEPTION_SAFETY_BROKEN
+    REPORT("Known issue: part of the constructor test is skipped.\n");
+#elif TBB_USE_EXCEPTIONS
+    k = 0;
+#if TBB_DEPRECATED==0
+    unsigned
+#endif
+    int n_elements=0;
+    tbb::concurrent_queue<BarEx> src_queue_ex;
+    for( size_t size=0; size<1001; ++size ) {
+        BarEx tmp_bar_ex;
+        int n_successful_pushes=0;
+        BarEx::set_mode( BarEx::PREPARATION );
+        try {
+            src_queue_ex.push(BarEx(k+(k^size)));
+            ++n_successful_pushes;
+        } catch (...) {
+        }
+        ++k;
+        try {
+            src_queue_ex.push(BarEx(k+(k^size)));
+            ++n_successful_pushes;
+        } catch (...) {
+        }
+        ++k;
+        src_queue_ex.try_pop(tmp_bar_ex);
+        n_elements += (n_successful_pushes - 1);
+        ASSERT( src_queue_ex.SIZE()==n_elements, NULL);
+
+        BarEx::set_mode( BarEx::COPY_CONSTRUCT );
+        tbb::concurrent_queue<BarEx> dst_queue_ex( src_queue_ex );
+
+        ASSERT( src_queue_ex.SIZE()==dst_queue_ex.SIZE(), NULL );
+
+        tbb::concurrent_queue<BarEx>::const_iterator dqb_ex  = CALL_BEGIN(dst_queue_ex, size);
+        tbb::concurrent_queue<BarEx>::const_iterator dqe_ex  = CALL_END(dst_queue_ex, size);
+        tbb::concurrent_queue<BarEx>::const_iterator iter_ex = CALL_BEGIN(src_queue_ex, size);
+
+        for( ; dqb_ex != dqe_ex; ++dqb_ex, ++iter_ex )
+            ASSERT( *dqb_ex == *iter_ex, "unexpected element" );
+        ASSERT( iter_ex==CALL_END(src_queue_ex,size), "different size?" );
+    }
+#endif /* TBB_USE_EXCEPTIONS */
+}
+
+template<typename Iterator1, typename Iterator2>
+void TestIteratorAux( Iterator1 i, Iterator2 j, int size ) {
+    // Now test iteration
+    Iterator1 old_i;
+    for( int k=0; k<size; ++k ) {
+        ASSERT( i!=j, NULL );
+        ASSERT( !(i==j), NULL );
+        Foo f;
+        if( k&1 ) {
+            // Test post-increment on the saved copy
+            f = *old_i++;
+            // Test assignment
+            i = old_i;
+        } else {
+            // Test post-increment
+            f=*i++;
+            if( k<size-1 ) {
+                // Test "->"
+                ASSERT( k+2==i->serial, NULL );
+            }
+            // Test assignment
+            old_i = i;
+        }
+        ASSERT( k+1==f.serial, NULL );
+    }
+    ASSERT( !(i!=j), NULL );
+    ASSERT( i==j, NULL );
+}
+
+template<typename Iterator1, typename Iterator2>
+void TestIteratorAssignment( Iterator2 j ) {
+    Iterator1 i(j);
+    ASSERT( i==j, NULL );
+    ASSERT( !(i!=j), NULL );
+    Iterator1 k;
+    k = j;
+    ASSERT( k==j, NULL );
+    ASSERT( !(k!=j), NULL );
+}
+
+template<typename Iterator, typename T>
+void TestIteratorTraits() {
+    AssertSameType( static_cast<typename Iterator::difference_type*>(0), static_cast<ptrdiff_t*>(0) );
+    AssertSameType( static_cast<typename Iterator::value_type*>(0), static_cast<T*>(0) );
+    AssertSameType( static_cast<typename Iterator::pointer*>(0), static_cast<T**>(0) );
+    AssertSameType( static_cast<typename Iterator::iterator_category*>(0), static_cast<std::forward_iterator_tag*>(0) );
+    T x;
+    typename Iterator::reference xr = x;
+    typename Iterator::pointer xp = &x;
+    ASSERT( &xr==xp, NULL );
+}
+
+//! Test the iterators for concurrent_queue
+void TestIterator() {
+    tbb::concurrent_queue<Foo> queue;
+    const tbb::concurrent_queue<Foo>& const_queue = queue;
+    for( int j=0; j<500; ++j ) {
+        TestIteratorAux( CALL_BEGIN(queue,j)      , CALL_END(queue,j)      , j );
+        TestIteratorAux( CALL_BEGIN(const_queue,j), CALL_END(const_queue,j), j );
+        TestIteratorAux( CALL_BEGIN(const_queue,j), CALL_END(queue,j)      , j );
+        TestIteratorAux( CALL_BEGIN(queue,j)      , CALL_END(const_queue,j), j );
+        Foo f;
+        f.serial = j+1;
+        queue.push(f);
+    }
+    TestIteratorAssignment<tbb::concurrent_queue<Foo>::const_iterator>( const_queue.unsafe_begin() );
+    TestIteratorAssignment<tbb::concurrent_queue<Foo>::const_iterator>( queue.unsafe_begin() );
+    TestIteratorAssignment<tbb::concurrent_queue<Foo>::iterator>( queue.unsafe_begin() );
+    TestIteratorTraits<tbb::concurrent_queue<Foo>::const_iterator, const Foo>();
+    TestIteratorTraits<tbb::concurrent_queue<Foo>::iterator, Foo>();
+}
+
+void TestConcurrentQueueType() {
+    AssertSameType( tbb::concurrent_queue<Foo>::value_type(), Foo() );
+    Foo f;
+    const Foo g;
+    tbb::concurrent_queue<Foo>::reference r = f;
+    ASSERT( &r==&f, NULL );
+    ASSERT( !r.is_const(), NULL );
+    tbb::concurrent_queue<Foo>::const_reference cr = g;
+    ASSERT( &cr==&g, NULL );
+    ASSERT( cr.is_const(), NULL );
+}
+
+template<typename T>
+void TestEmptyQueue() {
+    const tbb::concurrent_queue<T> queue;
+    ASSERT( queue.SIZE()==0, NULL );
+#if TBB_DEPRECATED
+    ASSERT( queue.capacity()>0, NULL );
+    ASSERT( size_t(queue.capacity())>=size_t(-1)/(sizeof(void*)+sizeof(T)), NULL );
+#endif
+}
+
+#if TBB_DEPRECATED
+#define CALL_TRY_PUSH(q,f,i) (((i)&0x1)?(q).push_if_not_full(f):(q).try_push(f))
+void TestFullQueue() {
+    for( int n=0; n<10; ++n ) {
+        FooConstructed = 0;
+        FooDestroyed = 0;
+        tbb::concurrent_queue<Foo> queue;
+        queue.set_capacity(n);
+        for( int i=0; i<=n; ++i ) {
+            Foo f;
+            f.serial = i;
+            bool result = CALL_TRY_PUSH(queue, f, i );
+            ASSERT( result==(i<n), NULL );
+        }
+        for( int i=0; i<=n; ++i ) {
+            Foo f;
+            bool result = queue.pop_if_present( f );
+            ASSERT( result==(i<n), NULL );
+            ASSERT( !result || f.serial==i, NULL );
+        }
+        ASSERT( FooConstructed==FooDestroyed, NULL );
+    }
+}
+#endif /* if TBB_DEPRECATED */
+
+#if TBB_DEPRECATED
+#define CALL_PUSH_IF_NOT_FULL(q,v,i) (((i)&0x1)?q.push_if_not_full(v):(q.push(v), true))
+#else
+#define CALL_PUSH_IF_NOT_FULL(q,v,i) (q.push(v), true)
+#endif
+
+void TestClear() {
+    FooConstructed = 0;
+    FooDestroyed = 0;
+    const unsigned int n=5;
+        
+    tbb::concurrent_queue<Foo> queue;
+#if TBB_DEPRECATED
+    const int q_capacity=10;
+    queue.set_capacity(q_capacity);
+#endif
+    for( size_t i=0; i<n; ++i ) {
+        Foo f;
+        f.serial = int(i);
+        bool result = CALL_PUSH_IF_NOT_FULL(queue, f, i);
+        ASSERT( result, NULL );
+    }
+    ASSERT( unsigned(queue.SIZE())==n, NULL );
+    queue.clear();
+    ASSERT( queue.SIZE()==0, NULL );
+    for( size_t i=0; i<n; ++i ) {
+        Foo f;
+        f.serial = int(i);
+        bool result = CALL_PUSH_IF_NOT_FULL(queue, f, i);
+        ASSERT( result, NULL );
+    }
+    ASSERT( unsigned(queue.SIZE())==n, NULL );
+    queue.clear();
+    ASSERT( queue.SIZE()==0, NULL );
+    for( size_t i=0; i<n; ++i ) {
+        Foo f;
+        f.serial = int(i);
+        bool result = CALL_PUSH_IF_NOT_FULL(queue, f, i);
+        ASSERT( result, NULL );
+    }
+    ASSERT( unsigned(queue.SIZE())==n, NULL );
+}
+
+#if TBB_DEPRECATED
+template<typename T>
+struct TestNegativeQueueBody: NoAssign {
+    tbb::concurrent_queue<T>& queue;
+    const int nthread;
+    TestNegativeQueueBody( tbb::concurrent_queue<T>& q, int n ) : queue(q), nthread(n) {}
+    void operator()( int k ) const {
+        if( k==0 ) {
+            int number_of_pops = nthread-1;
+            // Wait for all pops to pend.
+            while( queue.size()>-number_of_pops ) {
+                __TBB_Yield();
+            }
+            for( int i=0; ; ++i ) {
+                ASSERT( queue.size()==i-number_of_pops, NULL );
+                ASSERT( queue.empty()==(queue.size()<=0), NULL );
+                if( i==number_of_pops ) break;
+                // Satisfy another pop
+                queue.push( T() );
+            }
+        } else {
+            // Pop item from queue
+            T item;
+            queue.pop(item);
+        }
+    }
+};
+
+//! Test a queue with a negative size.
+template<typename T>
+void TestNegativeQueue( int nthread ) {
+    tbb::concurrent_queue<T> queue;
+    NativeParallelFor( nthread, TestNegativeQueueBody<T>(queue,nthread) );
+}
+#endif /* if TBB_DEPRECATED */
+
+#if TBB_USE_EXCEPTIONS
+void TestExceptions() {
+    typedef static_counting_allocator<std::allocator<FooEx>, size_t> allocator_t;
+    typedef static_counting_allocator<std::allocator<char>, size_t> allocator_char_t;
+    typedef tbb::concurrent_queue<FooEx, allocator_t> concur_queue_t;
+
+    enum methods {
+        m_push = 0,
+        m_pop
+    };  
+
+    REMARK("Testing exception safety\n");
+    // verify 'clear()' on exception; queue's destructor calls its clear()
+    // Do the test on queues of two different types at the same time to
+    // catch problems caused by incorrect sharing between templates.
+    {
+        concur_queue_t queue0;
+        tbb::concurrent_queue<int,allocator_t> queue1;
+        for( int i=0; i<2; ++i ) {
+            bool caught = false;
+            try {
+                allocator_char_t::init_counters();
+                allocator_char_t::set_limits(N/2);
+                for( int k=0; k<N; k++ ) {
+                    if( i==0 )
+                        queue0.push( FooEx() );
+                    else
+                        queue1.push( k );
+                }
+            } catch (...) {
+                caught = true;
+            }
+            ASSERT( caught, "call to push should have thrown exception" );
+        }
+    }
+    REMARK("... queue destruction test passed\n");
+
+    try {
+        int n_pushed=0, n_popped=0;
+        for(int t = 0; t <= 1; t++) // exception type -- 0 : from the allocator, 1 : from FooEx's copy constructor
+        {
+            concur_queue_t queue_test;
+            for( int m=m_push; m<=m_pop; m++ ) {
+                // concurrent_queue internally rebinds the allocator to one with 'char'
+                allocator_char_t::init_counters();
+
+                if(t) MaxFooCount = MaxFooCount + 400;
+                else allocator_char_t::set_limits(N/2);
+
+                try {
+                    switch(m) {
+                    case m_push:
+                            for( int k=0; k<N; k++ ) {
+                                queue_test.push( FooEx() );
+                                n_pushed++;
+                            }
+                            break;
+                    case m_pop:
+                            n_popped=0;
+                            for( int k=0; k<n_pushed; k++ ) {
+                                FooEx elt;
+                                queue_test.try_pop( elt );
+                                n_popped++;
+                            }
+                            n_pushed = 0;
+                            allocator_char_t::set_limits(); 
+                            break;
+                    }
+                    if( !t && m==m_push ) ASSERT(false, "should throw an exception");
+                } catch ( Foo_exception & ) {
+                    switch(m) {
+                    case m_push: {
+                                ASSERT( ptrdiff_t(queue_test.SIZE())==n_pushed, "incorrect queue size" );
+                                long tc = MaxFooCount;
+                                MaxFooCount = 0;
+                                for( int k=0; k<(int)tc; k++ ) {
+                                    queue_test.push( FooEx() );
+                                    n_pushed++;
+                                }
+                                MaxFooCount = tc;
+                            }
+                            break;
+                    case m_pop:
+                            MaxFooCount = 0; // disable exception
+                            n_pushed -= (n_popped+1); // including one that threw an exception
+                            ASSERT( n_pushed>=0, "n_pushed cannot be less than 0" );
+                            for( int k=0; k<1000; k++ ) {
+                                queue_test.push( FooEx() );
+                                n_pushed++;
+                            }
+                            ASSERT( !queue_test.empty(), "queue must not be empty" );
+                            ASSERT( ptrdiff_t(queue_test.SIZE())==n_pushed, "queue size must be equal to n pushed" );
+                            for( int k=0; k<n_pushed; k++ ) {
+                                FooEx elt;
+                                queue_test.try_pop( elt );
+                            }
+                            ASSERT( queue_test.empty(), "queue must be empty" );
+                            ASSERT( queue_test.SIZE()==0, "queue must be empty" );
+                            break;
+                    }
+                } catch ( std::bad_alloc & ) {
+                    allocator_char_t::set_limits(); // disable exception from allocator
+                    size_t size = queue_test.SIZE();
+                    switch(m) {
+                    case m_push:
+                            ASSERT( size>0, "incorrect queue size");
+                            break;
+                    case m_pop:
+                            if( !t ) ASSERT( false, "should not throw an exception" );
+                            break;
+                    }
+                }
+                REMARK("... for t=%d and m=%d, exception test passed\n", t, m);
+            }
+        }
+    } catch(...) {
+        ASSERT(false, "unexpected exception");
+    }
+}
+#endif /* TBB_USE_EXCEPTIONS */
+
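+//! Body used by TestPrimitiveTypes: each thread alternately pushes its own
+//! index and pops one element, asserting that pushed indices are below
+//! nthread and that popped values do not exceed it.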
+template<typename T>
+struct TestQueueElements: NoAssign {
+    tbb::concurrent_queue<T>& queue;
+    const int nthread;
+    TestQueueElements( tbb::concurrent_queue<T>& q, int n ) : queue(q), nthread(n) {}
+    void operator()( int k ) const {
+        for( int i=0; i<1000; ++i ) {
+            if( (i&0x1)==0 ) {
+                __TBB_ASSERT( T(k)<T(nthread), NULL );
+                queue.push( T(k) );
+            } else {
+                // Pop item from queue
+                T item;
+                queue.try_pop(item);
+                __TBB_ASSERT( item<=T(nthread), NULL );
+            }
+        }
+    }
+};
+
+//! Test concurrent queue with primitive data type
+template<typename T>
+void TestPrimitiveTypes( int nthread, T exemplar )
+{
+    tbb::concurrent_queue<T> queue;
+    for( int i=0; i<100; ++i )
+        queue.push( exemplar );
+    NativeParallelFor( nthread, TestQueueElements<T>(queue,nthread) );
+}
+
+#include "harness_m128.h"
+
+#if HAVE_m128
+
+//! Test concurrent queue with SSE type
+/** Type Queue should be a queue of ClassWithSSE. */
+template<typename Queue>
+void TestSSE() {
+    Queue q1;
+    for( int i=0; i<100; ++i )
+        q1.push(ClassWithSSE(i));
+
+    // Copy the queue
+    Queue q2 = q1;
+    // Check that elements of the copy are correct
+    typename Queue::const_iterator ci = q2.unsafe_begin();
+    for( int i=0; i<100; ++i ) {
+        ClassWithSSE foo = *ci;
+        ASSERT( *ci==ClassWithSSE(i), NULL );
+        ++ci;
+    }
+
+    for( int i=0; i<101; ++i ) {
+        ClassWithSSE tmp;
+        bool b = q1.try_pop( tmp );
+        ASSERT( b==(i<100), NULL );
+        ASSERT( !b || tmp==ClassWithSSE(i), NULL );
+    }
+}
+#endif /* HAVE_m128 */
+
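+//! Driver: runs the basic single-queue tests, the primitive-type and SSE
+//! element tests, then the concurrent push/pop tests for every thread count
+//! in [MinThread, MaxThread], and finally the exception-safety tests when
+//! exceptions are usable.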
+int TestMain () {
+    TestEmptyQueue<char>();
+    TestEmptyQueue<Foo>();
+#if TBB_DEPRECATED
+    TestFullQueue();
+#endif
+    TestClear();
+    TestConcurrentQueueType();
+    TestIterator();
+    TestConstructors();
+
+    TestPrimitiveTypes( MaxThread, (char)1 );
+    TestPrimitiveTypes( MaxThread, (int)-12 );
+    TestPrimitiveTypes( MaxThread, (float)-1.2f );
+    TestPrimitiveTypes( MaxThread, (double)-4.3 );
+#if HAVE_m128
+    TestSSE<tbb::concurrent_queue<ClassWithSSE> >();
+    TestSSE<tbb::concurrent_bounded_queue<ClassWithSSE> >();
+#endif /* HAVE_m128 */
+
+    // Test concurrent operations
+    for( int nthread=MinThread; nthread<=MaxThread; ++nthread ) {
+#if TBB_DEPRECATED
+        TestNegativeQueue<Foo>(nthread);
+#endif
+        for( size_t prefill=0; prefill<64; prefill+=(1+prefill/3) ) {
+            TestPushPop(prefill,ptrdiff_t(-1),nthread);
+            TestPushPop(prefill,ptrdiff_t(1),nthread);
+            TestPushPop(prefill,ptrdiff_t(2),nthread);
+            TestPushPop(prefill,ptrdiff_t(10),nthread);
+            TestPushPop(prefill,ptrdiff_t(100),nthread);
+        }
+    }
+#if __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
+    REPORT("Known issue: exception safety test is skipped.\n");
+#elif TBB_USE_EXCEPTIONS
+    TestExceptions();
+#endif /* TBB_USE_EXCEPTIONS */
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_concurrent_unordered.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_concurrent_unordered.cpp
new file mode 100644 (file)
index 0000000..fc32a5f
--- /dev/null
@@ -0,0 +1,464 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+/* Some tests in this source file are based on PPL tests provided by Microsoft. */
+
+#define __TBB_EXTRA_DEBUG 1
+#include "tbb/concurrent_unordered_map.h"
+#include "tbb/parallel_for.h"
+#include "tbb/tick_count.h"
+#include <stdio.h>
+#include "harness.h"
+#include "harness_allocator.h"
+
+using namespace std;
+
+typedef local_counting_allocator<debug_allocator<std::pair<const int,int>,std::allocator> > MyAllocator;
+typedef tbb::concurrent_unordered_map<int, int, tbb::tbb_hash<int>, std::equal_to<int>, MyAllocator> Mycumap;
+//typedef tbb::concurrent_unordered_map<int, int> Mycumap;
+//typedef concurrent_unordered_multimap<int, int> Mycummap;
+
+#define CheckAllocatorE(t,a,f) CheckAllocator(t,a,f,true,__LINE__)
+#define CheckAllocatorA(t,a,f) CheckAllocator(t,a,f,false,__LINE__)
+template<typename MyTable>
+inline void CheckAllocator(MyTable &table, size_t expected_allocs, size_t expected_frees, bool exact = true, int line = 0) {
+    typename MyTable::allocator_type a = table.get_allocator();
+    REMARK("#%d checking allocators: items %u/%u, allocs %u/%u\n", line,
+        unsigned(a.items_allocated), unsigned(a.items_freed), unsigned(a.allocations), unsigned(a.frees) );
+    ASSERT( a.items_allocated == a.allocations, NULL); ASSERT( a.items_freed == a.frees, NULL);
+    if(exact) {
+        ASSERT( a.allocations == expected_allocs, NULL); ASSERT( a.frees == expected_frees, NULL);
+    } else {
+        ASSERT( a.allocations >= expected_allocs, NULL); ASSERT( a.frees >= expected_frees, NULL);
+        ASSERT( a.allocations - a.frees == expected_allocs - expected_frees, NULL );
+    }
+}
+
+template <typename K, typename V = std::pair<const K, K> >
+struct ValueFactory {
+    static V make(const K &value) { return V(value, value); }
+    static K get(const V& value) { return value.second; }
+};
+
+template <typename T>
+struct ValueFactory<T, T> {
+    static T make(const T &value) { return value; }
+    static T get(const T &value) { return value; }
+};
+
+template <typename T>
+struct Value : ValueFactory<typename T::key_type, typename T::value_type> {};
+
+#if _MSC_VER
+#pragma warning(disable: 4189) // warning 4189 -- local variable is initialized but not referenced
+#pragma warning(disable: 4127) // warning 4127 -- while (true) has a constant expression in it
+#endif
+
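+//! Traverse the given range sequentially and, if it is divisible, split it
+//! and check that the element count and value sum of the two halves match
+//! those of the undivided range.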
+template<typename Iterator, typename RangeType>
+std::pair<int,int> CheckRecursiveRange(RangeType range) {
+    std::pair<int,int> sum(0, 0); // count, sum
+    for( Iterator i = range.begin(), e = range.end(); i != e; ++i ) {
+        ++sum.first; sum.second += i->second;
+    }
+    if( range.is_divisible() ) {
+        RangeType range2( range, tbb::split() );
+        std::pair<int,int> sum1 = CheckRecursiveRange<Iterator, RangeType>( range );
+        std::pair<int,int> sum2 = CheckRecursiveRange<Iterator, RangeType>( range2 );
+        sum1.first += sum2.first; sum1.second += sum2.second;
+        ASSERT( sum == sum1, "Mismatched ranges after division");
+    }
+    return sum;
+}
+
+template <typename T>
+struct SpecialTests {
+    static void Test() {}
+};
+
+template <>
+struct SpecialTests <Mycumap>
+{
+    static void Test()
+    {
+        Mycumap cont(0);
+        const Mycumap &ccont(cont);
+
+        // mapped_type& operator[](const key_type& k);
+        cont[1] = 2;
+
+        // bool empty() const;    
+        ASSERT(!ccont.empty(), "Concurrent container empty after adding an element");
+
+        // size_type size() const;
+        ASSERT(ccont.size() == 1, "Concurrent container size incorrect");
+
+        ASSERT(cont[1] == 2, "Concurrent container operator[] returned an incorrect value");
+
+        // mapped_type& at( const key_type& k );
+        // const mapped_type& at(const key_type& k) const;
+        ASSERT(cont.at(1) == 2, "Concurrent container at() returned an incorrect value");
+        ASSERT(ccont.at(1) == 2, "Concurrent container at() returned an incorrect value");
+
+        // iterator find(const key_type& k);
+        Mycumap::const_iterator it = cont.find(1);
+        ASSERT(it != cont.end() && Value<Mycumap>::get(*(it)) == 2, "Element with key 1 not properly found");
+
+        REMARK("passed -- specialized concurrent unordered map tests\n");
+    }
+};
+
+template<typename T>
+void test_basic(const char * str)
+{
+    T cont;
+    const T &ccont(cont);
+
+    // bool empty() const;
+    ASSERT(ccont.empty(), "Concurrent container not empty after construction");
+
+    // size_type size() const;
+    ASSERT(ccont.size() == 0, "Concurrent container not empty after construction");
+
+    // size_type max_size() const;
+    ASSERT(ccont.max_size() > 0, "Concurrent container max size invalid");
+
+    //iterator begin();
+    //iterator end();
+    ASSERT(cont.begin() == cont.end(), "Concurrent container iterators invalid after construction");
+    ASSERT(ccont.begin() == ccont.end(), "Concurrent container iterators invalid after construction");
+    ASSERT(cont.cbegin() == cont.cend(), "Concurrent container iterators invalid after construction");
+
+    //std::pair<iterator, bool> insert(const value_type& obj);
+    std::pair<typename T::iterator, bool> ins = cont.insert(Value<T>::make(1));
+    ASSERT(ins.second == true && Value<T>::get(*(ins.first)) == 1, "Element 1 not properly inserted");
+
+    // bool empty() const;
+    ASSERT(!ccont.empty(), "Concurrent container empty after adding an element");
+
+    // size_type size() const;
+    ASSERT(ccont.size() == 1, "Concurrent container size incorrect");
+
+    std::pair<typename T::iterator, bool> ins2 = cont.insert(Value<T>::make(1));
+
+    if (T::allow_multimapping)
+    {
+        // std::pair<iterator, bool> insert(const value_type& obj);
+        ASSERT(ins2.second == true && Value<T>::get(*(ins2.first)) == 1, "Element 1 not properly inserted");
+
+        // size_type size() const;
+        ASSERT(ccont.size() == 2, "Concurrent container size incorrect");
+
+        // size_type count(const key_type& k) const;
+        ASSERT(ccont.count(1) == 2, "Concurrent container count(1) incorrect");
+
+        // std::pair<iterator, iterator> equal_range(const key_type& k);
+        std::pair<typename T::iterator, typename T::iterator> range = cont.equal_range(1);
+        typename T::iterator it = range.first;
+        ASSERT(it != cont.end() && Value<T>::get(*it) == 1, "Element 1 not properly found");
+        unsigned int count = 0;
+        for (; it != range.second; it++)
+        {
+            count++;
+            ASSERT(Value<T>::get(*it) == 1, "Element 1 not properly found");
+        }
+
+        ASSERT(count == 2, "Range doesn't have the right number of elements");
+    }
+    else
+    {
+        // std::pair<iterator, bool> insert(const value_type& obj);
+        ASSERT(ins2.second == false && ins2.first == ins.first, "Element 1 should not be re-inserted");
+
+        // size_type size() const;
+        ASSERT(ccont.size() == 1, "Concurrent container size incorrect");
+
+        // size_type count(const key_type& k) const;
+        ASSERT(ccont.count(1) == 1, "Concurrent container count(1) incorrect");
+
+        // std::pair<const_iterator, const_iterator> equal_range(const key_type& k) const;
+        // std::pair<iterator, iterator> equal_range(const key_type& k);
+        std::pair<typename T::iterator, typename T::iterator> range = cont.equal_range(1);
+        typename T::iterator i = range.first;
+        ASSERT(i != cont.end() && Value<T>::get(*i) == 1, "Element 1 not properly found");
+        ASSERT(++i == range.second, "Range doesn't have the right number of elements");
+    }
+
+    // const_iterator find(const key_type& k) const;
+    // iterator find(const key_type& k);
+    typename T::iterator it = cont.find(1);
+    ASSERT(it != cont.end() && Value<T>::get(*(it)) == 1, "Element 1 not properly found");
+    ASSERT(ccont.find(1) == it, "Element 1 not properly found");
+
+    // iterator insert(const_iterator hint, const value_type& obj);
+    typename T::iterator it2 = cont.insert(ins.first, Value<T>::make(2));
+    ASSERT(Value<T>::get(*it2) == 2, "Element 2 not properly inserted");
+
+    // T(const T& _Umap)
+    T newcont = ccont;
+    ASSERT(T::allow_multimapping ? (newcont.size() == 3) : (newcont.size() == 2), "Copy construction did not copy the elements properly");
+
+    // size_type unsafe_erase(const key_type& k);
+    typename T::size_type size = cont.unsafe_erase(1);
+    ASSERT(T::allow_multimapping ? (size == 2) : (size == 1), "Erase did not remove the right number of elements");
+
+    // iterator unsafe_erase(const_iterator position);
+    typename T::iterator it4 = cont.unsafe_erase(cont.find(2));
+    ASSERT(it4 == cont.end() && cont.size() == 0, "Erase did not remove the last element properly");
+
+    // template<class InputIterator> void insert(InputIterator first, InputIterator last);
+    cont.insert(newcont.begin(), newcont.end());
+    ASSERT(T::allow_multimapping ? (cont.size() == 3) : (cont.size() == 2), "Range insert did not copy the elements properly");
+
+    // iterator unsafe_erase(const_iterator first, const_iterator last);
+    std::pair<typename T::iterator, typename T::iterator> range2 = newcont.equal_range(1);
+    newcont.unsafe_erase(range2.first, range2.second);
+    ASSERT(newcont.size() == 1, "Range erase did not erase the elements properly");
+
+    // void clear();
+    newcont.clear();
+    ASSERT(newcont.begin() == newcont.end() && newcont.size() == 0, "Clear did not clear the container");
+
+    // T& operator=(const T& _Umap)
+    newcont = ccont;
+    ASSERT(T::allow_multimapping ? (newcont.size() == 3) : (newcont.size() == 2), "Assignment operator did not copy the elements properly");
+
+    // void rehash(size_type n);
+    newcont.rehash(16);
+    ASSERT(T::allow_multimapping ? (newcont.size() == 3) : (newcont.size() == 2), "Rehash should not affect the container elements");
+
+    // float load_factor() const;
+    // float max_load_factor() const;
+    ASSERT(ccont.load_factor() <= ccont.max_load_factor(), "Load factor invalid");
+
+    // void max_load_factor(float z);
+    cont.max_load_factor(16.0f);
+    ASSERT(ccont.max_load_factor() == 16.0f, "Max load factor not properly changed");
+
+    // hasher hash_function() const;
+    ccont.hash_function();
+
+    // key_equal key_eq() const;
+    ccont.key_eq();
+
+    cont.clear();
+    CheckAllocatorA(cont, 1, 0); // one dummy is always allocated
+    for (int i = 0; i < 256; i++)
+    {
+        std::pair<typename T::iterator, bool> ins3 = cont.insert(Value<T>::make(i));
+        ASSERT(ins3.second == true && Value<T>::get(*(ins3.first)) == i, "Element not properly inserted");
+    }
+    ASSERT(cont.size() == 256, "Wrong number of elements inserted");
+    ASSERT(256 == CheckRecursiveRange<typename T::iterator>(cont.range()).first, NULL);
+    ASSERT(256 == CheckRecursiveRange<typename T::const_iterator>(ccont.range()).first, NULL);
+
+    // size_type unsafe_bucket_count() const;
+    ASSERT(ccont.unsafe_bucket_count() == 16, "Wrong number of buckets");
+
+    // size_type unsafe_max_bucket_count() const;
+    ASSERT(ccont.unsafe_max_bucket_count() > 65536, "Wrong max number of buckets");
+
+    for (unsigned int i = 0; i < 256; i++)
+    {
+        typename T::size_type buck = ccont.unsafe_bucket(i);
+
+        // size_type unsafe_bucket(const key_type& k) const;
+        ASSERT(buck < 16, "Wrong bucket mapping");
+    }
+
+    for (unsigned int i = 0; i < 16; i++)
+    {
+        // size_type unsafe_bucket_size(size_type n);
+        ASSERT(cont.unsafe_bucket_size(i) == 16, "Wrong number of elements in a bucket");
+
+        // local_iterator unsafe_begin(size_type n);
+        // const_local_iterator unsafe_begin(size_type n) const;
+        // local_iterator unsafe_end(size_type n);
+        // const_local_iterator unsafe_end(size_type n) const;
+        // const_local_iterator unsafe_cbegin(size_type n) const;
+        // const_local_iterator unsafe_cend(size_type n) const;
+        unsigned int count = 0;
+        for (typename T::iterator bit = cont.unsafe_begin(i); bit != cont.unsafe_end(i); bit++)
+        {
+            count++;
+        }
+        ASSERT(count == 16, "Bucket iterators are invalid");
+    }
+
+    // void swap(T&);
+    cont.swap(newcont);
+    ASSERT(newcont.size() == 256, "Wrong number of elements after swap");
+    ASSERT(newcont.count(200) == 1, "Element with key 200 not present after swap");
+    ASSERT(newcont.count(16) == 1, "Element with key 16 not present after swap");
+    ASSERT(newcont.count(99) == 1, "Element with key 99 not present after swap");
+    ASSERT(T::allow_multimapping ? (cont.size() == 3) : (cont.size() == 2), "Wrong number of elements after swap");
+
+    REMARK("passed -- basic %S tests\n", str);
+
+#if defined (VERBOSE)
+    REMARK("container dump debug:\n");
+    cont._Dump();
+    REMARK("container dump release:\n");
+    cont.dump();
+    REMARK("\n");
+#endif
+
+    SpecialTests<T>::Test();
+}
+
+void test_machine() {
+    ASSERT(__TBB_ReverseByte(0)==0, NULL );
+    ASSERT(__TBB_ReverseByte(1)==0x80, NULL );
+    ASSERT(__TBB_ReverseByte(0xFE)==0x7F, NULL );
+    ASSERT(__TBB_ReverseByte(0xFF)==0xFF, NULL );
+}
+
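+//! Filler/checker used by test_concurrent: thread 0 inserts the even keys in
+//! ascending order, thread 1 inserts the same even keys in descending order,
+//! the remaining even-numbered threads insert odd keys, and the odd-numbered
+//! threads look odd keys up, verifying that whenever a key is found every
+//! smaller odd key (inserted before it) is found as well.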
+template<typename T>
+class FillTable: NoAssign {
+    T &table;
+    const int items;
+    typedef std::pair<typename T::iterator, bool> pairIB;
+public:
+    FillTable(T &t, int i) : table(t), items(i) {
+        ASSERT( !(items&1) && items > 100, NULL);
+    }
+    void operator()(int threadn) const {
+        if( threadn == 0 ) { // Fill even keys forward (single thread)
+            bool last_inserted = true;
+            for( int i = 0; i < items; i+=2 ) {
+                pairIB pib = table.insert(Value<T>::make(i));
+                ASSERT(Value<T>::get(*(pib.first)) == i, "Element not properly inserted");
+                ASSERT( last_inserted || !pib.second, "Previous key was not inserted but this one was" );
+                last_inserted = pib.second;
+            }
+        } else if( threadn == 1 ) { // Fill even keys backward (single thread)
+            bool last_inserted = true;
+            for( int i = items-2; i >= 0; i-=2 ) {
+                pairIB pib = table.insert(Value<T>::make(i));
+                ASSERT(Value<T>::get(*(pib.first)) == i, "Element not properly inserted");
+                ASSERT( last_inserted || !pib.second, "Previous key was not inserted but this one was" );
+                last_inserted = pib.second;
+            }
+        } else if( !(threadn&1) ) { // Fill odd keys forward (multiple threads)
+            for( int i = 1; i < items; i+=2 ) {
+                pairIB pib = table.insert(Value<T>::make(i));
+                ASSERT(Value<T>::get(*(pib.first)) == i, "Element not properly inserted");
+            }
+        } else { // Check odd keys backward (multiple threads)
+            bool last_found = false;
+            for( int i = items-1; i >= 0; i-=2 ) {
+                typename T::iterator it = table.find(i);
+                if( it != table.end() ) { // found
+                    ASSERT(Value<T>::get(*it) == i, "Element not properly inserted");
+                    last_found = true;
+                } else ASSERT( !last_found, "Previous key was found but this one is not" );
+            }
+        }
+    }
+};
+
+typedef tbb::atomic<unsigned char> AtomicByte;
+
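+//! parallel_for body that marks each visited key in 'array'; combined with
+//! CheckRange below it verifies that traversing a table range visits every
+//! element exactly once.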
+template<typename RangeType>
+struct ParallelTraverseBody: NoAssign {
+    const int n;
+    AtomicByte* const array;
+    ParallelTraverseBody( AtomicByte an_array[], int a_n ) : 
+        n(a_n), array(an_array)
+    {}
+    void operator()( const RangeType& range ) const {
+        for( typename RangeType::iterator i = range.begin(); i!=range.end(); ++i ) {
+            int k = i->first;
+            ASSERT( k == i->second, NULL );
+            ASSERT( 0<=k && k<n, NULL ); 
+            array[k]++;
+        }
+    }
+};
+
+void CheckRange( AtomicByte array[], int n ) {
+    for( int k=0; k<n; ++k ) {
+        if( array[k] != 1 ) {
+            REPORT("array[%d]=%d\n", k, int(array[k]));
+            ASSERT(false,NULL);
+        }
+    }
+}
+
+template<typename T>
+class CheckTable: NoAssign {
+    T &table;
+public:
+    CheckTable(T &t) : NoAssign(), table(t) {}
+    void operator()(int i) const {
+        int c = (int)table.count( i );
+        ASSERT( c, "must exist" );
+    }
+};
+
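+//! Fill the table from 16 native threads, traverse it with parallel_for over
+//! both a mutable and a const range, look up every key concurrently, then
+//! clear the table and verify the allocator counters.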
+template<typename T>
+void test_concurrent(const char *tablename) {
+#if TBB_USE_ASSERT
+    int items = 2000;
+#else
+    int items = 100000;
+#endif
+    T table(items/1000);
+    tbb::tick_count t0 = tbb::tick_count::now();
+    NativeParallelFor( 16/*min 6*/, FillTable<T>(table, items) );
+    tbb::tick_count t1 = tbb::tick_count::now();
+    REMARK( "time for filling '%s' by %d items = %g\n", tablename, items, (t1-t0).seconds() );
+    ASSERT( int(table.size()) == items, NULL);
+
+    AtomicByte* array = new AtomicByte[items];
+    memset( array, 0, items*sizeof(AtomicByte) );
+
+    typename T::range_type r = table.range();
+    ASSERT(items == CheckRecursiveRange<typename T::iterator>(r).first, NULL);
+    tbb::parallel_for( r, ParallelTraverseBody<typename T::const_range_type>( array, items ));
+    CheckRange( array, items );
+
+    const T &const_table = table;
+    memset( array, 0, items*sizeof(AtomicByte) );
+    typename T::const_range_type cr = const_table.range();
+    ASSERT(items == CheckRecursiveRange<typename T::const_iterator>(cr).first, NULL);
+    tbb::parallel_for( cr, ParallelTraverseBody<typename T::const_range_type>( array, items ));
+    CheckRange( array, items );
+    delete[] array;
+
+    tbb::parallel_for( 0, items, CheckTable<T>( table ) );
+  
+    table.clear();
+    CheckAllocatorA(table, items+1, items); // one dummy is always allocated
+}
+
+int TestMain () {
+    test_machine();
+    test_basic<Mycumap>("concurrent unordered map");
+    test_concurrent<Mycumap>("concurrent unordered map");
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_concurrent_vector.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_concurrent_vector.cpp
new file mode 100644 (file)
index 0000000..f6fbfe7
--- /dev/null
@@ -0,0 +1,1016 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/concurrent_vector.h"
+#include "tbb/tbb_allocator.h"
+#include "tbb/cache_aligned_allocator.h"
+#include "tbb/tbb_exception.h"
+#include <cstdio>
+#include <cstdlib>
+#include "harness_report.h"
+#include "harness_assert.h"
+#include "harness_allocator.h"
+
+#if TBB_USE_EXCEPTIONS
+static bool known_issue_verbose = false;
+#define KNOWN_ISSUE(msg) if(!known_issue_verbose) known_issue_verbose = true, REPORT(msg)
+#endif /* TBB_USE_EXCEPTIONS */
+
+tbb::atomic<long> FooCount;
+long MaxFooCount = 0;
+
+//! Problem size
+const size_t N = 500000;
+
+//! Exception for concurrent_vector
+class Foo_exception : public std::bad_alloc {
+public:
+    virtual const char *what() const throw() { return "out of Foo limit"; }
+    virtual ~Foo_exception() throw() {}
+};
+
+static const int initial_value_of_bar = 42;
+struct Foo {
+    int my_bar;
+public:
+    enum State {
+        ZeroInitialized=0,
+        DefaultInitialized=0xDEFAUL,
+        CopyInitialized=0xC0314,
+        Destroyed=0xDEADF00
+    } state;
+    bool is_valid() const {
+        return state==DefaultInitialized||state==CopyInitialized;
+    }
+    bool is_valid_or_zero() const {
+        return is_valid()||(state==ZeroInitialized && !my_bar);
+    }
+    int& zero_bar() {
+        ASSERT( is_valid_or_zero(), NULL );
+        return my_bar;
+    }
+    int& bar() {
+        ASSERT( is_valid(), NULL );
+        return my_bar;
+    }
+    int bar() const {
+        ASSERT( is_valid(), NULL );
+        return my_bar;
+    }
+    Foo( int barr = initial_value_of_bar ) {
+        my_bar = barr;
+        if(MaxFooCount && FooCount >= MaxFooCount)
+            __TBB_THROW( Foo_exception() );
+        FooCount++;
+        state = DefaultInitialized;
+    }
+    Foo( const Foo& foo ) {
+        my_bar = foo.my_bar;
+        ASSERT( foo.is_valid_or_zero(), "bad source for copy" );
+        if(MaxFooCount && FooCount >= MaxFooCount)
+            __TBB_THROW( Foo_exception() );
+        FooCount++;
+        state = CopyInitialized;
+    }
+    ~Foo() {
+        ASSERT( is_valid_or_zero(), NULL );
+        my_bar = ~initial_value_of_bar;
+        if(state != ZeroInitialized) --FooCount;
+        state = Destroyed;
+    }
+    bool operator==(const Foo &f) const { return my_bar == f.my_bar; }
+    bool operator<(const Foo &f) const { return my_bar < f.my_bar; }
+    bool is_const() const {return true;}
+    bool is_const() {return false;}
+protected:
+    char reserve[1];
+    void operator=( const Foo& ) {}
+};
+
+class FooWithAssign: public Foo {
+public:
+    void operator=( const FooWithAssign& x ) {
+        my_bar = x.my_bar;
+        ASSERT( x.is_valid_or_zero(), "bad source for assignment" );
+        ASSERT( is_valid(), NULL );
+    } 
+    bool operator==(const Foo &f) const { return my_bar == f.my_bar; }
+    bool operator<(const Foo &f) const { return my_bar < f.my_bar; }
+};
+
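+//! Input iterator producing FooWithAssign values with bar() == 0, 1, 2, ...;
+//! used to fill test vectors with a known sequence.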
+class FooIterator: public std::iterator<std::input_iterator_tag,FooWithAssign> {
+    int x_bar;
+public:
+    FooIterator(int x) {
+        x_bar = x;
+    }
+    FooIterator &operator++() {
+        x_bar++; return *this;
+    }
+    FooWithAssign operator*() {
+        FooWithAssign foo; foo.bar() = x_bar;
+        return foo;
+    }
+    bool operator!=(const FooIterator &i) { return x_bar != i.x_bar; }
+};
+
+inline void NextSize( int& s ) {
+    if( s<=32 ) ++s;
+    else s += s/10;
+}
+
+//! Check that the vector has the expected size and contents
+template<typename vector_t>
+static void CheckVector( const vector_t& cv, size_t expected_size, size_t old_size ) {
+    ASSERT( cv.capacity()>=expected_size, NULL );
+    ASSERT( cv.size()==expected_size, NULL );
+    ASSERT( cv.empty()==(expected_size==0), NULL );
+    for( int j=0; j<int(expected_size); ++j ) {
+        if( cv[j].bar()!=~j )
+            REPORT("ERROR on line %d for old_size=%ld expected_size=%ld j=%d\n",__LINE__,long(old_size),long(expected_size),j);
+    }
+}
+
+//! Test of assign, grow, copying with various sizes
+void TestResizeAndCopy() {
+    typedef static_counting_allocator<debug_allocator<Foo,std::allocator>, std::size_t> allocator_t;
+    typedef tbb::concurrent_vector<Foo, allocator_t> vector_t;
+    allocator_t::init_counters();
+    for( int old_size=0; old_size<=128; NextSize( old_size ) ) {
+        for( int new_size=0; new_size<=1280; NextSize( new_size ) ) {
+            long count = FooCount;
+            vector_t v;
+            ASSERT( count==FooCount, NULL );
+            v.assign(old_size/2, Foo() );
+            ASSERT( count+old_size/2==FooCount, NULL );
+            for( int j=0; j<old_size/2; ++j )
+                ASSERT( v[j].state == Foo::CopyInitialized, NULL);
+            v.assign(FooIterator(0), FooIterator(old_size));
+            v.resize(new_size, Foo(33) );
+            ASSERT( count+new_size==FooCount, NULL );
+            for( int j=0; j<new_size; ++j ) {
+                int expected = j<old_size ? j : 33;
+                if( v[j].bar()!=expected ) 
+                    REPORT("ERROR on line %d for old_size=%ld new_size=%ld v[%ld].bar()=%d != %d\n",__LINE__,long(old_size),long(new_size),long(j),v[j].bar(), expected);
+            }
+            ASSERT( v.size()==size_t(new_size), NULL );
+            for( int j=0; j<new_size; ++j ) {
+                v[j].bar() = ~j;
+            }
+            const vector_t& cv = v;
+            // Try copy constructor
+            vector_t copy_of_v(cv);
+            CheckVector(cv,new_size,old_size);
+            ASSERT( !(v != copy_of_v), NULL );
+            v.clear();
+            ASSERT( v.empty(), NULL );
+            swap(v, copy_of_v);
+            ASSERT( copy_of_v.empty(), NULL );
+            CheckVector(v,new_size,old_size);
+        }
+    }
+    ASSERT( allocator_t::items_allocated == allocator_t::items_freed, NULL);
+    ASSERT( allocator_t::allocations == allocator_t::frees, NULL);
+}
+
+//! Test reserve, compact, capacity
+void TestCapacity() {
+    typedef static_counting_allocator<debug_allocator<Foo,tbb::cache_aligned_allocator>, std::size_t> allocator_t;
+    typedef tbb::concurrent_vector<Foo, allocator_t> vector_t;
+    allocator_t::init_counters();
+    for( size_t old_size=0; old_size<=11000; old_size=(old_size<5 ? old_size+1 : 3*old_size) ) {
+        for( size_t new_size=0; new_size<=11000; new_size=(new_size<5 ? new_size+1 : 3*new_size) ) {
+            long count = FooCount; 
+            {
+                vector_t v; v.reserve(old_size);
+                ASSERT( v.capacity()>=old_size, NULL );
+                v.reserve( new_size );
+                ASSERT( v.capacity()>=old_size, NULL );
+                ASSERT( v.capacity()>=new_size, NULL );
+                ASSERT( v.empty(), NULL );
+                size_t fill_size = 2*new_size;
+                for( size_t i=0; i<fill_size; ++i ) {
+                    ASSERT( size_t(FooCount)==count+i, NULL );
+#if TBB_DEPRECATED
+                    size_t j = v.grow_by(1);
+#else
+                    size_t j = v.grow_by(1) - v.begin();
+#endif
+                    ASSERT( j==i, NULL );
+                    v[j].bar() = int(~j);
+                }
+                vector_t copy_of_v(v); // should allocate first segment with same size as for shrink_to_fit()
+                if(__TBB_Log2(/*reserved size*/old_size|1) > __TBB_Log2(fill_size|1) )
+                    ASSERT( v.capacity() != copy_of_v.capacity(), NULL );
+                v.shrink_to_fit();
+                ASSERT( v.capacity() == copy_of_v.capacity(), NULL );
+                CheckVector(v, new_size*2, old_size); // check vector correctness
+                ASSERT( v==copy_of_v, NULL ); // TODO: check also segments layout equality
+            }
+            ASSERT( FooCount==count, NULL );
+        }
+    } 
+    ASSERT( allocator_t::items_allocated == allocator_t::items_freed, NULL);
+    ASSERT( allocator_t::allocations == allocator_t::frees, NULL);
+}
+
+struct AssignElement {
+    typedef tbb::concurrent_vector<int>::range_type::iterator iterator;
+    iterator base;
+    void operator()( const tbb::concurrent_vector<int>::range_type& range ) const {
+        for( iterator i=range.begin(); i!=range.end(); ++i ) {
+            if( *i!=0 )
+                REPORT("ERROR for v[%ld]\n", long(i-base));
+            *i = int(i-base);
+        }
+    }
+    AssignElement( iterator base_ ) : base(base_) {}
+};
+
+struct CheckElement {
+    typedef tbb::concurrent_vector<int>::const_range_type::iterator iterator;
+    iterator base;
+    void operator()( const tbb::concurrent_vector<int>::const_range_type& range ) const {
+        for( iterator i=range.begin(); i!=range.end(); ++i )
+            if( *i != int(i-base) )
+                REPORT("ERROR for v[%ld]\n", long(i-base));
+    }
+    CheckElement( iterator base_ ) : base(base_) {}
+};
+
+#include "tbb/tick_count.h"
+#include "tbb/parallel_for.h"
+#include "harness.h"
+
+//! Test parallel access by iterators
+void TestParallelFor( int nthread ) {
+    typedef tbb::concurrent_vector<int> vector_t;
+    vector_t v;
+    v.resize(N);
+    tbb::tick_count t0 = tbb::tick_count::now();
+    REMARK("Calling parallel_for with %ld threads\n",long(nthread));
+    tbb::parallel_for( v.range(10000), AssignElement(v.begin()) );
+    tbb::tick_count t1 = tbb::tick_count::now();
+    const vector_t& u = v;
+    tbb::parallel_for( u.range(10000), CheckElement(u.begin()) );
+    tbb::tick_count t2 = tbb::tick_count::now();
+    REMARK("Time for parallel_for: assign time = %8.5f, check time = %8.5f\n",
+               (t1-t0).seconds(),(t2-t1).seconds());
+    for( long i=0; size_t(i)<v.size(); ++i )
+        if( v[i]!=i )
+            REPORT("ERROR for v[%ld]\n", i);
+}
+
+template<typename Iterator1, typename Iterator2>
+void TestIteratorAssignment( Iterator2 j ) {
+    Iterator1 i(j);
+    ASSERT( i==j, NULL );
+    ASSERT( !(i!=j), NULL );
+    Iterator1 k;
+    k = j;
+    ASSERT( k==j, NULL );
+    ASSERT( !(k!=j), NULL );
+}
+
+template<typename Range1, typename Range2>
+void TestRangeAssignment( Range2 r2 ) {
+    Range1 r1(r2); r1 = r2;
+}
+
+template<typename Iterator, typename T>
+void TestIteratorTraits() {
+    AssertSameType( static_cast<typename Iterator::difference_type*>(0), static_cast<ptrdiff_t*>(0) ); 
+    AssertSameType( static_cast<typename Iterator::value_type*>(0), static_cast<T*>(0) ); 
+    AssertSameType( static_cast<typename Iterator::pointer*>(0), static_cast<T**>(0) ); 
+    AssertSameType( static_cast<typename Iterator::iterator_category*>(0), static_cast<std::random_access_iterator_tag*>(0) );
+    T x;
+    typename Iterator::reference xr = x;
+    typename Iterator::pointer xp = &x;
+    ASSERT( &xr==xp, NULL );
+}
+
+template<typename Vector, typename Iterator>
+void CheckConstIterator( const Vector& u, int i, const Iterator& cp ) {
+    typename Vector::const_reference pref = *cp;
+    if( pref.bar()!=i )
+        REPORT("ERROR for u[%ld] using const_iterator\n", long(i));
+    typename Vector::difference_type delta = cp-u.begin();
+    ASSERT( delta==i, NULL );
+    if( u[i].bar()!=i )
+        REPORT("ERROR for u[%ld] using subscripting\n", long(i));
+    ASSERT( u.begin()[i].bar()==i, NULL );
+}
+
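+//! Compare iterators of two (possibly different) types over the same vector
+//! with every relational operator, and check that iterators into a copy of
+//! the vector never compare equal to iterators into the original.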
+template<typename Iterator1, typename Iterator2, typename V> 
+void CheckIteratorComparison( V& u ) {
+    V u2 = u;
+    Iterator1 i = u.begin();
+
+    for( int i_count=0; i_count<100; ++i_count ) {
+        Iterator2 j = u.begin();
+        Iterator2 i2 = u2.begin();
+        for( int j_count=0; j_count<100; ++j_count ) {
+            ASSERT( (i==j)==(i_count==j_count), NULL );
+            ASSERT( (i!=j)==(i_count!=j_count), NULL );
+            ASSERT( (i-j)==(i_count-j_count), NULL );
+            ASSERT( (i<j)==(i_count<j_count), NULL );
+            ASSERT( (i>j)==(i_count>j_count), NULL );
+            ASSERT( (i<=j)==(i_count<=j_count), NULL );
+            ASSERT( (i>=j)==(i_count>=j_count), NULL );
+            ASSERT( !(i==i2), NULL ); 
+            ASSERT( i!=i2, NULL ); 
+            ++j;
+            ++i2;
+        }
+        ++i;
+    }
+}
+
+//! Test sequential iterators for vector type V.
+/** Also does timing. */
+template<typename T>
+void TestSequentialFor() {
+    typedef tbb::concurrent_vector<FooWithAssign> V;
+    V v(N);
+    ASSERT(v.grow_by(0) == v.grow_by(0, FooWithAssign()), NULL);
+
+    // Check iterator 
+    tbb::tick_count t0 = tbb::tick_count::now();
+    typename V::iterator p = v.begin();
+    ASSERT( !(*p).is_const(), NULL );
+    ASSERT( !p->is_const(), NULL );
+    for( int i=0; size_t(i)<v.size(); ++i, ++p ) {
+        if( (*p).state!=Foo::DefaultInitialized )
+            REPORT("ERROR for v[%ld]\n", long(i));
+        typename V::reference pref = *p;
+        pref.bar() = i;
+        typename V::difference_type delta = p-v.begin();
+        ASSERT( delta==i, NULL );
+        ASSERT( -delta<=0, "difference type not signed?" );
+    }
+    tbb::tick_count t1 = tbb::tick_count::now();
+    
+    // Check const_iterator going forwards
+    const V& u = v;
+    typename V::const_iterator cp = u.begin();
+    ASSERT( cp == v.cbegin(), NULL );
+    ASSERT( (*cp).is_const(), NULL );
+    ASSERT( cp->is_const(), NULL );
+    ASSERT( *cp == v.front(), NULL);
+    for( int i=0; size_t(i)<u.size(); ++i ) {
+        CheckConstIterator(u,i,cp);
+        V::const_iterator &cpr = ++cp;
+        ASSERT( &cpr == &cp, "preincrement not returning a reference?");
+    }
+    tbb::tick_count t2 = tbb::tick_count::now();
+    REMARK("Time for serial for:  assign time = %8.5f, check time = %8.5f\n",
+               (t1-t0).seconds(),(t2-t1).seconds());
+
+    // Now go backwards
+    cp = u.end();
+    ASSERT( cp == v.cend(), NULL );
+    for( int i=int(u.size()); i>0; ) {
+        --i;
+        V::const_iterator &cpr = --cp;
+        ASSERT( &cpr == &cp, "predecrement not returning a reference?");
+        if( i>0 ) {
+            typename V::const_iterator cp_old = cp--;
+            int here = (*cp_old).bar();
+            ASSERT( here==u[i].bar(), NULL );
+            typename V::const_iterator cp_new = cp++;
+            int prev = (*cp_new).bar();
+            ASSERT( prev==u[i-1].bar(), NULL );
+        }
+        CheckConstIterator(u,i,cp);
+    }
+
+    // Now go forwards and backwards
+    ptrdiff_t k = 0;
+    cp = u.begin();
+    for( size_t i=0; i<u.size(); ++i ) {
+        CheckConstIterator(u,int(k),cp);
+        typename V::difference_type delta = i*3 % u.size();
+        if( 0<=k+delta && size_t(k+delta)<u.size() ) {
+            V::const_iterator &cpr = (cp += delta);
+            ASSERT( &cpr == &cp, "+= not returning a reference?");
+            k += delta; 
+        } 
+        delta = i*7 % u.size();
+        if( 0<=k-delta && size_t(k-delta)<u.size() ) {
+            if( i&1 ) { 
+                V::const_iterator &cpr = (cp -= delta);
+                ASSERT( &cpr == &cp, "-= not returning a reference?");
+            } else
+                cp = cp - delta;        // Test operator-
+            k -= delta; 
+        } 
+    }
+    
+    for( int i=0; size_t(i)<u.size(); i=(i<50?i+1:i*3) )
+        for( int j=-i; size_t(i+j)<u.size(); j=(j<50?j+1:j*5) ) {
+            ASSERT( (u.begin()+i)[j].bar()==i+j, NULL );
+            ASSERT( (v.begin()+i)[j].bar()==i+j, NULL );
+            ASSERT((v.cbegin()+i)[j].bar()==i+j, NULL );
+            ASSERT( (i+u.begin())[j].bar()==i+j, NULL );
+            ASSERT( (i+v.begin())[j].bar()==i+j, NULL );
+            ASSERT((i+v.cbegin())[j].bar()==i+j, NULL );
+        }
+
+    CheckIteratorComparison<typename V::iterator, typename V::iterator>(v);
+    CheckIteratorComparison<typename V::iterator, typename V::const_iterator>(v);
+    CheckIteratorComparison<typename V::const_iterator, typename V::iterator>(v);
+    CheckIteratorComparison<typename V::const_iterator, typename V::const_iterator>(v);
+
+    TestIteratorAssignment<typename V::const_iterator>( u.begin() );
+    TestIteratorAssignment<typename V::const_iterator>( v.begin() );
+    TestIteratorAssignment<typename V::const_iterator>( v.cbegin() );
+    TestIteratorAssignment<typename V::iterator>( v.begin() );
+    // doesn't compile as expected: TestIteratorAssignment<typename V::iterator>( u.begin() );
+
+    TestRangeAssignment<typename V::const_range_type>( u.range() );
+    TestRangeAssignment<typename V::const_range_type>( v.range() );
+    TestRangeAssignment<typename V::range_type>( v.range() );
+    // doesn't compile as expected: TestRangeAssignment<typename V::range_type>( u.range() );
+
+    // Check reverse_iterator 
+    typename V::reverse_iterator rp = v.rbegin();
+    for( size_t i=v.size(); i>0; --i, ++rp ) {
+        typename V::reference pref = *rp;
+        ASSERT( size_t(pref.bar())==i-1, NULL );
+        ASSERT( rp!=v.rend(), NULL );
+    }
+    ASSERT( rp==v.rend(), NULL );
+    
+    // Check const_reverse_iterator 
+    typename V::const_reverse_iterator crp = u.rbegin();
+    ASSERT( crp == v.crbegin(), NULL );
+    ASSERT( *crp == v.back(), NULL);
+    for( size_t i=v.size(); i>0; --i, ++crp ) {
+        typename V::const_reference cpref = *crp;
+        ASSERT( size_t(cpref.bar())==i-1, NULL );
+        ASSERT( crp!=u.rend(), NULL );
+    }
+    ASSERT( crp == u.rend(), NULL );
+    ASSERT( crp == v.crend(), NULL );
+
+    TestIteratorAssignment<typename V::const_reverse_iterator>( u.rbegin() );
+    TestIteratorAssignment<typename V::reverse_iterator>( v.rbegin() );
+
+    // test compliance with C++ Standard 2003, clause 23.1.1p9
+    {
+        tbb::concurrent_vector<int> v1, v2(1, 100);
+        v1.assign(1, 100); ASSERT(v1 == v2, NULL);
+        ASSERT(v1.size() == 1 && v1[0] == 100, "used integral iterators");
+    }
+
+    // cross-allocator tests
+#if !defined(_WIN64) || defined(_CPPLIB_VER)
+    typedef local_counting_allocator<std::allocator<int>, size_t> allocator1_t;
+    typedef tbb::cache_aligned_allocator<void> allocator2_t;
+    typedef tbb::concurrent_vector<FooWithAssign, allocator1_t> V1;
+    typedef tbb::concurrent_vector<FooWithAssign, allocator2_t> V2;
+    V1 v1( v ); // checking cross-allocator copying
+    V2 v2( 10 ); v2 = v1; // checking cross-allocator assignment
+    ASSERT( (v1 == v) && !(v2 != v), NULL);
+    ASSERT( !(v1 < v) && !(v2 > v), NULL);
+    ASSERT( (v1 <= v) && (v2 >= v), NULL);
+#endif
+}
+
+static const size_t Modulus = 7;
+
+typedef static_counting_allocator<debug_allocator<Foo> > MyAllocator;
+typedef tbb::concurrent_vector<Foo, MyAllocator> MyVector;
+
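+//! Body for TestConcurrentGrowToAtLeast: issues grow_to_at_least requests of
+//! pseudo-random sizes and checks that the vector never ends up smaller than
+//! the requested size.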
+template<typename MyVector>
+class GrowToAtLeast: NoAssign {
+    MyVector& my_vector;
+public:
+    void operator()( const tbb::blocked_range<size_t>& range ) const {
+        for( size_t i=range.begin(); i!=range.end(); ++i ) {
+            size_t n = my_vector.size();
+            size_t req = (i % (2*n+1))+1;
+#if TBB_DEPRECATED
+            my_vector.grow_to_at_least(req);
+#else
+            typename MyVector::iterator p = my_vector.grow_to_at_least(req);
+            if( p-my_vector.begin() < typename MyVector::difference_type(req) )
+                ASSERT( p->state == Foo::DefaultInitialized || p->state == Foo::ZeroInitialized, NULL);
+#endif
+            ASSERT( my_vector.size()>=req, NULL );
+        }
+    }
+    GrowToAtLeast( MyVector& vector ) : my_vector(vector) {}
+};
+
+void TestConcurrentGrowToAtLeast() {
+    typedef static_counting_allocator< tbb::zero_allocator<Foo> > MyAllocator;
+    typedef tbb::concurrent_vector<Foo, MyAllocator> MyVector;
+    MyAllocator::init_counters();
+    MyVector v(2, Foo(), MyAllocator());
+    for( size_t s=1; s<1000; s*=10 ) {
+        tbb::parallel_for( tbb::blocked_range<size_t>(0,10000*s,s), GrowToAtLeast<MyVector>(v), tbb::simple_partitioner() );
+    }
+    v.clear();
+    ASSERT( 0 == v.get_allocator().frees, NULL);
+    v.shrink_to_fit();
+    size_t items_allocated = v.get_allocator().items_allocated,
+           items_freed = v.get_allocator().items_freed;
+    size_t allocations = v.get_allocator().allocations,
+           frees = v.get_allocator().frees;
+    ASSERT( items_allocated == items_freed, NULL);
+    ASSERT( allocations == frees, NULL);
+}
+
+//! Test concurrent invocations of method concurrent_vector::grow_by
+template<typename MyVector>
+class GrowBy: NoAssign {
+    MyVector& my_vector;
+public:
+    void operator()( const tbb::blocked_range<int>& range ) const {
+        ASSERT( range.begin() < range.end(), NULL );
+#if TBB_DEPRECATED
+        for( int i=range.begin(); i!=range.end(); ++i )
+#else
+        int i = range.begin(), h = (range.end() - i) / 2;
+        typename MyVector::iterator s = my_vector.grow_by(h);
+        for( h += i; i < h; ++i, ++s )
+            s->bar() = i;
+        for(; i!=range.end(); ++i )
+#endif
+        {
+            if( i&1 ) {
+#if TBB_DEPRECATED
+                typename MyVector::reference element = my_vector[my_vector.grow_by(1)]; 
+                element.bar() = i;
+#else
+                my_vector.grow_by(1)->bar() = i;
+#endif
+            } else {
+                typename MyVector::value_type f;
+                f.bar() = i;
+#if TBB_DEPRECATED
+                size_t r;
+#else
+                typename MyVector::iterator r;
+#endif
+                if( i&2 )
+                    r = my_vector.push_back( f );
+                else
+                    r = my_vector.grow_by(1, f);
+#if TBB_DEPRECATED
+                ASSERT( my_vector[r].bar()==i, NULL );
+#else
+                ASSERT( r->bar()==i, NULL );
+#endif
+            }
+        }
+    }
+    GrowBy( MyVector& vector ) : my_vector(vector) {}
+};
+
+//! Test concurrent invocations of method concurrent_vector::grow_by
+void TestConcurrentGrowBy( int nthread ) {
+    MyAllocator::init_counters();
+    {
+        int m = 100000; MyAllocator a;
+        MyVector v( a );
+        tbb::parallel_for( tbb::blocked_range<int>(0,m,100), GrowBy<MyVector>(v), tbb::simple_partitioner() );
+        ASSERT( v.size()==size_t(m), NULL );
+
+        // Verify that v is a permutation of 0..m
+        int inversions = 0, def_inits = 0, copy_inits = 0;
+        bool* found = new bool[m];
+        memset( found, 0, m );
+        for( int i=0; i<m; ++i ) {
+            if( v[i].state == Foo::DefaultInitialized ) ++def_inits;
+            else if( v[i].state == Foo::CopyInitialized ) ++copy_inits;
+            else {
+                REMARK("i: %d ", i);
+                ASSERT( false, "v[i] seems not initialized");
+            }
+            int index = v[i].bar();
+            ASSERT( !found[index], NULL );
+            found[index] = true;
+            if( i>0 )
+                inversions += v[i].bar()<v[i-1].bar();
+        }
+        for( int i=0; i<m; ++i ) {
+            ASSERT( found[i], NULL );
+            ASSERT( nthread>1 || v[i].bar()==i, "sequential execution is wrong" );
+        }
+        delete[] found;
+        REMARK("Initialization by default constructor: %d, by copy: %d\n", def_inits, copy_inits);
+        ASSERT( def_inits >= m/2, NULL );
+        ASSERT( copy_inits >= m/4, NULL );
+        if( nthread>1 && inversions<m/20 )
+            REPORT("Warning: not much concurrency in TestConcurrentGrowBy (%d inversions)\n", inversions);
+    }
+    size_t items_allocated = MyAllocator::items_allocated,
+           items_freed = MyAllocator::items_freed;
+    size_t allocations = MyAllocator::allocations,
+           frees = MyAllocator::frees;
+    ASSERT( items_allocated == items_freed, NULL);
+    ASSERT( allocations == frees, NULL);
+}
+
+//! Test the assignment operator and swap
+void TestAssign() {
+    typedef tbb::concurrent_vector<FooWithAssign, local_counting_allocator<std::allocator<FooWithAssign>, size_t > > vector_t;
+    local_counting_allocator<std::allocator<FooWithAssign>, size_t > init_alloc;
+    init_alloc.allocations = 100;
+    for( int dst_size=1; dst_size<=128; NextSize( dst_size ) ) {
+        for( int src_size=2; src_size<=128; NextSize( src_size ) ) {
+            vector_t u(FooIterator(0), FooIterator(src_size), init_alloc);
+            for( int i=0; i<src_size; ++i )
+                ASSERT( u[i].bar()==i, NULL );
+            vector_t v(dst_size, FooWithAssign(), init_alloc);
+            for( int i=0; i<dst_size; ++i ) {
+                ASSERT( v[i].state==Foo::CopyInitialized, NULL );
+                v[i].bar() = ~i;
+            }
+            ASSERT( v != u, NULL);
+            v.swap(u);
+            CheckVector(u, dst_size, src_size);
+            u.swap(v);
+            // using assignment
+            v = u;
+            ASSERT( v == u, NULL);
+            u.clear();
+            ASSERT( u.size()==0, NULL );
+            ASSERT( v.size()==size_t(src_size), NULL );
+            for( int i=0; i<src_size; ++i )
+                ASSERT( v[i].bar()==i, NULL );
+            ASSERT( 0 == u.get_allocator().frees, NULL);
+            u.shrink_to_fit(); // deallocate unused memory
+            size_t items_allocated = u.get_allocator().items_allocated,
+                   items_freed = u.get_allocator().items_freed;
+            size_t allocations = u.get_allocator().allocations,
+                   frees = u.get_allocator().frees + 100;
+            ASSERT( items_allocated == items_freed, NULL);
+            ASSERT( allocations == frees, NULL);
+        }
+    }
+}
+
+// Test the comparison operators
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <string>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+void TestComparison() {
+    std::string str[3]; str[0] = "abc";
+    str[1].assign("cba");
+    str[2].assign("abc"); // same as 0th
+    tbb::concurrent_vector<char> var[3];
+    var[0].assign(str[0].begin(), str[0].end());
+    var[1].assign(str[0].rbegin(), str[0].rend());
+    var[2].assign(var[1].rbegin(), var[1].rend()); // same as 0th
+    for (int i = 0; i < 3; ++i) {
+        for (int j = 0; j < 3; ++j) {
+            ASSERT( (var[i] == var[j]) == (str[i] == str[j]), NULL );
+            ASSERT( (var[i] != var[j]) == (str[i] != str[j]), NULL );
+            ASSERT( (var[i] < var[j]) == (str[i] < str[j]), NULL );
+            ASSERT( (var[i] > var[j]) == (str[i] > str[j]), NULL );
+            ASSERT( (var[i] <= var[j]) == (str[i] <= str[j]), NULL );
+            ASSERT( (var[i] >= var[j]) == (str[i] >= str[j]), NULL );
+        }
+    }
+}
+
+//------------------------------------------------------------------------
+// Regression test for a problem where oversubscription caused
+// concurrent_vector::grow_by to run very slowly (TR#196).
+//------------------------------------------------------------------------
+
+#include "tbb/task_scheduler_init.h"
+#include <math.h>
+
+typedef unsigned long Number;
+
+static tbb::concurrent_vector<Number> Primes;
+
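+//! Appends the odd primes found in its blocked_range to the shared Primes
+//! vector via trial division; used purely to time grow_by/push_back under
+//! heavy contention.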
+class FindPrimes {
+    bool is_prime( Number val ) const {
+        int limit, factor = 3;
+        if( val<5u ) 
+            return val==2;
+        else {
+            limit = long(sqrtf(float(val))+0.5f);
+            while( factor<=limit && val % factor )
+                ++factor;
+            return factor>limit;
+        }
+    }
+public:
+    void operator()( const tbb::blocked_range<Number>& r ) const {
+        for( Number i=r.begin(); i!=r.end(); ++i ) { 
+            if( i%2 && is_prime(i) ) {
+#if TBB_DEPRECATED
+                Primes[Primes.grow_by(1)] = i;
+#else
+                Primes.push_back( i );
+#endif
+            }
+        }
+    }
+};
+
+double TimeFindPrimes( int nthread ) {
+    Primes.clear();
+    Primes.reserve(1000000);// TODO: or compact()?
+    tbb::task_scheduler_init init(nthread);
+    tbb::tick_count t0 = tbb::tick_count::now();
+    tbb::parallel_for( tbb::blocked_range<Number>(0,1000000,500), FindPrimes() );
+    tbb::tick_count t1 = tbb::tick_count::now();
+    return (t1-t0).seconds();
+}
+
+void TestFindPrimes() {
+    // Time fully subscribed run.
+    double t2 = TimeFindPrimes( tbb::task_scheduler_init::automatic );
+
+    // Time parallel run that is very likely oversubscribed.  
+#if _XBOX
+    double t128 = TimeFindPrimes(32);  //XBOX360 can't handle too many threads
+#else    
+    double t128 = TimeFindPrimes(128);
+#endif
+    REMARK("TestFindPrimes: t2==%g t128=%g k=%g\n", t2, t128, t128/t2);
+
+    // We allow the 128-thread run a little extra time to allow for thread overhead.
+    // Theoretically, the following test will fail on machines with more than 128 processors.
+    // But that situation is not going to come up in the near future,
+    // and the generalization to fix the issue is not worth the trouble.
+    if( t128 > 1.3*t2 ) {
+        REPORT("Warning: grow_by is pathetically slow: t2==%g t128=%g k=%g\n", t2, t128, t128/t2);
+    } 
+}
+
+//------------------------------------------------------------------------
+// Test compatibility with STL sort.
+//------------------------------------------------------------------------
+
+#include <algorithm>
+
+void TestSort() {
+    for( int n=0; n<100; n=n*3+1 ) {
+        tbb::concurrent_vector<int> array(n);
+        for( int i=0; i<n; ++i )
+            array.at(i) = (i*7)%n;
+        std::sort( array.begin(), array.end() );
+        for( int i=0; i<n; ++i )
+            ASSERT( array[i]==i, NULL );
+    }
+}
+
+#if TBB_USE_EXCEPTIONS
+//------------------------------------------------------------------------
+// Test exception safety (exceptions from the allocator and from item constructors)
+//------------------------------------------------------------------------
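+// Each (t, m) pair below induces a single failure: t==0 limits the counting
+// allocator so that it throws std::bad_alloc, t==1 caps MaxFooCount so that
+// Foo's constructor throws Foo_exception after roughly N/4 further
+// constructions; m selects which vector method is being exercised when the
+// failure occurs.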
+void TestExceptions() {
+    typedef static_counting_allocator<debug_allocator<FooWithAssign>, std::size_t> allocator_t;
+    typedef tbb::concurrent_vector<FooWithAssign, allocator_t> vector_t;
+
+    enum methods {
+        zero_method = 0,
+        ctor_copy, ctor_size, assign_nt, assign_ir, op_equ, reserve, compact, grow,
+        all_methods
+    };
+    ASSERT( !FooCount, NULL );
+
+    try {
+        vector_t src(FooIterator(0), FooIterator(N)); // original data
+
+        for(int t = 0; t < 2; ++t) // exception type
+        for(int m = zero_method+1; m < all_methods; ++m)
+        {
+            ASSERT( FooCount == N, "Previous iteration missed some Foo construction or destruction" );
+            allocator_t::init_counters();
+            if(t) MaxFooCount = FooCount + N/4;
+            else allocator_t::set_limits(N/4);
+            vector_t victim;
+            try {
+                switch(m) {
+                case ctor_copy: {
+                        vector_t acopy(src);
+                    } break; // auto destruction after exception is checked by ~Foo
+                case ctor_size: {
+                        vector_t sized(N);
+                    } break; // auto destruction after exception is checked by ~Foo
+                // Do not test the assignment constructor, since it reuses the same methods as below
+                case assign_nt: {
+                        victim.assign(N, FooWithAssign());
+                    } break;
+                case assign_ir: {
+                        victim.assign(FooIterator(0), FooIterator(N));
+                    } break;
+                case op_equ: {
+                        victim.reserve(2); victim = src; // fragmented assignment
+                    } break;
+                case reserve: {
+                        try {
+                            victim.reserve(victim.max_size()+1);
+                        } catch(std::length_error &) {
+                        } catch(...) {
+                            KNOWN_ISSUE("ERROR: unrecognized exception - known compiler issue\n");
+                        }
+                        victim.reserve(N);
+                    } break;
+                case compact: {
+                        if(t) MaxFooCount = 0; else allocator_t::set_limits(); // reset limits
+                        victim.reserve(2); victim = src; // fragmented assignment
+                        if(t) MaxFooCount = FooCount + 10; else allocator_t::set_limits(1, false); // block any allocation, check NULL return from allocator
+                        victim.shrink_to_fit(); // should start defragmenting first segment
+                    } break;
+                case grow: {
+                        tbb::task_scheduler_init init(2);
+                        if(t) MaxFooCount = FooCount + 31; // these numbers help reproduce the livelock seen in versions prior to TBB 2.2
+                        try {
+                            tbb::parallel_for( tbb::blocked_range<int>(0, N, 70), GrowBy<vector_t>(victim) );
+                        } catch(...) {
+#if TBB_USE_CAPTURED_EXCEPTION
+                            throw tbb::bad_last_alloc();
+#else
+                            throw;
+#endif
+                        }
+                    } break;
+                default:;
+                }
+                if(!t || m != reserve) ASSERT(false, "should throw an exception");
+            } catch(std::bad_alloc &e) {
+                allocator_t::set_limits(); MaxFooCount = 0;
+                size_t capacity = victim.capacity();
+                size_t size = victim.size();
+#if TBB_DEPRECATED
+                size_t req_size = victim.grow_by(0);
+#else
+                size_t req_size = victim.grow_by(0) - victim.begin();
+#endif
+                ASSERT( size <= capacity, NULL);
+                ASSERT( req_size >= size, NULL);
+                switch(m) {
+                case reserve:
+                    if(t) ASSERT(false, NULL);
+                case assign_nt:
+                case assign_ir:
+                    if(!t) {
+                        ASSERT(capacity < N/2, "unexpected capacity");
+                        ASSERT(size == 0, "unexpected size");
+                        break;
+                    } else {
+                        ASSERT(size == N, "unexpected size");
+                        ASSERT(capacity >= N, "unexpected capacity");
+                        int i;
+                        for(i = 1; ; ++i)
+                            if(!victim[i].zero_bar()) break;
+                            else ASSERT(victim[i].bar() == ((m == assign_ir) ? i : initial_value_of_bar), NULL);
+                        for(; size_t(i) < size; ++i) ASSERT(!victim[i].zero_bar(), NULL);
+                        ASSERT(size_t(i) == size, NULL);
+                        break;
+                    }
+                case grow:
+                case op_equ:
+                    if(!t) {
+                        ASSERT(capacity > 0, NULL);
+                        ASSERT(capacity < N, "unexpected capacity");
+                    }
+                    {
+                        vector_t copy_of_victim(victim);
+                        ASSERT(copy_of_victim.size() > 0, NULL);
+                        for(int i = 0; ; ++i) {
+                            try {
+                                FooWithAssign &foo = victim.at(i);
+                                if( !foo.is_valid_or_zero() ) {
+                                    std::printf("i: %d size: %u req_size: %u  state: %d\n", i, unsigned(size), unsigned(req_size), foo.state);
+                                }
+                                int bar = foo.zero_bar();
+                                if(m != grow) ASSERT( bar == i || (t && bar == 0), NULL);
+                                if(size_t(i) < copy_of_victim.size()) ASSERT( copy_of_victim[i].bar() == bar, NULL);
+                            } catch(std::range_error &) { // skip broken segment
+                                ASSERT( size_t(i) < req_size, NULL );
+                                if(m == op_equ) break;
+                            } catch(std::out_of_range &){
+                                ASSERT( i > 0, NULL ); break;
+                            } catch(...) {
+                                KNOWN_ISSUE("ERROR: unrecognized exception - known compiler issue\n"); break;
+                            }
+                        }
+                        vector_t copy_of_victim2(10); copy_of_victim2 = victim;
+                        ASSERT(copy_of_victim == copy_of_victim2, "assignment doesn't match copying");
+                        if(m == op_equ) {
+                            try {
+                                victim = copy_of_victim2;
+                            } catch(tbb::bad_last_alloc &) { break;
+                            } catch(...) {
+                                KNOWN_ISSUE("ERROR: unrecognized exception - known compiler issue\n"); break;
+                            }
+                            ASSERT(t, NULL);
+                        }
+                    } break;
+                case compact:
+                    ASSERT(capacity > 0, "unexpected capacity");
+                    ASSERT(victim == src, "shrink_to_fit() is broken");
+                    break;
+
+                default:; // nothing to check here
+                }
+                REMARK("Exception %d: %s\t- ok\n", m, e.what());
+            }
+        }
+    } catch(...) {
+        ASSERT(false, "unexpected exception");
+    }
+}
+#endif /* TBB_USE_EXCEPTIONS */
+
+//------------------------------------------------------------------------
+// Test SSE
+//------------------------------------------------------------------------
+#include "harness_m128.h"
+
+#if HAVE_m128
+
+void TestSSE() {
+    tbb::concurrent_vector<ClassWithSSE> v;
+    for( int i=0; i<100; ++i ) {
+        v.push_back(ClassWithSSE(i));
+        for( int j=0; j<i; ++j )
+            ASSERT( v[j]==ClassWithSSE(j), NULL );
+    }
+}
+#endif /* HAVE_m128 */
+
+//------------------------------------------------------------------------
+
+int TestMain () {
+    if( MinThread<1 ) {
+        REPORT("ERROR: MinThread=%d, but must be at least 1\n",MinThread); MinThread = 1;
+    }
+#if !TBB_DEPRECATED
+    TestIteratorTraits<tbb::concurrent_vector<Foo>::iterator,Foo>();
+    TestIteratorTraits<tbb::concurrent_vector<Foo>::const_iterator,const Foo>();
+    TestSequentialFor<FooWithAssign> ();
+    TestResizeAndCopy();
+    TestAssign();
+#if HAVE_m128
+    TestSSE();
+#endif /* HAVE_m128 */    
+#endif
+    TestCapacity();
+    ASSERT( !FooCount, NULL );
+    for( int nthread=MinThread; nthread<=MaxThread; ++nthread ) {
+        tbb::task_scheduler_init init( nthread );
+        TestParallelFor( nthread );
+        TestConcurrentGrowToAtLeast();
+        TestConcurrentGrowBy( nthread );
+    }
+    ASSERT( !FooCount, NULL );
+#if !TBB_DEPRECATED
+    TestComparison();
+    TestFindPrimes();
+    TestSort();
+#if __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
+    REPORT("Known issue: exception safety test is skipped.\n");
+#elif TBB_USE_EXCEPTIONS
+    TestExceptions();
+#endif /* TBB_USE_EXCEPTIONS */
+#endif /* !TBB_DEPRECATED */
+    ASSERT( !FooCount, NULL );
+    REMARK("sizeof(concurrent_vector<int>) == %d\n", (int)sizeof(tbb::concurrent_vector<int>));
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_condition_variable.h b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_condition_variable.h
new file mode 100644 (file)
index 0000000..5422557
--- /dev/null
@@ -0,0 +1,693 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/compat/condition_variable"
+#include "tbb/mutex.h"
+#include "tbb/recursive_mutex.h"
+#include "tbb/tick_count.h"
+#include "tbb/atomic.h"
+
+#include "harness.h"
+
+// This test deliberately avoids a "using namespace tbb" directive,
+// so that errors from putting types in the wrong namespace are caught.
+
+using namespace std;
+
+template<typename M>
+struct Counter {
+    typedef M mutex_type;
+    M mutex;
+    volatile long value; 
+    void flog_once_lock_guard( size_t mode );
+    void flog_once_unique_lock( size_t mode );
+};
+
+template<typename M>
+void Counter<M>::flog_once_lock_guard(size_t mode)
+/** Increments counter once for each iteration in the iteration space. */
+{
+    if( mode&1 ) {
+        // Try acquire and release with the locking lock_guard constructor.
+        // Precondition: unless mutex_type is a recursive mutex, the calling thread must not already own the mutex m.
+        // If the precondition is not met, the result is either a deadlock or an incorrect 'value'.
+        lock_guard<M> lg(mutex);
+        value = value+1;
+    } else {
+        // Try acquire and release with an adopting lock_guard.
+        // Precondition: the calling thread already owns the mutex m.
+        // If the precondition is not met, 'value' becomes incorrect because the thread
+        // unlocks a mutex that it does not own.
+        mutex.lock();
+        lock_guard<M> lg( mutex, adopt_lock );
+        value = value+1;
+    }
+}
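+
+// Illustrative sketch (editorial, not called by the test): the two lock_guard idioms
+// exercised above, in isolation. 'do_work' stands for any code that must run under the lock.
+template<typename M>
+void LockGuardIdioms( M& m, void (*do_work)() ) {
+    {   // Locking constructor: lock_guard acquires the mutex and releases it on scope exit.
+        lock_guard<M> lg( m );
+        do_work();
+    }
+    {   // adopt_lock: the caller must already own the mutex; lock_guard only releases it.
+        m.lock();
+        lock_guard<M> lg( m, adopt_lock );
+        do_work();
+    }
+}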
+
+template<typename M>
+void Counter<M>::flog_once_unique_lock(size_t mode)
+/** Increments counter once for each iteration in the iteration space. */
+{
+    switch( mode&7 ) {
+    case 0:
+        {// implicitly acquire and release mutex with unique_lock
+          unique_lock<M> ul( mutex );
+          value = value+1;
+          ASSERT( ul==true, NULL );
+        }
+        break;
+    case 1:
+        {// unique_lock with defer_lock
+          unique_lock<M> ul( mutex, defer_lock );
+          ASSERT( ul.owns_lock()==false, NULL );
+          ul.lock();
+          value = value+1;
+          ASSERT( ul.owns_lock()==true, NULL );
+        }
+        break;
+    case 2:
+        {// unique_lock::try_lock() with try_to_lock
+          unique_lock<M> ul( mutex, try_to_lock );
+          if( !ul )
+              while( !ul.try_lock() )
+                  __TBB_Yield();
+          value = value+1;
+        }
+        break;
+    case 3:
+        {// unique_lock::try_lock_for() with defer_lock
+          unique_lock<M> ul( mutex, defer_lock );
+          tbb::tick_count::interval_t i(1.0);
+          while( !ul.try_lock_for( i ) )
+              ;
+          value = value+1;
+          ASSERT( ul.owns_lock()==true, NULL );
+        }
+        break;
+    case 4:
+        {
+          unique_lock<M> ul_o4;
+          {// unique_lock with adopt_lock
+            mutex.lock();
+            unique_lock<M> ul( mutex, adopt_lock );
+            value = value+1;
+            ASSERT( ul.owns_lock()==true, NULL );
+            ASSERT( ul.mutex()==&mutex, NULL );
+            ASSERT( ul_o4.owns_lock()==false, NULL );
+            ASSERT( ul_o4.mutex()==NULL, NULL );
+            swap( ul, ul_o4 );
+            ASSERT( ul.owns_lock()==false, NULL );
+            ASSERT( ul.mutex()==NULL, NULL );
+            ASSERT( ul_o4.owns_lock()==true, NULL );
+            ASSERT( ul_o4.mutex()==&mutex, NULL );
+            ul_o4.unlock();
+          }
+          ASSERT( ul_o4.owns_lock()==false, NULL );
+        }
+        break;
+    case 5:
+        {
+          unique_lock<M> ul_o5;
+          {// unique_lock with adopt_lock
+            mutex.lock();
+            unique_lock<M> ul( mutex, adopt_lock );
+            value = value+1;
+            ASSERT( ul.owns_lock()==true, NULL );
+            ASSERT( ul.mutex()==&mutex, NULL );
+            ASSERT( ul_o5.owns_lock()==false, NULL );
+            ASSERT( ul_o5.mutex()==NULL, NULL );
+            ul_o5.swap( ul );
+            ASSERT( ul.owns_lock()==false, NULL );
+            ASSERT( ul.mutex()==NULL, NULL );
+            ASSERT( ul_o5.owns_lock()==true, NULL );
+            ASSERT( ul_o5.mutex()==&mutex, NULL );
+            ul_o5.unlock();
+          }
+          ASSERT( ul_o5.owns_lock()==false, NULL );
+        }
+        break;
+    default:
+        {// unique_lock with adopt_lock, and release()
+          mutex.lock();
+          unique_lock<M> ul( mutex, adopt_lock );
+          ASSERT( ul==true, NULL );
+          value = value+1;
+          M* old_m = ul.release();
+          old_m->unlock();
+          ASSERT( ul.owns_lock()==false, NULL );
+        }
+        break;
+    }
+}
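+
+// Summary of the unique_lock states exercised above (editorial comment):
+//   unique_lock<M> ul( m );               - locks m immediately; unlocks in the destructor
+//   unique_lock<M> ul( m, defer_lock );   - associated but unlocked; lock()/try_lock()/try_lock_for() later
+//   unique_lock<M> ul( m, try_to_lock );  - tries to lock without blocking; check owns_lock() (or bool)
+//   unique_lock<M> ul( m, adopt_lock );   - takes over a mutex the thread has already locked
+// release() detaches the mutex without unlocking it; swap()/member swap exchange ownership.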
+
+static tbb::atomic<size_t> Order;
+
+template<typename State, long TestSize>
+struct WorkForLocks: NoAssign {
+    static const size_t chunk = 100;
+    State& state;
+    WorkForLocks( State& state_ ) : state(state_) {}
+    void operator()( int ) const {
+        size_t step;
+        while( (step=Order.fetch_and_add<tbb::acquire>(chunk))<TestSize ) {
+            for( size_t i=0; i<chunk && step<TestSize; ++i, ++step ) {
+                state.flog_once_lock_guard(step);
+                state.flog_once_unique_lock(step);
+            }
+        }
+    }
+};
+
+template<typename M>
+void TestLocks( const char* name, int nthread ) {
+    REMARK("testing %s in TestLocks\n",name);
+    Counter<M> counter;
+    counter.value = 0;
+    Order = 0;
+    const long test_size = 100000;
+    NativeParallelFor( nthread, WorkForLocks<Counter<M>, test_size>(counter) );
+
+    if( counter.value!=2*test_size )
+        REPORT("ERROR for %s in TestLocks: counter.value=%ld != 2 * %ld=test_size\n",name,counter.value,test_size);
+}
+
+static tbb::atomic<int> barrier;
+
+// Test if the constructor works and if native_handle() works
+template<typename M>
+struct WorkForCondVarCtor: NoAssign {
+    condition_variable& my_cv;
+    M& my_mtx;
+    WorkForCondVarCtor( condition_variable& cv_, M& mtx_ ) : my_cv(cv_), my_mtx(mtx_) {}
+    void operator()( int tid ) const {
+        ASSERT( tid<=1, NULL ); // test with 2 threads.
+        condition_variable::native_handle_type handle = my_cv.native_handle();
+        if( tid&1 ) {
+            my_mtx.lock();
+            ++barrier;
+#if _WIN32||_WIN64
+            if( !tbb::interface5::internal::internal_condition_variable_wait( *handle, &my_mtx ) ) {
+                int ec = GetLastError();
+                ASSERT( ec!=WAIT_TIMEOUT, NULL );
+                throw_exception( tbb::internal::eid_condvar_wait_failed );
+            }
+#else
+            if( pthread_cond_wait( handle, my_mtx.native_handle() ) )
+                throw_exception( tbb::internal::eid_condvar_wait_failed );
+#endif
+            ++barrier;
+            my_mtx.unlock();
+        } else {
+            bool res;
+            while( (res=my_mtx.try_lock())==true && barrier==0 ) {
+                my_mtx.unlock();
+                __TBB_Yield();
+            }
+            if( res ) my_mtx.unlock();
+            do {
+#if _WIN32||_WIN64
+                tbb::interface5::internal::internal_condition_variable_notify_one( *handle );
+#else
+                pthread_cond_signal( handle );
+#endif
+                __TBB_Yield();
+            } while ( barrier<2 );
+        }
+    }
+};
+
+static condition_variable* test_cv;
+static tbb::atomic<int> n_waiters;
+
+// Test if the destructor works 
+template<typename M>
+struct WorkForCondVarDtor: NoAssign {
+    int nthread;
+    M& my_mtx;
+    WorkForCondVarDtor( int n, M& mtx_ ) : nthread(n), my_mtx(mtx_) {}
+    void operator()( int tid ) const {
+        if( tid==0 ) {
+            unique_lock<M> ul( my_mtx, defer_lock );
+            test_cv = new condition_variable;
+
+            while( n_waiters<nthread-1 )
+                __TBB_Yield();
+            ul.lock();
+            test_cv->notify_all();
+            ul.unlock();
+            while( n_waiters>0 )
+                __TBB_Yield();
+            delete test_cv;
+        } else {
+            while( test_cv==NULL )
+                __TBB_Yield();
+            unique_lock<M> ul(my_mtx);
+            ++n_waiters;
+            test_cv->wait( ul );
+            --n_waiters;
+        }
+    }
+};
+
+static const int max_ticket  = 100;
+static const int short_delay = 10;
+static const int long_delay  = 100;
+
+tbb::atomic<int> n_signaled;
+tbb::atomic<int> n_done, n_done_1, n_done_2;
+tbb::atomic<int> n_timed_out;
+
+static bool false_to_true;
+
+struct TestPredicateFalseToTrue {
+    TestPredicateFalseToTrue() {}
+    bool operator()() { return false_to_true; }
+};
+
+struct TestPredicateFalse {
+    TestPredicateFalse() {}
+    bool operator()() { return false; }
+};
+
+struct TestPredicateTrue {
+    TestPredicateTrue() {}
+    bool operator()() { return true; }
+};
+
+// Test timed wait and timed wait with pred
+template<typename M>
+struct WorkForCondVarTimedWait: NoAssign {
+    int nthread;
+    condition_variable& test_cv;
+    M& my_mtx;
+    WorkForCondVarTimedWait( int n_, condition_variable& cv_, M& mtx_ ) : nthread(n_), test_cv(cv_), my_mtx(mtx_) {}
+    void operator()( int tid ) const {
+        tbb::tick_count t1, t2;
+
+        unique_lock<M> ul( my_mtx, defer_lock );
+
+        ASSERT( n_timed_out==0, NULL );
+        ++barrier;
+        while( barrier<nthread ) __TBB_Yield();
+
+        // test if a thread times out with wait_for()
+        for( int i=1; i<10; ++i ) {
+            tbb::tick_count::interval_t intv((double)i*0.0001 /*seconds*/);
+            ul.lock();
+            cv_status st = no_timeout;
+            __TBB_TRY {
+                /** Some versions of glibc return EINVAL instead of 0 when a spurious wakeup occurs in pthread_cond_timedwait() **/
+                st = test_cv.wait_for( ul, intv );
+            } __TBB_CATCH( std::runtime_error& ) {}
+            ASSERT( ul, "mutex should have been reacquired" );
+            ul.unlock();
+            if( st==timeout )
+                ++n_timed_out;
+        }
+
+        ASSERT( n_timed_out>0, "should have timed out at least once\n" );
+        ++n_done_1;
+        while( n_done_1<nthread ) __TBB_Yield();
+
+        for( int i=1; i<10; ++i ) {
+            tbb::tick_count::interval_t intv((double)i*0.0001 /*seconds*/);
+            ul.lock();
+            __TBB_TRY {
+                /** Some versions of glibc return EINVAL instead of 0 when a spurious wakeup occurs in pthread_cond_timedwait() **/
+                ASSERT( false==test_cv.wait_for( ul, intv, TestPredicateFalse()), "incorrect return value" );
+            } __TBB_CATCH( std::runtime_error& ) {}
+            ASSERT( ul, "mutex should have been reacquired" );
+            ul.unlock();
+        }
+
+        if( tid==0 )
+            n_waiters = 0;
+        // barrier
+        ++n_done_2;
+        while( n_done_2<nthread ) __TBB_Yield();
+
+        // at this point, we know wait_for() successfully times out.
+        // so test if a thread blocked on wait_for() could receive a signal before its waiting time elapses.
+        if( tid==0 ) {
+            // signaler
+            n_signaled = 0;
+            ASSERT( n_waiters==0, NULL );
+            ++n_done_2; // open gate 1
+
+            while( n_waiters<(nthread-1) ) __TBB_Yield(); // wait until all other threads block on cv. flag_1
+
+            ul.lock();
+            test_cv.notify_all();
+            n_waiters = 0;
+            ul.unlock();
+
+            while( n_done_2<2*nthread ) __TBB_Yield();
+            ASSERT( n_signaled>0, "too small an interval?" );
+            n_signaled = 0;
+
+        } else {
+            while( n_done_2<nthread+1 ) __TBB_Yield(); // gate 1
+
+            // sleeper
+            tbb::tick_count::interval_t intv((double)2.0 /*seconds*/);
+            ul.lock();
+            ++n_waiters; // raise flag 1/(nthread-1) 
+            t1 = tbb::tick_count::now();
+            cv_status st = test_cv.wait_for( ul, intv ); // gate 2
+            t2 = tbb::tick_count::now();
+            ul.unlock();
+            if( st==no_timeout ) {
+                ++n_signaled;
+                ASSERT( (t2-t1).seconds()<intv.seconds(), "got a signal after timing out?" );
+            }
+        }
+
+        ASSERT( n_done==0, NULL );
+        ++n_done_2;
+
+        if( tid==0 ) {
+            ASSERT( n_waiters==0, NULL );
+            ++n_done; // open gate 3
+
+            while( n_waiters<(nthread-1) ) __TBB_Yield(); // wait until all other threads block on cv.
+            for( int i=0; i<2*short_delay; ++i ) __TBB_Yield();  // give the waiters some time so that all of them are in the wait queue
+            ul.lock();
+            false_to_true = true;
+            test_cv.notify_all(); // open gate 4
+            ul.unlock();
+
+            while( n_done<nthread ) __TBB_Yield(); // wait until all other threads wake up.
+            ASSERT( n_signaled>0, "too small an interval?" );
+        } else {
+
+            while( n_done<1 ) __TBB_Yield(); // gate 3
+
+            tbb::tick_count::interval_t intv((double)2.0 /*seconds*/);
+            ul.lock();
+            ++n_waiters;
+            // wait_for with predicate
+            t1 = tbb::tick_count::now();
+            ASSERT( test_cv.wait_for( ul, intv, TestPredicateFalseToTrue())==true, NULL ); // gate 4
+            t2 = tbb::tick_count::now();
+            ul.unlock();
+            if( (t2-t1).seconds()<intv.seconds() )
+                ++n_signaled;
+            ++n_done;
+        }
+    }
+};
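+
+// Illustrative sketch (editorial, not invoked by the test): the canonical wait_for-with-predicate
+// idiom exercised above, written against tbb::mutex. The Example* names and the 'flag' parameter
+// are hypothetical.
+struct ExampleFlagIsSet {
+    const volatile bool& flag;
+    ExampleFlagIsSet( const volatile bool& f ) : flag(f) {}
+    bool operator()() { return flag; }
+};
+
+// Waiter side: returns true if 'flag' became true before 'timeout' elapsed;
+// the mutex is reacquired before wait_for() returns in either case.
+inline bool ExampleWaitForFlag( condition_variable& cv, tbb::mutex& mtx,
+                                const volatile bool& flag, const tbb::tick_count::interval_t& timeout ) {
+    unique_lock<tbb::mutex> ul( mtx );
+    return cv.wait_for( ul, timeout, ExampleFlagIsSet( flag ) );
+}
+
+// Notifier side: set the flag while holding the same mutex, then wake all waiters.
+inline void ExampleSetFlagAndNotify( condition_variable& cv, tbb::mutex& mtx, volatile bool& flag ) {
+    unique_lock<tbb::mutex> ul( mtx );
+    flag = true;
+    cv.notify_all();
+}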
+
+tbb::atomic<int> ticket_for_sleep, ticket_for_wakeup, signaled_ticket, wokeup_ticket;
+tbb::atomic<unsigned> n_visit_to_waitq;
+unsigned max_waitq_length;
+
+template<typename M>
+struct WorkForCondVarWaitAndNotifyOne: NoAssign {
+    int nthread;
+    condition_variable& test_cv;
+    M& my_mtx;
+    WorkForCondVarWaitAndNotifyOne( int n_, condition_variable& cv_, M& mtx_ ) : nthread(n_), test_cv(cv_), my_mtx(mtx_) {}
+    void operator()( int tid ) const {
+        if( tid&1 ) {
+            // exercise signal part
+            while( ticket_for_wakeup<max_ticket ) {
+                int my_ticket = ++ticket_for_wakeup; // atomically grab the next ticket
+                if( my_ticket>max_ticket )
+                    break;
+
+                for( ;; ) {
+                    unique_lock<M> ul( my_mtx, defer_lock );
+                    ul.lock();
+                    if( n_waiters>0 && my_ticket<=ticket_for_sleep && my_ticket==(wokeup_ticket+1) ) {
+                        signaled_ticket = my_ticket;
+                        test_cv.notify_one();
+                        ++n_signaled;
+                        ul.unlock();
+                        break;
+                    }
+                    ul.unlock();
+                    __TBB_Yield();
+                }
+    
+                // give waiters time to go to sleep.
+                for( int m=0; m<short_delay; ++m )
+                    __TBB_Yield();
+            }
+        } else {
+            while( ticket_for_sleep<max_ticket ) {
+                unique_lock<M> ul( my_mtx, defer_lock );
+                ul.lock();
+                // exercise wait part
+                int my_ticket = ++ticket_for_sleep; // grab my ticket
+                if( my_ticket>max_ticket ) break;
+
+                // each waiter should go to sleep at least once
+                unsigned nw = ++n_waiters;
+                for( ;; ) {
+                    // update to max_waitq_length 
+                    if( nw>max_waitq_length ) max_waitq_length = nw;
+                    ++n_visit_to_waitq;
+                    test_cv.wait( ul );
+                    // if( ret==false ) ++n_timedout;
+                    ASSERT( ul, "mutex should have been locked" );
+                    --n_waiters;
+                    if( signaled_ticket==my_ticket ) {
+                        wokeup_ticket = my_ticket;
+                        break;
+                    }
+                    if( n_waiters>0 )
+                        test_cv.notify_one();
+                    nw = ++n_waiters; // update to max_waitq_length occurs above
+                }
+
+                ul.unlock();
+                __TBB_Yield(); // give other threads chance to run.
+            }
+        }
+        ++n_done;
+        spin_wait_until_eq( n_done, nthread );
+        ASSERT( n_signaled==max_ticket, "incorrect number of notifications sent" );
+    }
+};
+
+struct TestPredicate1 {
+    int target;
+    TestPredicate1( int i_ ) : target(i_) {}
+    bool operator()( ) { return signaled_ticket==target; }
+};
+
+template<typename M>
+struct WorkForCondVarWaitPredAndNotifyAll: NoAssign {
+    int nthread;
+    condition_variable& test_cv;
+    M& my_mtx;
+    int multiple;
+    WorkForCondVarWaitPredAndNotifyAll( int n_, condition_variable& cv_, M& mtx_, int m_ ) : 
+        nthread(n_), test_cv(cv_), my_mtx(mtx_), multiple(m_) {}
+    void operator()( int tid ) const {
+        if( tid&1 ) {
+            while( ticket_for_sleep<max_ticket ) {
+                unique_lock<M> ul( my_mtx, defer_lock );
+                // exercise wait part
+                int my_ticket = ++ticket_for_sleep; // grab my ticket
+                if( my_ticket>max_ticket ) 
+                    break;
+
+                ul.lock();
+                ++n_visit_to_waitq;
+                unsigned nw = ++n_waiters;
+                if( nw>max_waitq_length ) max_waitq_length = nw;
+                test_cv.wait( ul, TestPredicate1( my_ticket ) );
+                wokeup_ticket = my_ticket;
+                --n_waiters;
+                ASSERT( ul, "mutex should have been locked" );
+                ul.unlock();
+
+                __TBB_Yield(); // give other threads chance to run.
+            }
+        } else {
+            // exercise signal part
+            while( ticket_for_wakeup<max_ticket ) {
+                int my_ticket = ++ticket_for_wakeup; // atomically grab the next ticket
+                if( my_ticket>max_ticket )
+                    break;
+
+                for( ;; ) {
+                    unique_lock<M> ul( my_mtx );
+                    if( n_waiters>0 && my_ticket<=ticket_for_sleep && my_ticket==(wokeup_ticket+1) ) {
+                        signaled_ticket = my_ticket;
+                        test_cv.notify_all();
+                        ++n_signaled;
+                        ul.unlock();
+                        break;
+                    }
+                    ul.unlock();
+                    __TBB_Yield();
+                }
+    
+                // give waiters time to go to sleep.
+                for( int m=0; m<long_delay*multiple; ++m )
+                    __TBB_Yield();
+            }
+        }
+        ++n_done;
+        spin_wait_until_eq( n_done, nthread );
+        ASSERT( n_signaled==max_ticket, "incorrect number of notifications sent" );
+    }
+};
+
+void InitGlobalCounters()
+{
+      ticket_for_sleep = ticket_for_wakeup = signaled_ticket = wokeup_ticket = 0;
+      n_waiters = 0;
+      n_signaled = 0;
+      n_done = n_done_1 = n_done_2 = 0;
+      n_visit_to_waitq = 0;
+      n_timed_out = 0;
+}
+
+template<typename M>
+void TestConditionVariable( const char* name, int nthread )
+{
+    REMARK("testing %s in TestConditionVariable\n",name);
+    Counter<M> counter;
+    M mtx;
+
+    ASSERT( nthread>1, "at least two threads are needed for testing condition_variable" );
+    REMARK(" - constructor\n" );
+    // Test constructor.
+    {
+      condition_variable cv1;
+#if _WIN32||_WIN64
+      condition_variable::native_handle_type handle = cv1.native_handle();
+      ASSERT( uintptr_t(&handle->cv_event)==uintptr_t(&handle->cv_native), NULL );
+#endif
+      M mtx1;
+      barrier = 0;
+      NativeParallelFor( 2, WorkForCondVarCtor<M>( cv1, mtx1 ) );
+    }
+
+    REMARK(" - destructor\n" );
+    // Test destructor.
+    {
+      M mtx2;
+      test_cv = NULL;
+      n_waiters = 0;
+      NativeParallelFor( nthread, WorkForCondVarDtor<M>( nthread, mtx2 ) );
+    }
+
+    REMARK(" - timed_wait (i.e., wait_for)\n");
+    // Test timed wait.
+    {
+      condition_variable cv_tw;
+      M mtx_tw;
+      barrier = 0;
+      InitGlobalCounters();
+      int nthr = nthread>4?4:nthread;
+      NativeParallelFor( nthr, WorkForCondVarTimedWait<M>( nthr, cv_tw, mtx_tw ) );
+    }
+
+    REMARK(" - wait with notify_one\n");
+    // Test wait and notify_one
+    do {
+        condition_variable cv3;
+        M mtx3;
+        InitGlobalCounters();
+        NativeParallelFor( nthread, WorkForCondVarWaitAndNotifyOne<M>( nthread, cv3, mtx3 ) );
+    } while( n_visit_to_waitq==0 || max_waitq_length==0 );
+
+    REMARK(" - predicated wait with notify_all\n");
+    // Test wait_pred and notify_all
+    int delay_multiple = 1;
+    do {
+        condition_variable cv4;
+        M mtx4;
+        InitGlobalCounters();
+        NativeParallelFor( nthread, WorkForCondVarWaitPredAndNotifyAll<M>( nthread, cv4, mtx4, delay_multiple ) );
+        if( max_waitq_length<unsigned(nthread/2) )
+            ++delay_multiple;
+    } while( n_visit_to_waitq<=0 || max_waitq_length<unsigned(nthread/2) ); 
+}
+
+#if TBB_USE_EXCEPTIONS
+static tbb::atomic<int> err_count;
+
+#define TRY_AND_CATCH_RUNTIME_ERROR(op,msg) \
+        try {                             \
+            op;                           \
+            ++err_count;                  \
+        } catch( std::runtime_error& e ) {ASSERT( strstr(e.what(), msg) , NULL );} catch(...) {++err_count;}
+
+template<typename M>
+void TestUniqueLockException( const char * name ) {
+    REMARK("testing %s TestUniqueLockException\n",name);
+    M mtx;
+    unique_lock<M> ul_0;
+    err_count = 0;
+
+    TRY_AND_CATCH_RUNTIME_ERROR( ul_0.lock(), "Operation not permitted" );
+    TRY_AND_CATCH_RUNTIME_ERROR( ul_0.try_lock(), "Operation not permitted" );
+
+    unique_lock<M> ul_1( mtx );
+
+    TRY_AND_CATCH_RUNTIME_ERROR( ul_1.lock(), "Resource deadlock" );
+    TRY_AND_CATCH_RUNTIME_ERROR( ul_1.try_lock(), "Resource deadlock" );
+
+    ul_1.unlock();
+    TRY_AND_CATCH_RUNTIME_ERROR( ul_1.unlock(), "Operation not permitted" );
+
+    ASSERT( !err_count, "Some exceptions are not thrown or incorrect ones are thrown" );
+}
+
+template<typename M>
+void TestConditionVariableException( const char * name ) {
+    REMARK("testing %s in TestConditionVariableException; yet to be implemented\n",name);
+}
+#endif /* TBB_USE_EXCEPTIONS */
+
+template<typename Mutex, typename RecursiveMutex>
+void DoCondVarTest()
+{
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        REMARK( "testing with %d threads\n", p );
+        TestLocks<Mutex>( "mutex", p );
+        TestLocks<RecursiveMutex>( "recursive_mutex", p );
+
+        if( p<=1 ) continue;
+
+        // for testing condition_variable, at least one sleeper and one notifier are needed
+        TestConditionVariable<Mutex>( "mutex", p );
+    }
+#if __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
+    REPORT("Known issue: exception handling tests are skipped.\n");
+#elif TBB_USE_EXCEPTIONS
+    TestUniqueLockException<Mutex>( "mutex" );
+    TestUniqueLockException<RecursiveMutex>( "recursive_mutex" );
+    TestConditionVariableException<Mutex>( "mutex" );
+#endif /* TBB_USE_EXCEPTIONS */
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_critical_section.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_critical_section.cpp
new file mode 100644 (file)
index 0000000..7ca00b8
--- /dev/null
@@ -0,0 +1,224 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// test critical section
+//
+#include "tbb/critical_section.h"
+#include "tbb/task_scheduler_init.h"
+#include "tbb/enumerable_thread_specific.h"
+#include "tbb/tick_count.h"
+#include "harness_assert.h"
+#include "harness.h"
+#include <math.h>
+
+#include "harness_barrier.h"
+Harness::SpinBarrier sBarrier;
+tbb::critical_section cs;
+const int MAX_WORK = 300;
+
+struct BusyBody : NoAssign {
+    tbb::enumerable_thread_specific<double> &locals;
+    const int nThread;
+    const int WorkRatiox100;
+    int &unprotected_count;
+    bool test_throw;
+
+    BusyBody( int nThread_, int workRatiox100_, tbb::enumerable_thread_specific<double> &locals_, int &unprotected_count_, bool test_throw_) :
+        locals(locals_), 
+        nThread(nThread_), 
+        WorkRatiox100(workRatiox100_), 
+        unprotected_count(unprotected_count_),
+        test_throw(test_throw_) { 
+        sBarrier.initialize(nThread_);
+    }
+
+    void operator()(const int /* threadID */ ) const {
+        int nIters = MAX_WORK/nThread;
+        sBarrier.wait();
+        tbb::tick_count t0 = tbb::tick_count::now();
+        for(int j = 0; j < nIters; j++) {
+
+            for(int i = 0; i < MAX_WORK * (100 - WorkRatiox100); i++) {
+                locals.local() += 1.0;
+            }
+            cs.lock();
+            ASSERT( !cs.try_lock(), "recursive try_lock must fail" );
+#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN 
+            if(test_throw && j == (nIters / 2)) {
+                bool was_caught = false,
+                     unknown_exception = false;
+                try {
+                    cs.lock();
+                }
+                catch(tbb::improper_lock& e) {
+                    ASSERT( e.what(), "Error message is absent" );
+                    was_caught = true;
+                }
+                catch(...) {
+                    was_caught = unknown_exception = true;
+                }
+                ASSERT(was_caught, "Recursive lock attempt did not throw");
+                ASSERT(!unknown_exception, "tbb::improper_lock exception is expected");
+            }
+#endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN  */
+            for(int i = 0; i < MAX_WORK * WorkRatiox100; i++) {
+                locals.local() += 1.0;
+            }
+            unprotected_count++;
+            cs.unlock();
+        }
+        locals.local() = (tbb::tick_count::now() - t0).seconds();
+    }
+};
+
+struct BusyBodyScoped : NoAssign {
+    tbb::enumerable_thread_specific<double> &locals;
+    const int nThread;
+    const int WorkRatiox100;
+    int &unprotected_count;
+    bool test_throw;
+
+    BusyBodyScoped( int nThread_, int workRatiox100_, tbb::enumerable_thread_specific<double> &locals_, int &unprotected_count_, bool test_throw_) :
+        locals(locals_), 
+        nThread(nThread_), 
+        WorkRatiox100(workRatiox100_), 
+        unprotected_count(unprotected_count_),
+        test_throw(test_throw_) { 
+        sBarrier.initialize(nThread_);
+    }
+
+    void operator()(const int /* threadID */ ) const {
+        int nIters = MAX_WORK/nThread;
+        sBarrier.wait();
+        tbb::tick_count t0 = tbb::tick_count::now();
+        for(int j = 0; j < nIters; j++) {
+
+            for(int i = 0; i < MAX_WORK * (100 - WorkRatiox100); i++) {
+                locals.local() += 1.0;
+            }
+            { 
+                tbb::critical_section::scoped_lock my_lock(cs);
+                for(int i = 0; i < MAX_WORK * WorkRatiox100; i++) {
+                    locals.local() += 1.0;
+                }
+                unprotected_count++;
+            }
+        }
+        locals.local() = (tbb::tick_count::now() - t0).seconds();
+    }
+};
+
+void
+RunOneCriticalSectionTest(int nThreads, int csWorkRatio, bool test_throw) {
+    tbb::task_scheduler_init init(tbb::task_scheduler_init::deferred);
+    tbb::enumerable_thread_specific<double> test_locals;
+    int myCount = 0;
+    BusyBody myBody(nThreads, csWorkRatio, test_locals, myCount, test_throw);
+    BusyBodyScoped myScopedBody(nThreads, csWorkRatio, test_locals, myCount, test_throw);
+    init.initialize(nThreads);
+    tbb::tick_count t0;
+    {
+        t0 = tbb::tick_count::now();
+        myCount = 0;
+        NativeParallelFor(nThreads, myBody);
+        ASSERT(myCount == (MAX_WORK - (MAX_WORK % nThreads)), NULL);
+        REMARK("%d threads, work ratio %d per cent, time %g", nThreads, csWorkRatio, (tbb::tick_count::now() - t0).seconds());
+        if (nThreads > 1) {
+            double etsSum = 0;
+            double etsMax = 0;
+            double etsMin = 0;
+            double etsSigmaSq = 0;
+            double etsSigma = 0;
+
+            for(tbb::enumerable_thread_specific<double>::const_iterator ci = test_locals.begin(); ci != test_locals.end(); ci++) {
+                etsSum += *ci;
+                if(etsMax==0.0) {
+                    etsMin = *ci;
+                }
+                else {
+                    if(etsMin > *ci) etsMin = *ci;
+                }
+                if(etsMax < *ci) etsMax = *ci;
+            }
+            double etsAvg = etsSum / (double)nThreads;
+            for(tbb::enumerable_thread_specific<double>::const_iterator ci = test_locals.begin(); ci != test_locals.end(); ci++) {
+                etsSigma = etsAvg - *ci;
+                etsSigmaSq += etsSigma * etsSigma;
+            }
+            // An attempt to gauge the "fairness" of the scheduling of the threads. We compute
+            // the standard deviation of the per-thread times and compare it with the maximum
+            // deviation from the average time. If the difference is 0, all threads finished in
+            // the same amount of time. If it is non-zero, the difference is divided by the
+            // average time and the negative log10 is taken. If the result is > 2, the difference
+            // is on the order of 0.01*T, where T is the average time. We arbitrarily define this as "fair."
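+            // Worked example (editorial): with etsAvg = 1.00 s, a worst-case deviation of
+            // 0.02 s from the average, and a standard deviation of 0.009 s, the code below
+            // computes etsMax - etsSigma = 0.011 and fairness = -log10(0.011/1.00) ~= 1.96,
+            // i.e. just under the 2.0 threshold, so the run would be reported as "Unfair".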
+            etsSigma = sqrt(etsSigmaSq/double(nThreads)); 
+            etsMax -= etsAvg;  // etsMax - etsAvg == delta1
+            etsMin = etsAvg - etsMin;  // etsAvg - etsMin == delta2
+            if(etsMax < etsMin) etsMax = etsMin;
+            etsMax -= etsSigma;
+            // ASSERT(etsMax >= 0, NULL);  // shouldn't the maximum difference from the mean be > the stddev?
+            etsMax = (etsMax > 0.0) ? etsMax : 0.0;  // possible rounding error
+            double fairness = etsMax / etsAvg;
+            if(fairness == 0.0) {
+                fairness = 100.0;
+            }
+            else fairness = - log10(fairness);
+            if(fairness > 2.0 ) {
+                REMARK("  Fair (%g)\n", fairness);
+            }
+            else {
+                REMARK("  Unfair (%g)\n", fairness);
+            }
+        }
+        myCount = 0;
+        NativeParallelFor(nThreads, myScopedBody);
+        ASSERT(myCount == (MAX_WORK - (MAX_WORK % nThreads)), NULL);
+
+    }
+
+    init.terminate();
+}
+
+void
+RunParallelTests() {
+    for(int p = MinThread; p <= MaxThread; p++) {
+        for(int cs_ratio = 1; cs_ratio < 95; cs_ratio *= 2) {
+            RunOneCriticalSectionTest(p, cs_ratio, /*test_throw*/true);
+        }
+    }
+}
+
+int TestMain () {
+    if(MinThread <= 0) MinThread = 1;
+
+    if(MaxThread > 0) {
+        RunParallelTests();
+    }
+
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_eh_algorithms.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_eh_algorithms.cpp
new file mode 100644 (file)
index 0000000..833a281
--- /dev/null
@@ -0,0 +1,1281 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include <limits.h> // for INT_MAX
+#include "tbb/task_scheduler_init.h"
+#include "tbb/tbb_exception.h"
+#include "tbb/task.h"
+#include "tbb/atomic.h"
+#include "tbb/parallel_for.h"
+#include "tbb/parallel_reduce.h"
+#include "tbb/parallel_do.h"
+#include "tbb/pipeline.h"
+#include "tbb/parallel_scan.h"
+#include "tbb/blocked_range.h"
+#include "harness_assert.h"
+
+#if __TBB_TASK_GROUP_CONTEXT
+
+#define FLAT_RANGE  100000
+#define FLAT_GRAIN  100
+#define NESTING_RANGE  100
+#define NESTING_GRAIN  10
+#define NESTED_RANGE  (FLAT_RANGE / NESTING_RANGE)
+#define NESTED_GRAIN  (FLAT_GRAIN / NESTING_GRAIN)
+
+tbb::atomic<intptr_t> g_FedTasksCount; // number of tasks added by parallel_do feeder
+
+inline intptr_t Existed () { return INT_MAX; }
+
+#include "harness_eh.h"
+
+inline void ResetGlobals (  bool throwException = true, bool flog = false ) {
+    ResetEhGlobals( throwException, flog );
+    g_FedTasksCount = 0;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Tests for tbb::parallel_for and tbb::parallel_reduce
+
+typedef size_t count_type;
+typedef tbb::blocked_range<count_type> range_type;
+
+inline intptr_t NumSubranges ( intptr_t length, intptr_t grain ) {
+    intptr_t n = 1;
+    for( ; length > grain; length -= length >> 1 )
+        n *= 2;
+    return n;
+}
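+// Worked example (editorial): NumSubranges(FLAT_RANGE, FLAT_GRAIN) = NumSubranges(100000, 100).
+// The length is roughly halved (length -= length >> 1) while it exceeds the grain, and n is
+// doubled each time; ten halvings bring 100000 down to 98 <= 100, so n = 2^10 = 1024 subranges.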
+
+template<class Body>
+intptr_t TestNumSubrangesCalculation ( intptr_t length, intptr_t grain, intptr_t nested_length, intptr_t nested_grain ) {
+    ResetGlobals();
+    g_ThrowException = false;
+    intptr_t nestingCalls = NumSubranges(length, grain),
+             nestedCalls = NumSubranges(nested_length, nested_grain),
+             maxExecuted = nestingCalls * (nestedCalls + 1);
+    tbb::parallel_for( range_type(0, length, grain), Body() );
+    ASSERT (g_CurExecuted == maxExecuted, "Wrong estimation of bodies invocation count");
+    return maxExecuted;
+}
+
+class NoThrowParForBody {
+public:
+    void operator()( const range_type& r ) const {
+        volatile long x;
+        count_type end = r.end();
+        for( count_type i=r.begin(); i<end; ++i )
+            x = 0;
+    }
+};
+
+#if TBB_USE_EXCEPTIONS
+
+void Test0 () {
+    ResetGlobals();
+    tbb::simple_partitioner p;
+    for( size_t i=0; i<10; ++i ) {
+        tbb::parallel_for( range_type(0, 0, 1), NoThrowParForBody() );
+        tbb::parallel_for( range_type(0, 0, 1), NoThrowParForBody(), p );
+        tbb::parallel_for( range_type(0, 128, 8), NoThrowParForBody() );
+        tbb::parallel_for( range_type(0, 128, 8), NoThrowParForBody(), p );
+    }
+} // void Test0 ()
+
+//! Template that creates a functor suitable for parallel_reduce from a functor for parallel_for.
+template<typename ParForBody>
+class SimpleParReduceBody: NoAssign {
+    ParForBody m_Body;
+public:
+    void operator()( const range_type& r ) const { m_Body(r); }
+    SimpleParReduceBody() {}
+    SimpleParReduceBody( SimpleParReduceBody& left, tbb::split ) : m_Body(left.m_Body) {}
+    void join( SimpleParReduceBody& /*right*/ ) {}
+}; // SimpleParReduceBody
+
+//! Test parallel_for and parallel_reduce for a given partitioner.
+/** The Body need only be suitable for a parallel_for. */
+template<typename ParForBody, typename Partitioner>
+void TestParallelLoopAux( Partitioner& partitioner ) {
+    for( int i=0; i<2; ++i ) {
+        ResetGlobals();
+        TRY();
+            if( i==0 )
+                tbb::parallel_for( range_type(0, FLAT_RANGE, FLAT_GRAIN), ParForBody(), partitioner );
+            else {
+                SimpleParReduceBody<ParForBody> rb;
+                tbb::parallel_reduce( range_type(0, FLAT_RANGE, FLAT_GRAIN), rb, partitioner );
+            }
+        CATCH_AND_ASSERT();
+        ASSERT (exceptionCaught, "No exception thrown from the nesting parallel_for");
+        ASSERT (g_CurExecuted <= g_ExecutedAtCatch + g_NumThreads, "Too many tasks survived exception");
+        ASSERT (g_Exceptions == 1, "No try_blocks in any body expected in this test");
+        if ( !g_SolitaryException )
+            ASSERT (g_CurExecuted <= g_ExecutedAtCatch + g_NumThreads, "Too many tasks survived exception");
+    }
+}
+
+//! Test with parallel_for and parallel_reduce, over all three kinds of partitioners.
+/** The Body only needs to be suitable for tbb::parallel_for. */
+template<typename Body>
+void TestParallelLoop() {
+    // The simple and auto partitioners should be const, but not the affinity partitioner.
+    const tbb::simple_partitioner p0;
+    TestParallelLoopAux<Body>( p0 );
+    const tbb::auto_partitioner p1;
+    TestParallelLoopAux<Body>( p1 );
+}
+
+class SimpleParForBody: NoAssign {
+public:
+    void operator()( const range_type& r ) const {
+        Harness::ConcurrencyTracker ct;
+        volatile long x;
+        for( count_type i = r.begin(); i != r.end(); ++i )
+            x = 0;
+        ++g_CurExecuted;
+        WaitUntilConcurrencyPeaks();
+        ThrowTestException(1);
+    }
+};
+
+void Test1() {
+    TestParallelLoop<SimpleParForBody>();
+} // void Test1 ()
+
+class NestingParForBody: NoAssign {
+public:
+    void operator()( const range_type& ) const {
+        Harness::ConcurrencyTracker ct;
+        ++g_CurExecuted;
+        tbb::parallel_for( tbb::blocked_range<size_t>(0, NESTED_RANGE, NESTED_GRAIN), SimpleParForBody() );
+    }
+};
+
+//! Uses parallel_for body containing a nested parallel_for with the default context not wrapped by a try-block.
+/** Nested algorithms are spawned inside the new bound context by default. Since
+    exceptions thrown from the nested parallel_for are not handled by the caller
+    (nesting parallel_for body) in this test, they will cancel all the sibling nested
+    algorithms. **/
+void Test2 () {
+    TestParallelLoop<NestingParForBody>();
+} // void Test2 ()
+
+class NestingParForBodyWithIsolatedCtx {
+public:
+    void operator()( const range_type& ) const {
+        tbb::task_group_context ctx(tbb::task_group_context::isolated);
+        ++g_CurExecuted;
+        tbb::parallel_for( tbb::blocked_range<size_t>(0, NESTED_RANGE, NESTED_GRAIN), SimpleParForBody(), tbb::simple_partitioner(), ctx );
+    }
+};
+
+//! Uses parallel_for body invoking a nested parallel_for with an isolated context without a try-block.
+/** Even though exceptions thrown from the nested parallel_for are not handled
+    by the caller in this test, they will not affect sibling nested algorithms
+    already running because of the isolated contexts. However, because the first
+    exception cancels the root parallel_for, only the first g_NumThreads subranges
+    (which launch nested parallel_fors) will be processed. **/
+void Test3 () {
+    ResetGlobals();
+    typedef NestingParForBodyWithIsolatedCtx body_type;
+    intptr_t  nestedCalls = NumSubranges(NESTED_RANGE, NESTED_GRAIN),
+            minExecuted = (g_NumThreads - 1) * nestedCalls;
+    TRY();
+        tbb::parallel_for( range_type(0, NESTING_RANGE, NESTING_GRAIN), body_type() );
+    CATCH_AND_ASSERT();
+    ASSERT (exceptionCaught, "No exception thrown from the nesting parallel_for");
+    if ( g_SolitaryException ) {
+        ASSERT (g_CurExecuted > minExecuted, "Too few tasks survived exception");
+        ASSERT (g_CurExecuted <= minExecuted + (g_ExecutedAtCatch + g_NumThreads), "Too many tasks survived exception");
+    }
+    ASSERT (g_Exceptions == 1, "No try_blocks in any body expected in this test");
+    if ( !g_SolitaryException )
+        ASSERT (g_CurExecuted <= g_ExecutedAtCatch + g_NumThreads, "Too many tasks survived exception");
+} // void Test3 ()
+
+class NestingParForExceptionSafeBody {
+public:
+    void operator()( const range_type& ) const {
+        tbb::task_group_context ctx(tbb::task_group_context::isolated);
+        TRY();
+            tbb::parallel_for( tbb::blocked_range<size_t>(0, NESTED_RANGE, NESTED_GRAIN), SimpleParForBody(), tbb::simple_partitioner(), ctx );
+        CATCH();
+    }
+};
+
+//! Uses parallel_for body invoking a nested parallel_for (with default bound context) inside a try-block.
+/** Since exception(s) thrown from the nested parallel_for are handled by the caller
+    in this test, they affect neither the other tasks of the root parallel_for
+    nor the sibling nested algorithms. **/
+void Test4 () {
+    ResetGlobals( true, true );
+    intptr_t  nestedCalls = NumSubranges(NESTED_RANGE, NESTED_GRAIN),
+            nestingCalls = NumSubranges(NESTING_RANGE, NESTING_GRAIN),
+            maxExecuted = nestingCalls * nestedCalls;
+    TRY();
+        tbb::parallel_for( range_type(0, NESTING_RANGE, NESTING_GRAIN), NestingParForExceptionSafeBody() );
+    CATCH();
+    ASSERT (!exceptionCaught, "All exceptions must have been handled in the parallel_for body");
+    intptr_t  minExecuted = 0;
+    if ( g_SolitaryException ) {
+        minExecuted = maxExecuted - nestedCalls;
+        ASSERT (g_Exceptions == 1, "No exception registered");
+        ASSERT (g_CurExecuted >= minExecuted, "Too few tasks executed");
+        ASSERT (g_CurExecuted <= minExecuted + g_NumThreads, "Too many tasks survived exception");
+    }
+    else {
+        minExecuted = g_Exceptions;
+        ASSERT (g_Exceptions > 1 && g_Exceptions <= nestingCalls, "Unexpected actual number of exceptions");
+        ASSERT (g_CurExecuted >= minExecuted, "Too many executed tasks reported");
+        ASSERT (g_CurExecuted <= g_ExecutedAtCatch + g_NumThreads, "Too many tasks survived multiple exceptions");
+        ASSERT (g_CurExecuted <= nestingCalls * (1 + g_NumThreads), "Too many tasks survived exception");
+    }
+} // void Test4 ()
+
+#endif /* TBB_USE_EXCEPTIONS */
+
+class ParForBodyToCancel {
+public:
+    void operator()( const range_type& ) const {
+        ++g_CurExecuted;
+        CancellatorTask::WaitUntilReady();
+    }
+};
+
+template<class B>
+class ParForLauncherTask : public tbb::task {
+    tbb::task_group_context &my_ctx;
+
+    tbb::task* execute () {
+        tbb::parallel_for( range_type(0, FLAT_RANGE, FLAT_GRAIN), B(), tbb::simple_partitioner(), my_ctx );
+        return NULL;
+    }
+public:
+    ParForLauncherTask ( tbb::task_group_context& ctx ) : my_ctx(ctx) {}
+};
+
+//! Test for cancelling an algorithm from outside (from a task running in parallel with the algorithm).
+void TestCancelation1 () {
+    ResetGlobals( false );
+    RunCancellationTest<ParForLauncherTask<ParForBodyToCancel>, CancellatorTask>( NumSubranges(FLAT_RANGE, FLAT_GRAIN) / 4 );
+    ASSERT (g_CurExecuted < g_ExecutedAtCatch + g_NumThreads, "Too many tasks were executed after cancellation");
+}
+
+class CancellatorTask2 : public tbb::task {
+    tbb::task_group_context &m_GroupToCancel;
+
+    tbb::task* execute () {
+        Harness::ConcurrencyTracker ct;
+        WaitUntilConcurrencyPeaks();
+        m_GroupToCancel.cancel_group_execution();
+        g_ExecutedAtCatch = g_CurExecuted;
+        return NULL;
+    }
+public:
+    CancellatorTask2 ( tbb::task_group_context& ctx, intptr_t ) : m_GroupToCancel(ctx) {}
+};
+
+class ParForBodyToCancel2 {
+public:
+    void operator()( const range_type& ) const {
+        ++g_CurExecuted;
+        Harness::ConcurrencyTracker ct;
+        // The test will hang (and be timed out by the test system) if is_cancelled() is broken
+        while( !tbb::task::self().is_cancelled() )
+            __TBB_Yield();
+    }
+};
+
+//! Test for cancelling an algorithm from outside (from a task running in parallel with the algorithm).
+/** This version also tests task::is_cancelled() method. **/
+void TestCancelation2 () {
+    ResetGlobals();
+    RunCancellationTest<ParForLauncherTask<ParForBodyToCancel2>, CancellatorTask2>();
+    ASSERT (g_ExecutedAtCatch < g_NumThreads, "Somehow worker tasks started their execution before the cancellator task");
+    ASSERT (g_CurExecuted <= g_ExecutedAtCatch + g_NumThreads, "Some tasks were executed after cancellation");
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Regression test based on the contribution by the author of the following forum post:
+// http://softwarecommunity.intel.com/isn/Community/en-US/forums/thread/30254959.aspx
+
+#define LOOP_COUNT 16
+#define MAX_NESTING 3
+#define REDUCE_RANGE 1024
+#define REDUCE_GRAIN 256
+
+class Worker {
+public:
+    void DoWork (int & result, int nest);
+};
+
+class RecursiveParReduceBodyWithSharedWorker {
+    Worker * m_SharedWorker;
+    int m_NestingLevel;
+    int m_Result;
+public:
+    RecursiveParReduceBodyWithSharedWorker ( RecursiveParReduceBodyWithSharedWorker& src, tbb::split )
+        : m_SharedWorker(src.m_SharedWorker)
+        , m_NestingLevel(src.m_NestingLevel)
+        , m_Result(0)
+    {}
+    RecursiveParReduceBodyWithSharedWorker ( Worker *w, int nesting )
+        : m_SharedWorker(w)
+        , m_NestingLevel(nesting)
+        , m_Result(0)
+    {}
+
+    void operator() ( const tbb::blocked_range<size_t>& r ) {
+        for (size_t i = r.begin (); i != r.end (); ++i) {
+            int subtotal = 0;
+            m_SharedWorker->DoWork (subtotal, m_NestingLevel);
+            m_Result += subtotal;
+        }
+    }
+    void join (const RecursiveParReduceBodyWithSharedWorker & x) {
+        m_Result += x.m_Result;
+    }
+    int result () { return m_Result; }
+};
+
+void Worker::DoWork ( int& result, int nest ) {
+    ++nest;
+    if ( nest < MAX_NESTING ) {
+        RecursiveParReduceBodyWithSharedWorker rt (this, nest);
+        tbb::parallel_reduce (tbb::blocked_range<size_t>(0, REDUCE_RANGE, REDUCE_GRAIN), rt);
+        result = rt.result ();
+    }
+    else
+        ++result;
+}
+
+//! Regression test for a hang observed with the first version of cancellation propagation
+void TestCancelation3 () {
+    Worker w;
+    int result = 0;
+    w.DoWork (result, 0);
+    ASSERT ( result == 1048576, "Wrong calculation result");
+}
+
+void RunParForAndReduceTests () {
+    REMARK( "parallel for and reduce tests\n" );
+    tbb::task_scheduler_init init (g_NumThreads);
+    g_Master = Harness::CurrentTid();
+
+#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
+    Test0();
+    Test1();
+    Test2();
+    Test3();
+    Test4();
+#endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN */
+    TestCancelation1();
+    TestCancelation2();
+    TestCancelation3();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Tests for tbb::parallel_do
+
+#define ITER_RANGE          1000
+#define ITEMS_TO_FEED       50
+#define NESTED_ITER_RANGE   100
+#define NESTING_ITER_RANGE  50
+
+#define PREPARE_RANGE(Iterator, rangeSize)  \
+    size_t test_vector[rangeSize + 1]; \
+    for (int i =0; i < rangeSize; i++) \
+        test_vector[i] = i; \
+    Iterator begin(&test_vector[0]); \
+    Iterator end(&test_vector[rangeSize])
+
+void Feed ( tbb::parallel_do_feeder<size_t> &feeder, size_t val ) {
+    if (g_FedTasksCount < ITEMS_TO_FEED) { 
+        ++g_FedTasksCount; 
+        feeder.add(val);
+    }
+}
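+
+// Illustrative sketch (not part of the original TBB test): the basic parallel_do +
+// feeder pattern that Feed() above plugs into. A body providing the two-argument form
+// of operator() may add new work items on the fly via parallel_do_feeder::add().
+// The Example* names below are hypothetical.
+#include "tbb/parallel_do.h"
+
+struct ExampleFeedingBody {
+    void operator()( size_t& item, tbb::parallel_do_feeder<size_t>& feeder ) const {
+        if ( item > 0 )
+            feeder.add( item - 1 );   // dynamically extends the work stream
+    }
+};
+
+inline void ExampleRunParallelDo ( size_t* first, size_t* last ) {
+    tbb::parallel_do( first, last, ExampleFeedingBody() );
+}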
+
+#include "harness_iterator.h"
+
+#if TBB_USE_EXCEPTIONS
+
+// Simple functor object with exception
+class SimpleParDoBody {
+public:
+    void operator() ( size_t &value ) const {
+        ++g_CurExecuted;
+        Harness::ConcurrencyTracker ct;
+        value += 1000;
+        WaitUntilConcurrencyPeaks();
+        ThrowTestException(1);
+    }
+};
+
+// Simple functor object with exception and feeder
+class SimpleParDoBodyWithFeeder : SimpleParDoBody {
+public:
+    void operator() ( size_t &value, tbb::parallel_do_feeder<size_t> &feeder ) const {
+        Feed(feeder, 0);
+        SimpleParDoBody::operator()(value);
+    }
+};
+
+// Tests exceptions without nesting
+template <class Iterator, class simple_body>
+void Test1_parallel_do () {
+    ResetGlobals();
+    PREPARE_RANGE(Iterator, ITER_RANGE);
+    TRY();
+        tbb::parallel_do<Iterator, simple_body>(begin, end, simple_body() );
+    CATCH_AND_ASSERT();
+    ASSERT (g_CurExecuted <= g_ExecutedAtCatch + g_NumThreads, "Too many tasks survived exception");
+    ASSERT (g_Exceptions == 1, "No try_blocks in any body expected in this test");
+    if ( !g_SolitaryException )
+        ASSERT (g_CurExecuted <= g_ExecutedAtCatch + g_NumThreads, "Too many tasks survived exception");
+
+} // void Test1_parallel_do ()
+
+template <class Iterator>
+class NestingParDoBody {
+public:
+    void operator()( size_t& /*value*/ ) const {
+        ++g_CurExecuted;
+        PREPARE_RANGE(Iterator, NESTED_ITER_RANGE);
+        tbb::parallel_do<Iterator, SimpleParDoBody>(begin, end, SimpleParDoBody());
+    }
+};
+
+template <class Iterator>
+class NestingParDoBodyWithFeeder : NestingParDoBody<Iterator> {
+public:
+    void operator()( size_t& value, tbb::parallel_do_feeder<size_t>& feeder ) const {
+        Feed(feeder, 0);
+        NestingParDoBody<Iterator>::operator()(value);
+    }
+};
+
+//! Uses parallel_do body containing a nested parallel_do with the default context not wrapped by a try-block.
+/** Nested algorithms are spawned inside the new bound context by default. Since
+    exceptions thrown from the nested parallel_do are not handled by the caller
+    (nesting parallel_do body) in this test, they will cancel all the sibling nested
+    algorithms. **/
+template <class Iterator, class nesting_body>
+void Test2_parallel_do () {
+    ResetGlobals();
+    PREPARE_RANGE(Iterator, ITER_RANGE);
+    TRY();
+        tbb::parallel_do<Iterator, nesting_body >(begin, end, nesting_body() );
+    CATCH_AND_ASSERT();
+    ASSERT (exceptionCaught, "No exception thrown from the nesting parallel_do");
+    //if ( g_SolitaryException )
+        ASSERT (g_CurExecuted <= g_ExecutedAtCatch + g_NumThreads, "Too many tasks survived exception");
+    ASSERT (g_Exceptions == 1, "No try_blocks in any body expected in this test");
+    if ( !g_SolitaryException )
+        ASSERT (g_CurExecuted <= g_ExecutedAtCatch + g_NumThreads, "Too many tasks survived exception");
+} // void Test2_parallel_do ()
+
+template <class Iterator> 
+class NestingParDoBodyWithIsolatedCtx {
+public:
+    void operator()( size_t& /*value*/ ) const {
+        tbb::task_group_context ctx(tbb::task_group_context::isolated);
+        ++g_CurExecuted;
+        PREPARE_RANGE(Iterator, NESTED_ITER_RANGE);
+        tbb::parallel_do<Iterator, SimpleParDoBody>(begin, end, SimpleParDoBody(), ctx);
+    }
+};
+
+template <class Iterator> 
+class NestingParDoBodyWithIsolatedCtxWithFeeder : NestingParDoBodyWithIsolatedCtx<Iterator> {
+public:
+    void operator()( size_t& value, tbb::parallel_do_feeder<size_t> &feeder ) const {
+        Feed(feeder, 0);
+        NestingParDoBodyWithIsolatedCtx<Iterator>::operator()(value);
+    }
+};
+
+//! Uses parallel_do body invoking a nested parallel_do with an isolated context without a try-block.
+/** Even though exceptions thrown from the nested parallel_do are not handled
+    by the caller in this test, they will not affect sibling nested algorithms
+    already running because of the isolated contexts. However, because the first
+    exception cancels the root parallel_do, only the first g_NumThreads items
+    (those that launch the nested parallel_dos) will be processed. **/
+template <class Iterator, class nesting_body>
+void Test3_parallel_do () {
+    ResetGlobals();
+    PREPARE_RANGE(Iterator, NESTING_ITER_RANGE);
+    intptr_t nestedCalls = NESTED_ITER_RANGE,
+             minExecuted = (g_NumThreads - 1) * nestedCalls;
+    TRY();
+        tbb::parallel_do<Iterator, nesting_body >(begin, end, nesting_body());
+    CATCH_AND_ASSERT();
+    ASSERT (exceptionCaught, "No exception thrown from the nesting parallel_do");
+    if ( g_SolitaryException ) {
+        ASSERT (g_CurExecuted > minExecuted, "Too few tasks survived exception");
+        ASSERT (g_CurExecuted <= minExecuted + (g_ExecutedAtCatch + g_NumThreads), "Too many tasks survived exception");
+    }
+    ASSERT (g_Exceptions == 1, "No try_blocks in any body expected in this test");
+    if ( !g_SolitaryException )
+        ASSERT (g_CurExecuted <= g_ExecutedAtCatch + g_NumThreads, "Too many tasks survived exception");
+} // void Test3_parallel_do ()
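+
+// Illustrative sketch (not part of the original TBB test): what an isolated context
+// buys the nested algorithms above. An algorithm bound to an isolated context has no
+// parent group, so cancellation (including exception-driven cancellation) of the
+// enclosing algorithm does not reach it. The Example* names below are hypothetical.
+struct ExampleIsolatedInnerBody {
+    void operator()( const tbb::blocked_range<int>& ) const { /* independent work */ }
+};
+
+inline void ExampleIsolatedNesting () {
+    // Only cancelling inner_ctx itself (or an exception thrown inside this loop)
+    // stops the nested algorithm; the caller's group cannot cancel it.
+    tbb::task_group_context inner_ctx( tbb::task_group_context::isolated );
+    tbb::parallel_for( tbb::blocked_range<int>(0, 100), ExampleIsolatedInnerBody(),
+                       tbb::simple_partitioner(), inner_ctx );
+}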
+
+template <class Iterator>
+class NestingParDoWithEhBody {
+public:
+    void operator()( size_t& /*value*/ ) const {
+        tbb::task_group_context ctx(tbb::task_group_context::isolated);
+        PREPARE_RANGE(Iterator, NESTED_ITER_RANGE);
+        TRY();
+            tbb::parallel_do<Iterator, SimpleParDoBody>(begin, end, SimpleParDoBody(), ctx);
+        CATCH();
+    }
+};
+
+template <class Iterator>
+class NestingParDoWithEhBodyWithFeeder : NoAssign, NestingParDoWithEhBody<Iterator> {
+public:
+    void operator()( size_t &value, tbb::parallel_do_feeder<size_t> &feeder ) const {
+        Feed(feeder, 0);
+        NestingParDoWithEhBody<Iterator>::operator()(value);
+    }
+};
+
+//! Uses parallel_do body invoking a nested parallel_do (with an isolated context) inside a try-block.
+/** Since exception(s) thrown from the nested parallel_do are handled by the caller
+    in this test, they affect neither other tasks of the root parallel_do
+    nor sibling nested algorithms. **/
+template <class Iterator, class nesting_body_with_eh>
+void Test4_parallel_do () {
+    ResetGlobals( true, true );
+    PREPARE_RANGE(Iterator, NESTING_ITER_RANGE);
+    TRY();
+        tbb::parallel_do<Iterator, nesting_body_with_eh>(begin, end, nesting_body_with_eh());
+    CATCH();
+    ASSERT (!exceptionCaught, "All exceptions must have been handled in the parallel_do body");
+    intptr_t nestedCalls = NESTED_ITER_RANGE,
+             nestingCalls = NESTING_ITER_RANGE + g_FedTasksCount,
+             maxExecuted = nestingCalls * nestedCalls,
+             minExecuted = 0;
+    if ( g_SolitaryException ) {
+        minExecuted = maxExecuted - nestedCalls;
+        ASSERT (g_Exceptions == 1, "No exception registered");
+        ASSERT (g_CurExecuted >= minExecuted, "Too few tasks executed");
+        ASSERT (g_CurExecuted <= minExecuted + g_NumThreads, "Too many tasks survived exception");
+    }
+    else {
+        minExecuted = g_Exceptions;
+        ASSERT (g_Exceptions > 1 && g_Exceptions <= nestingCalls, "Unexpected actual number of exceptions");
+        ASSERT (g_CurExecuted >= minExecuted, "Too many executed tasks reported");
+        ASSERT (g_CurExecuted < g_ExecutedAtCatch + g_NumThreads + nestingCalls, "Too many tasks survived multiple exceptions");
+        ASSERT (g_CurExecuted <= nestingCalls * (1 + g_NumThreads), "Too many tasks survived exception");
+    }
+} // void Test4_parallel_do ()
+
+// This body throws an exception only if the task was added by feeder
+class ParDoBodyWithThrowingFeederTasks {
+public:
+    //! This form of the function call operator can be used when the body needs to add more work during the processing
+    void operator() ( size_t &value, tbb::parallel_do_feeder<size_t> &feeder ) const {
+        ++g_CurExecuted;
+        Feed(feeder, 1);
+        if (value == 1)
+            ThrowTestException(1);
+    }
+}; // class ParDoBodyWithThrowingFeederTasks
+
+// Tests an exception thrown in a task that was added by the feeder.
+template <class Iterator>
+void Test5_parallel_do () {
+    ResetGlobals();
+    PREPARE_RANGE(Iterator, ITER_RANGE);
+    TRY();
+        tbb::parallel_do<Iterator, ParDoBodyWithThrowingFeederTasks>(begin, end, ParDoBodyWithThrowingFeederTasks());
+    CATCH();
+    if (g_SolitaryException)
+        ASSERT (exceptionCaught, "At least one exception should occur");
+} // void Test5_parallel_do ()
+
+#endif /* TBB_USE_EXCEPTIONS */
+
+class ParDoBodyToCancel {
+public:
+    void operator()( size_t& /*value*/ ) const {
+        ++g_CurExecuted;
+        CancellatorTask::WaitUntilReady();
+    }
+};
+
+class ParDoBodyToCancelWithFeeder : ParDoBodyToCancel {
+public:
+    void operator()( size_t& value, tbb::parallel_do_feeder<size_t> &feeder ) const {
+        Feed(feeder, 0);
+        ParDoBodyToCancel::operator()(value);
+    }
+};
+
+template<class B, class Iterator>
+class ParDoWorkerTask : public tbb::task {
+    tbb::task_group_context &my_ctx;
+
+    tbb::task* execute () {
+        PREPARE_RANGE(Iterator, NESTED_ITER_RANGE);
+        tbb::parallel_do<Iterator, B>( begin, end, B(), my_ctx );
+        return NULL;
+    }
+public:
+    ParDoWorkerTask ( tbb::task_group_context& ctx ) : my_ctx(ctx) {}
+};
+
+//! Test for cancelling an algorithm from outside (from a task running in parallel with the algorithm).
+template <class Iterator, class body_to_cancel>
+void TestCancelation1_parallel_do () {
+    ResetGlobals( false );
+    intptr_t  threshold = 10;
+    tbb::task_group_context  ctx;
+    ctx.reset();
+    tbb::empty_task &r = *new( tbb::task::allocate_root() ) tbb::empty_task;
+    r.set_ref_count(3);
+    r.spawn( *new( r.allocate_child() ) CancellatorTask(ctx, threshold) );
+    __TBB_Yield();
+    r.spawn( *new( r.allocate_child() ) ParDoWorkerTask<body_to_cancel, Iterator>(ctx) );
+    TRY();
+        r.wait_for_all();
+    CATCH_AND_FAIL();
+    ASSERT (g_CurExecuted < g_ExecutedAtCatch + g_NumThreads, "Too many tasks were executed after cancellation");
+    r.destroy(r);
+}
+
+class ParDoBodyToCancel2 {
+public:
+    void operator()( size_t& /*value*/ ) const {
+        ++g_CurExecuted;
+        Harness::ConcurrencyTracker ct;
+        // The test will hang (and be timed out by the test system) if is_cancelled() is broken
+        while( !tbb::task::self().is_cancelled() )
+            __TBB_Yield();
+    }
+};
+
+class ParDoBodyToCancel2WithFeeder : ParDoBodyToCancel2 {
+public:
+    void operator()( size_t& value, tbb::parallel_do_feeder<size_t> &feeder ) const {
+        Feed(feeder, 0);
+        ParDoBodyToCancel2::operator()(value);
+    }
+};
+
+//! Test for cancelling an algorithm from outside (from a task running in parallel with the algorithm).
+/** This version also tests task::is_cancelled() method. **/
+template <class Iterator, class body_to_cancel>
+void TestCancelation2_parallel_do () {
+    ResetGlobals();
+    RunCancellationTest<ParDoWorkerTask<body_to_cancel, Iterator>, CancellatorTask2>();
+    ASSERT (g_CurExecuted <= g_ExecutedAtCatch + g_NumThreads, "Some tasks were executed after cancellation");
+}
+
+#define RunWithSimpleBody(func, body)       \
+    func<Harness::RandomIterator<size_t>, body>();           \
+    func<Harness::RandomIterator<size_t>, body##WithFeeder>();  \
+    func<Harness::ForwardIterator<size_t>, body>();         \
+    func<Harness::ForwardIterator<size_t>, body##WithFeeder>()
+
+#define RunWithTemplatedBody(func, body)       \
+    func<Harness::RandomIterator<size_t>, body<Harness::RandomIterator<size_t> > >();           \
+    func<Harness::RandomIterator<size_t>, body##WithFeeder<Harness::RandomIterator<size_t> > >();  \
+    func<Harness::ForwardIterator<size_t>, body<Harness::ForwardIterator<size_t> > >();         \
+    func<Harness::ForwardIterator<size_t>, body##WithFeeder<Harness::ForwardIterator<size_t> > >()
+
+void RunParDoTests() {
+    REMARK( "parallel do tests\n" );
+    tbb::task_scheduler_init init (g_NumThreads);
+    g_Master = Harness::CurrentTid();
+#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
+    RunWithSimpleBody(Test1_parallel_do, SimpleParDoBody);
+    RunWithTemplatedBody(Test2_parallel_do, NestingParDoBody);
+    RunWithTemplatedBody(Test3_parallel_do, NestingParDoBodyWithIsolatedCtx);
+    RunWithTemplatedBody(Test4_parallel_do, NestingParDoWithEhBody);
+    Test5_parallel_do<Harness::ForwardIterator<size_t> >();
+    Test5_parallel_do<Harness::RandomIterator<size_t> >();
+#endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN */
+    RunWithSimpleBody(TestCancelation1_parallel_do, ParDoBodyToCancel);
+    RunWithSimpleBody(TestCancelation2_parallel_do, ParDoBodyToCancel2);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Tests for tbb::pipeline
+
+#define NUM_ITEMS   100
+
+const size_t c_DataEndTag = size_t(~0);
+
+int g_NumTokens = 0;
+
+// Simple input filter class; it assigns 1 to all array members and stops
+// after emitting NUM_ITEMS items (the buffer is terminated with c_DataEndTag)
+class InputFilter: public tbb::filter {
+    tbb::atomic<size_t> m_Item;
+    size_t m_Buffer[NUM_ITEMS + 1];
+public:
+    InputFilter() : tbb::filter(parallel) {
+        m_Item = 0;
+        for (size_t i = 0; i < NUM_ITEMS; ++i )
+            m_Buffer[i] = 1;
+        m_Buffer[NUM_ITEMS] = c_DataEndTag;
+    }
+
+    void* operator()( void* ) {
+        size_t item = m_Item.fetch_and_increment();
+        if ( item >= NUM_ITEMS )
+            return NULL;
+        m_Buffer[item] = 1;
+        return &m_Buffer[item];
+    }
+
+    size_t* buffer() { return m_Buffer; }
+}; // class InputFilter
+
+// Pipeline filter that does not throw exceptions
+class NoThrowFilter : public tbb::filter {
+    size_t m_Value;
+public:
+    enum operation {
+        addition,
+        subtraction,
+        multiplication
+    } m_Operation;
+
+    NoThrowFilter(operation _operation, size_t value, bool is_parallel)
+        : filter(is_parallel? tbb::filter::parallel : tbb::filter::serial_in_order),
+        m_Value(value), m_Operation(_operation)
+    {}
+    void* operator()(void* item) {
+        size_t &value = *(size_t*)item;
+        ASSERT(value != c_DataEndTag, "terminator element is being processed");
+        switch (m_Operation){
+            case addition:
+                value += m_Value;
+                break;
+            case subtraction:
+                value -= m_Value;
+                break;
+            case multiplication:
+                value *= m_Value;
+                break;
+            default:
+                ASSERT(0, "Wrong operation parameter passed to NoThrowFilter");
+        } // switch (m_Operation)
+        return item;
+    }
+};
+
+// Tests the pipeline without throwing exceptions
+void Test0_pipeline () {
+    ResetGlobals();
+    // Run test when serial filter is the first non-input filter
+    InputFilter inputFilter;
+    NoThrowFilter filter1(NoThrowFilter::addition, 99, false);
+    NoThrowFilter filter2(NoThrowFilter::subtraction, 90, true);
+    NoThrowFilter filter3(NoThrowFilter::multiplication, 5, false);
+    // Result should be 50 for all items except the last
+    tbb::pipeline p;
+    p.add_filter(inputFilter);
+    p.add_filter(filter1);
+    p.add_filter(filter2);
+    p.add_filter(filter3);
+    p.run(8);
+    for (size_t i = 0; i < NUM_ITEMS; ++i)
+        ASSERT(inputFilter.buffer()[i] == 50, "pipeline didn't process items properly");
+} // void Test0_pipeline ()
+
+#if TBB_USE_EXCEPTIONS
+
+// Simple filter with exception throwing
+class SimpleFilter : public tbb::filter {
+    bool m_canThrow;
+public:
+    SimpleFilter (tbb::filter::mode _mode, bool canThrow ) : filter (_mode), m_canThrow(canThrow) {}
+
+    void* operator()(void* item) {
+        ++g_CurExecuted;
+        if ( m_canThrow ) {
+            if ( !is_serial() ) {
+                Harness::ConcurrencyTracker ct;
+                WaitUntilConcurrencyPeaks( min(g_NumTokens, g_NumThreads) );
+            }
+            ThrowTestException(1);
+        }
+        return item;
+    }
+}; // class SimpleFilter
+
+// This structure describes the modes and throwing behavior of the filters in a pipeline
+struct FilterSet {
+    tbb::filter::mode   mode1,
+                        mode2;
+    bool                throw1,
+                        throw2;
+
+    FilterSet( tbb::filter::mode m1, tbb::filter::mode m2, bool t1, bool t2 )
+        : mode1(m1), mode2(m2), throw1(t1), throw2(t2)
+    {}
+}; // struct FilterSet
+
+FilterSet serial_parallel( tbb::filter::serial, tbb::filter::parallel, false, true );
+
+template<typename InFilter, typename Filter>
+class CustomPipeline : protected tbb::pipeline {
+    InFilter inputFilter;
+    Filter filter1;
+    Filter filter2;
+public:
+    CustomPipeline( const FilterSet& filters )
+        : filter1(filters.mode1, filters.throw1), filter2(filters.mode2, filters.throw2)
+    {
+       add_filter(inputFilter);
+       add_filter(filter1);
+       add_filter(filter2);
+    }
+    void run () { tbb::pipeline::run(g_NumTokens); }
+    void run ( tbb::task_group_context& ctx ) { tbb::pipeline::run(g_NumTokens, ctx); }
+
+    using tbb::pipeline::add_filter;
+};
+
+typedef CustomPipeline<InputFilter, SimpleFilter> SimplePipeline;
+
+// Tests exceptions without nesting
+void Test1_pipeline ( const FilterSet& filters ) {
+    ResetGlobals();
+    SimplePipeline testPipeline(filters);
+    TRY();
+        testPipeline.run();
+        if ( g_CurExecuted == 2 * NUM_ITEMS ) {
+            // If all filters are serial, they may all be executed in thread(s) where the
+            // common test logic does not allow throwing exceptions, so we just quit.
+            return;
+        }
+    CATCH_AND_ASSERT();
+    ASSERT (g_CurExecuted <= g_ExecutedAtCatch + g_NumThreads, "Too many tasks survived exception");
+    ASSERT (g_Exceptions == 1, "No try_blocks in any body expected in this test");
+    if ( !g_SolitaryException )
+        ASSERT (g_CurExecuted <= g_ExecutedAtCatch + g_NumThreads, "Too many tasks survived exception");
+
+} // void Test1_pipeline ()
+
+// Filter with nesting
+class NestingFilter : public tbb::filter {
+public:
+    NestingFilter (tbb::filter::mode _mode, bool ) : filter (_mode) {}
+
+    void* operator()(void* item) {
+        ++g_CurExecuted;
+        SimplePipeline testPipeline(serial_parallel);
+        testPipeline.run();
+        return item;
+    }
+}; // class NestingFilter
+
+//! Uses pipeline containing a nested pipeline with the default context not wrapped by a try-block.
+/** Nested algorithms are spawned inside the new bound context by default. Since
+    exceptions thrown from the nested pipeline are not handled by the caller
+    (nesting pipeline body) in this test, they will cancel all the sibling nested
+    algorithms. **/
+void Test2_pipeline ( const FilterSet& filters ) {
+    ResetGlobals();
+    CustomPipeline<InputFilter, NestingFilter> testPipeline(filters);
+    TRY();
+        testPipeline.run();
+    CATCH_AND_ASSERT();
+    ASSERT (exceptionCaught, "No exception thrown from the nesting pipeline");
+    ASSERT (g_CurExecuted <= g_ExecutedAtCatch + g_NumThreads, "Too many tasks survived exception");
+    ASSERT (g_Exceptions == 1, "No try_blocks in any body expected in this test");
+    if ( !g_SolitaryException )
+        ASSERT (g_CurExecuted <= g_ExecutedAtCatch + g_NumThreads, "Too many tasks survived exception");
+} // void Test2_pipeline ()
+
+class NestingFilterWithIsolatedCtx : public tbb::filter {
+public:
+    NestingFilterWithIsolatedCtx(tbb::filter::mode m, bool ) : filter(m) {}
+
+    void* operator()(void* item) {
+        ++g_CurExecuted;
+        tbb::task_group_context ctx(tbb::task_group_context::isolated);
+        SimplePipeline testPipeline(serial_parallel);
+        testPipeline.run(ctx);
+        return item;
+    }
+}; // class NestingFilterWithIsolatedCtx
+
+//! Uses pipeline invoking a nested pipeline with an isolated context without a try-block.
+/** Even though exceptions thrown from the nested pipeline are not handled
+    by the caller in this test, they will not affect sibling nested algorithms
+    already running because of the isolated contexts. However, because the first
+    exception cancels the root pipeline, only the first g_NumThreads items
+    (those that launch the nested pipelines) will be processed. **/
+void Test3_pipeline ( const FilterSet& filters ) {
+    ResetGlobals();
+    intptr_t nestedCalls = 100,
+             minExecuted = (g_NumThreads - 1) * nestedCalls;
+    CustomPipeline<InputFilter, NestingFilterWithIsolatedCtx> testPipeline(filters);
+    TRY();
+        testPipeline.run();
+    CATCH_AND_ASSERT();
+    ASSERT (exceptionCaught, "No exception thrown from the nesting pipeline");
+    if ( g_SolitaryException ) {
+        ASSERT (g_CurExecuted > minExecuted, "Too few tasks survived exception");
+        ASSERT (g_CurExecuted <= minExecuted + (g_ExecutedAtCatch + g_NumThreads), "Too many tasks survived exception");
+    }
+    ASSERT (g_Exceptions == 1, "No try_blocks in any body expected in this test");
+    if ( !g_SolitaryException )
+        ASSERT (g_CurExecuted <= g_ExecutedAtCatch + g_NumThreads, "Too many tasks survived exception");
+} // void Test3_pipeline ()
+
+class NestingFilterWithEhBody : public tbb::filter {
+public:
+    NestingFilterWithEhBody(tbb::filter::mode m, bool ) : filter(m) {}
+
+    void* operator()(void* item) {
+        tbb::task_group_context ctx(tbb::task_group_context::isolated);
+        SimplePipeline testPipeline(serial_parallel);
+        TRY();
+            testPipeline.run(ctx);
+        CATCH();
+        return item;
+    }
+}; // class NestingFilterWithEhBody
+
+//! Uses pipeline body invoking a nested pipeline (with an isolated context) inside a try-block.
+/** Since exception(s) thrown from the nested pipeline are handled by the caller
+    in this test, they affect neither other tasks of the root pipeline
+    nor sibling nested algorithms. **/
+void Test4_pipeline ( const FilterSet& filters ) {
+#if __GNUC__ && !__INTEL_COMPILER
+    if ( strncmp(__VERSION__, "4.1.0", 5) == 0 ) {
+        REMARK_ONCE("Known issue: one of exception handling tests is skipped.\n");
+        return;
+    }
+#endif
+    ResetGlobals( true, true );
+    intptr_t nestedCalls = NUM_ITEMS + 1,
+             nestingCalls = 2 * (NUM_ITEMS + 1),
+             maxExecuted = nestingCalls * nestedCalls;
+    CustomPipeline<InputFilter, NestingFilterWithEhBody> testPipeline(filters);
+    TRY();
+        testPipeline.run();
+    CATCH_AND_ASSERT();
+    ASSERT (!exceptionCaught, "All exceptions must have been handled in the pipeline filters");
+    intptr_t  minExecuted = 0;
+    if ( g_SolitaryException ) {
+        minExecuted = maxExecuted - nestedCalls;
+        ASSERT (g_Exceptions != 0, "No exception registered");
+        ASSERT (g_CurExecuted <= minExecuted + g_NumThreads, "Too many tasks survived exception");
+    }
+    else {
+        minExecuted = g_Exceptions;
+        ASSERT (g_Exceptions > 1 && g_Exceptions <= nestingCalls, "Unexpected actual number of exceptions");
+        ASSERT (g_CurExecuted >= minExecuted, "Too many executed tasks reported");
+        ASSERT (g_CurExecuted <= g_ExecutedAtCatch + g_NumThreads, "Too many tasks survived multiple exceptions");
+    }
+} // void Test4_pipeline ()
+
+//! Testing filter::finalize method
+#define BUFFER_SIZE     32
+#define NUM_BUFFERS     1024
+
+tbb::atomic<size_t> g_AllocatedCount; // Number of currently allocated buffers
+tbb::atomic<size_t> g_TotalCount; // Total number of allocated buffers
+
+//! Base class for all filters involved in finalize method testing
+class FinalizationBaseFilter : public tbb::filter {
+public:
+    FinalizationBaseFilter ( tbb::filter::mode m ) : filter(m) {}
+
+    // Deletes buffers if an exception occurred
+    virtual void finalize( void* item ) {
+        size_t* m_Item = (size_t*)item;
+        delete[] m_Item;
+        --g_AllocatedCount;
+    }
+};
+
+//! Input filter to test finalize method
+class InputFilterWithFinalization: public FinalizationBaseFilter {
+public:
+    InputFilterWithFinalization() : FinalizationBaseFilter(tbb::filter::serial) {
+        g_TotalCount = 0;
+    }
+    void* operator()( void* ){
+        if (g_TotalCount == NUM_BUFFERS)
+            return NULL;
+        size_t* item = new size_t[BUFFER_SIZE];
+        for (int i = 0; i < BUFFER_SIZE; i++)
+            item[i] = 1;
+        ++g_TotalCount;
+        ++g_AllocatedCount;
+        return item;
+    }
+};
+
+// The filter multiplies each buffer item by 10.
+class ProcessingFilterWithFinalization : public FinalizationBaseFilter {
+public:
+    ProcessingFilterWithFinalization (tbb::filter::mode _mode, bool) : FinalizationBaseFilter (_mode) {}
+
+    void* operator()( void* item) {
+        if (g_TotalCount > NUM_BUFFERS / 2)
+            ThrowTestException(1);
+        size_t* m_Item = (size_t*)item;
+        for (int i = 0; i < BUFFER_SIZE; i++)
+            m_Item[i] *= 10;
+        return item;
+    }
+};
+
+// Output filter deletes previously allocated buffer
+class OutputFilterWithFinalization : public FinalizationBaseFilter {
+public:
+    OutputFilterWithFinalization (tbb::filter::mode m) : FinalizationBaseFilter (m) {}
+
+    void* operator()( void* item){
+        size_t* m_Item = (size_t*)item;
+        delete[] m_Item;
+        --g_AllocatedCount;
+        return NULL;
+    }
+};
+
+//! Tests filter::finalize method
+void Test5_pipeline ( const FilterSet& filters ) {
+    ResetGlobals();
+    g_AllocatedCount = 0;
+    CustomPipeline<InputFilterWithFinalization, ProcessingFilterWithFinalization> testPipeline(filters);
+    OutputFilterWithFinalization my_output_filter(tbb::filter::parallel);
+
+    testPipeline.add_filter(my_output_filter);
+    TRY();
+        testPipeline.run();
+    CATCH();
+    ASSERT (g_AllocatedCount == 0, "Memory leak: some buffers weren't destroyed");
+} // void Test5_pipeline ()
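+
+// Illustrative sketch (not part of the original TBB test): the filter::finalize()
+// contract verified by Test5_pipeline(). When the pipeline is cancelled (e.g. by an
+// exception), items still in flight are not passed to the remaining filters; instead
+// each filter's finalize() is called on them so owned resources can be released.
+// The Example* name below is hypothetical.
+class ExampleOwningFilter : public tbb::filter {
+public:
+    ExampleOwningFilter () : tbb::filter(tbb::filter::serial_in_order) {}
+    void* operator()( void* item ) {
+        return item;                            // normal processing path
+    }
+    /*override*/ void finalize( void* item ) {
+        delete[] static_cast<size_t*>(item);    // cleanup path used on cancellation
+    }
+};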
+
+//! Tests pipeline function passed with different combination of filters
+template<void testFunc(const FilterSet&)>
+void TestWithDifferentFilters() {
+    const int NumFilterTypes = 3;
+    const tbb::filter::mode modes[NumFilterTypes] = {
+            tbb::filter::parallel,
+            tbb::filter::serial,
+            tbb::filter::serial_out_of_order
+        };
+    for ( int i = 0; i < NumFilterTypes; ++i ) {
+        for ( int j = 0; j < NumFilterTypes; ++j ) {
+            for ( int k = 0; k < 2; ++k )
+                testFunc( FilterSet(modes[i], modes[j], k == 0, k != 0) );
+        }
+    }
+}
+
+#endif /* TBB_USE_EXCEPTIONS */
+
+class FilterToCancel : public tbb::filter {
+public:
+    FilterToCancel(bool is_parallel)
+        : filter( is_parallel ? tbb::filter::parallel : tbb::filter::serial_in_order )
+    {}
+    void* operator()(void* item) {
+        ++g_CurExecuted;
+        CancellatorTask::WaitUntilReady();
+        return item;
+    }
+}; // class FilterToCancel
+
+template <class Filter_to_cancel> 
+class PipelineLauncherTask : public tbb::task {
+    tbb::task_group_context &my_ctx;
+public:
+    PipelineLauncherTask ( tbb::task_group_context& ctx ) : my_ctx(ctx) {}
+
+    tbb::task* execute () {
+        // Run test when serial filter is the first non-input filter
+        InputFilter inputFilter;
+        Filter_to_cancel filterToCancel(true);
+        tbb::pipeline p;
+        p.add_filter(inputFilter);
+        p.add_filter(filterToCancel);
+        p.run(g_NumTokens, my_ctx);
+        return NULL;
+    }
+};
+
+//! Test for cancelling an algorithm from outside (from a task running in parallel with the algorithm).
+void TestCancelation1_pipeline () {
+    ResetGlobals();
+    g_ThrowException = false;
+    intptr_t  threshold = 10;
+    tbb::task_group_context ctx;
+    ctx.reset();
+    tbb::empty_task &r = *new( tbb::task::allocate_root() ) tbb::empty_task;
+    r.set_ref_count(3);
+    r.spawn( *new( r.allocate_child() ) CancellatorTask(ctx, threshold) );
+    __TBB_Yield();
+    r.spawn( *new( r.allocate_child() ) PipelineLauncherTask<FilterToCancel>(ctx) );
+    TRY();
+        r.wait_for_all();
+    CATCH_AND_FAIL();
+    r.destroy(r);
+    ASSERT (g_CurExecuted < g_ExecutedAtCatch + g_NumThreads, "Too many tasks were executed after cancellation");
+}
+
+class FilterToCancel2 : public tbb::filter {
+public:
+    FilterToCancel2(bool is_parallel)
+        : filter ( is_parallel ? tbb::filter::parallel : tbb::filter::serial_in_order)
+    {}
+
+    void* operator()(void* item) {
+        ++g_CurExecuted;
+        Harness::ConcurrencyTracker ct;
+        // The test will hang (and be timed out by the test system) if is_cancelled() is broken
+        while( !tbb::task::self().is_cancelled() )
+            __TBB_Yield();
+        return item;
+    }
+};
+
+//! Test for cancelling an algorithm from outside (from a task running in parallel with the algorithm).
+/** This version also tests task::is_cancelled() method. **/
+void TestCancelation2_pipeline () {
+    ResetGlobals();
+    RunCancellationTest<PipelineLauncherTask<FilterToCancel2>, CancellatorTask2>();
+    ASSERT (g_CurExecuted <= g_ExecutedAtCatch, "Some tasks were executed after cancellation");
+}
+
+void RunPipelineTests() {
+    REMARK( "pipeline tests\n" );
+    tbb::task_scheduler_init init (g_NumThreads);
+    g_Master = Harness::CurrentTid();
+    g_NumTokens = 2 * g_NumThreads;
+
+    Test0_pipeline();
+#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
+    TestWithDifferentFilters<Test1_pipeline>();
+    TestWithDifferentFilters<Test2_pipeline>();
+    TestWithDifferentFilters<Test3_pipeline>();
+    TestWithDifferentFilters<Test4_pipeline>();
+    TestWithDifferentFilters<Test5_pipeline>();
+#endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN */
+    TestCancelation1_pipeline();
+    TestCancelation2_pipeline();
+}
+
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+#if TBB_USE_EXCEPTIONS
+
+class MyCapturedException : public tbb::captured_exception {
+public:
+    static int m_refCount;
+
+    MyCapturedException () : tbb::captured_exception("MyCapturedException", "test") { ++m_refCount; }
+    ~MyCapturedException () throw() { --m_refCount; }
+
+    MyCapturedException* move () throw() {
+        MyCapturedException* movee = (MyCapturedException*)malloc(sizeof(MyCapturedException));
+        return ::new (movee) MyCapturedException;
+    }
+    void destroy () throw() {
+        this->~MyCapturedException();
+        free(this);
+    }
+    void operator delete ( void* p ) { free(p); }
+};
+
+int MyCapturedException::m_refCount = 0;
+
+void DeleteTbbException ( volatile tbb::tbb_exception* pe ) {
+    delete pe;
+}
+
+void TestTbbExceptionAPI () {
+    const char *name = "Test captured exception",
+               *reason = "Unit testing";
+    tbb::captured_exception e(name, reason);
+    ASSERT (strcmp(e.name(), name) == 0, "Setting captured exception name failed");
+    ASSERT (strcmp(e.what(), reason) == 0, "Setting captured exception reason failed");
+    tbb::captured_exception c(e);
+    ASSERT (strcmp(c.name(), e.name()) == 0, "Copying captured exception name failed");
+    ASSERT (strcmp(c.what(), e.what()) == 0, "Copying captured exception reason failed");
+    tbb::captured_exception *m = e.move();
+    ASSERT (strcmp(m->name(), name) == 0, "Moving captured exception name failed");
+    ASSERT (strcmp(m->what(), reason) == 0, "Moving captured exception reason failed");
+    ASSERT (!e.name() && !e.what(), "Moving semantics broken");
+    m->destroy();
+
+    MyCapturedException mce;
+    MyCapturedException *mmce = mce.move();
+    ASSERT( MyCapturedException::m_refCount == 2, NULL );
+    DeleteTbbException(mmce);
+    ASSERT( MyCapturedException::m_refCount == 1, NULL );
+}
+
+#endif /* TBB_USE_EXCEPTIONS */
+
+/** If min and max thread numbers specified on the command line are different,
+    the test is run only for 2 sizes of the thread pool (MinThread and MaxThread)
+    to be able to test the high and low contention modes while keeping the test reasonably fast **/
+int TestMain () {
+    REMARK ("Using %s\n", TBB_USE_CAPTURED_EXCEPTION ? "tbb:captured_exception" : "exact exception propagation");
+    MinThread = min(tbb::task_scheduler_init::default_num_threads(), max(2, MinThread));
+    MaxThread = max(MinThread, min(tbb::task_scheduler_init::default_num_threads(), MaxThread));
+    ASSERT (FLAT_RANGE >= FLAT_GRAIN * MaxThread, "Fix defines");
+#if __TBB_TASK_GROUP_CONTEXT
+    int step = max((MaxThread - MinThread + 1)/2, 1);
+    for ( g_NumThreads = MinThread; g_NumThreads <= MaxThread; g_NumThreads += step ) {
+        REMARK ("Number of threads %d\n", g_NumThreads);
+        // Execute in all the possible modes
+        for ( size_t j = 0; j < 4; ++j ) {
+            g_ExceptionInMaster = (j & 1) == 1;
+            g_SolitaryException = (j & 2) != 0;
+            RunParForAndReduceTests();
+            RunParDoTests();
+            RunPipelineTests();
+        }
+    }
+#if TBB_USE_EXCEPTIONS
+    TestTbbExceptionAPI();
+#endif
+#if __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
+    REPORT("Known issue: exception handling tests are skipped.\n");
+#endif
+    return Harness::Done;
+#else  /* !__TBB_TASK_GROUP_CONTEXT */
+    return Harness::Skipped;
+#endif /* !__TBB_TASK_GROUP_CONTEXT */
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_eh_tasks.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_eh_tasks.cpp
new file mode 100644
index 0000000..44ed1f3
--- /dev/null
+++ b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_eh_tasks.cpp
@@ -0,0 +1,763 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#define  __TBB_COUNT_TASK_NODES 1
+#include "harness_inject_scheduler.h"
+
+#if __TBB_TASK_GROUP_CONTEXT
+
+#define __TBB_ATOMICS_CODEGEN_BROKEN __SUNPRO_CC
+
+#include "tbb/task_scheduler_init.h"
+#include "tbb/spin_mutex.h"
+#include "tbb/tick_count.h"
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <string>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+#define NUM_CHILD_TASKS                 256
+#define NUM_ROOT_TASKS                  32
+#define NUM_ROOTS_IN_GROUP              8
+
+//! Statistics about number of tasks in different states
+class TaskStats {
+    typedef tbb::spin_mutex::scoped_lock lock_t;
+    //! Number of tasks that were ever allocated
+    volatile intptr_t m_Existed;
+    //! Number of tasks executed so far
+    volatile intptr_t m_Executed;
+    //! Number of tasks currently allocated (not yet destroyed)
+    volatile intptr_t m_Existing;
+
+    mutable tbb::spin_mutex  m_Mutex;
+public:
+    //! Assumes that assignment is noncontended for the left-hand operand
+    const TaskStats& operator= ( const TaskStats& rhs ) {
+        if ( this != &rhs ) {
+            lock_t lock(rhs.m_Mutex);
+            m_Existed = rhs.m_Existed;
+            m_Executed = rhs.m_Executed;
+            m_Existing = rhs.m_Existing;
+        }
+        return *this;
+    }
+    intptr_t Existed() const { return m_Existed; }
+    intptr_t Executed() const { return m_Executed; }
+    intptr_t Existing() const { return m_Existing; }
+    void IncExisted() { lock_t lock(m_Mutex); ++m_Existed; ++m_Existing; }
+    void IncExecuted() { lock_t lock(m_Mutex); ++m_Executed; }
+    void DecExisting() { lock_t lock(m_Mutex); --m_Existing; }
+    //! Assumed to be used in uncontended manner only
+    void Reset() { m_Executed = m_Existing = m_Existed = 0; }
+};
+
+TaskStats g_CurStat;
+
+inline intptr_t Existed () { return g_CurStat.Existed(); }
+
+#include "harness_eh.h"
+
+bool g_BoostExecutedCount = true;
+volatile bool g_TaskWasCancelled = false;
+
+inline void ResetGlobals () {
+    ResetEhGlobals();
+    g_BoostExecutedCount = true;
+    g_TaskWasCancelled = false;
+    g_CurStat.Reset();
+}
+
+#define ASSERT_TEST_POSTCOND() \
+    ASSERT (g_CurStat.Existed() >= g_CurStat.Executed(), "Total number of tasks is less than executed");  \
+    ASSERT (!g_CurStat.Existing(), "Not all task objects have been destroyed"); \
+    ASSERT (!tbb::task::self().is_cancelled(), "Scheduler's default context has not been cleaned up properly");
+
+inline void WaitForException () {
+    int n = 0;
+    while ( ++n < c_Timeout && !__TBB_load_with_acquire(g_ExceptionCaught) )
+        __TBB_Yield();
+    ASSERT_WARNING( n < c_Timeout, "WaitForException failed" );
+}
+
+class TaskBase : public tbb::task {
+    tbb::task* execute () {
+        tbb::task* t = NULL;
+        __TBB_TRY { 
+            t = do_execute();
+        } __TBB_CATCH( ... ) { 
+            g_CurStat.IncExecuted(); 
+            __TBB_RETHROW();
+        }
+        g_CurStat.IncExecuted();
+        return t;
+    }
+protected:
+    TaskBase ( bool throwException = true ) : m_Throw(throwException) { g_CurStat.IncExisted(); }
+    ~TaskBase () { g_CurStat.DecExisting(); }
+
+    virtual tbb::task* do_execute () = 0;
+
+    bool m_Throw;
+}; // class TaskBase
+
+class LeafTask : public TaskBase {
+    tbb::task* do_execute () {
+        Harness::ConcurrencyTracker ct;
+        WaitUntilConcurrencyPeaks();
+        if ( g_BoostExecutedCount )
+            ++g_CurExecuted;
+        if ( m_Throw )
+            ThrowTestException(NUM_CHILD_TASKS/2);
+        if ( !g_ThrowException )
+            __TBB_Yield();
+        return NULL;
+    }
+public:
+    LeafTask ( bool throw_exception = true ) : TaskBase(throw_exception) {}
+};
+
+class SimpleRootTask : public TaskBase {
+    tbb::task* do_execute () {
+        set_ref_count(NUM_CHILD_TASKS + 1);
+        for ( size_t i = 0; i < NUM_CHILD_TASKS; ++i )
+            spawn( *new( allocate_child() ) LeafTask(m_Throw) );
+        wait_for_all();
+        return NULL;
+    }
+public:
+    SimpleRootTask ( bool throw_exception = true ) : TaskBase(throw_exception) {}
+};
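+
+// Illustrative sketch (not part of the original TBB test): the blocking-parent
+// protocol SimpleRootTask relies on. The reference count must be set to the number
+// of children plus one, where the extra count stands for the wait_for_all() call
+// itself; an exception thrown by any child is rethrown from wait_for_all().
+// The Example* name below is hypothetical.
+class ExampleParentTask : public tbb::task {
+    tbb::task* execute () {
+        const int n = 4;
+        set_ref_count( n + 1 );                 // n children + 1 for the wait
+        for ( int i = 0; i < n; ++i )
+            spawn( *new( allocate_child() ) tbb::empty_task );
+        wait_for_all();                         // rethrows a child's exception, if any
+        return NULL;
+    }
+};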
+
+#if TBB_USE_EXCEPTIONS
+
+class SimpleThrowingTask : public tbb::task {
+public:
+    tbb::task* execute () { throw 0; }
+    ~SimpleThrowingTask() {}
+};
+
+//! Checks if innermost running task information is updated correctly during cancellation processing
+void Test0 () {
+    tbb::task_scheduler_init init (1);
+    tbb::empty_task &r = *new( tbb::task::allocate_root() ) tbb::empty_task;
+    tbb::task_list tl;
+    tl.push_back( *new( r.allocate_child() ) SimpleThrowingTask );
+    tl.push_back( *new( r.allocate_child() ) SimpleThrowingTask );
+    r.set_ref_count( 3 );
+    try {
+        r.spawn_and_wait_for_all( tl );
+    }
+    catch (...) {}
+    r.destroy( r );
+}
+
+//! Default exception behavior test. 
+/** Allocates a root task that spawns a bunch of children, one or several of which throw 
+    a test exception in a worker or master thread (depending on the global setting). **/
+void Test1 () {
+    ResetGlobals();
+    tbb::empty_task &r = *new( tbb::task::allocate_root() ) tbb::empty_task;
+    ASSERT (!g_CurStat.Existing() && !g_CurStat.Existed() && !g_CurStat.Executed(), 
+            "something wrong with the task accounting");
+    r.set_ref_count(NUM_CHILD_TASKS + 1);
+    for ( int i = 0; i < NUM_CHILD_TASKS; ++i )
+        r.spawn( *new( r.allocate_child() ) LeafTask );
+    TRY();
+        r.wait_for_all();
+    CATCH_AND_ASSERT();
+    r.destroy(r);
+    ASSERT_TEST_POSTCOND();
+} // void Test1 ()
+
+//! Default exception behavior test. 
+/** Allocates and spawns root task that runs a bunch of children, one of which throws
+    a test exception in a worker thread. (Similar to Test1, except that the root task 
+    is spawned by the test function, and children are created by the root task instead 
+    of the test function body.) **/
+void Test2 () {
+    ResetGlobals();
+    SimpleRootTask &r = *new( tbb::task::allocate_root() ) SimpleRootTask;
+    ASSERT (g_CurStat.Existing() == 1 && g_CurStat.Existed() == 1 && !g_CurStat.Executed(), 
+            "something wrong with the task accounting");
+    TRY();
+        tbb::task::spawn_root_and_wait(r);
+    CATCH_AND_ASSERT();
+    ASSERT (g_ExceptionCaught, "no exception occurred");
+    ASSERT_TEST_POSTCOND();
+} // void Test2 ()
+
+//! The same as Test2() except the root task has explicit context.
+/** The context is initialized as bound in order to check correctness of its associating 
+    with a root task. **/
+void Test3 () {
+    ResetGlobals();
+    tbb::task_group_context  ctx(tbb::task_group_context::bound);
+    SimpleRootTask &r = *new( tbb::task::allocate_root(ctx) ) SimpleRootTask;
+    ASSERT (g_CurStat.Existing() == 1 && g_CurStat.Existed() == 1 && !g_CurStat.Executed(), 
+            "something wrong with the task accounting");
+    TRY();
+        tbb::task::spawn_root_and_wait(r);
+    CATCH_AND_ASSERT();
+    ASSERT (g_ExceptionCaught, "no exception occurred");
+    ASSERT_TEST_POSTCOND();
+} // void Test3 ()
+
+class RootLauncherTask : public TaskBase {
+    tbb::task_group_context::kind_type m_CtxKind;
+    tbb::task* do_execute () {
+        tbb::task_group_context  ctx(m_CtxKind);
+        SimpleRootTask &r = *new( allocate_root(ctx) ) SimpleRootTask;
+        TRY();
+            spawn_root_and_wait(r);
+            // Give a child of our siblings a chance to throw the test exception
+            WaitForException();
+        CATCH();
+        ASSERT (__TBB_EXCEPTION_TYPE_INFO_BROKEN || !g_UnknownException, "unknown exception was caught");
+        return NULL;
+    }
+public:
+    RootLauncherTask ( tbb::task_group_context::kind_type ctx_kind = tbb::task_group_context::isolated ) : m_CtxKind(ctx_kind) {}
+};
+
+/** Allocates and spawns a bunch of roots, each of which allocates and spawns a new
+    root with an isolated context; each of these in turn spawns a bunch of children,
+    one of which throws a test exception in a worker thread. **/
+void Test4 () {
+    ResetGlobals();
+    tbb::task_list  tl;
+    for ( size_t i = 0; i < NUM_ROOT_TASKS; ++i )
+        tl.push_back( *new( tbb::task::allocate_root() ) RootLauncherTask );
+    TRY();
+        tbb::task::spawn_root_and_wait(tl);
+    CATCH_AND_ASSERT();
+    ASSERT (!exceptionCaught, "exception in this scope is unexpected");
+    intptr_t  num_tasks_expected = NUM_ROOT_TASKS * (NUM_CHILD_TASKS + 2);
+    ASSERT (g_CurStat.Existed() == num_tasks_expected, "Wrong total number of tasks");
+    if ( g_SolitaryException )
+        ASSERT (g_CurStat.Executed() >= num_tasks_expected - NUM_CHILD_TASKS, "Unexpected number of executed tasks");
+    ASSERT_TEST_POSTCOND();
+} // void Test4 ()
+
+class RootsGroupLauncherTask : public TaskBase {
+    tbb::task* do_execute () {
+        tbb::task_group_context  ctx (tbb::task_group_context::isolated);
+        tbb::task_list  tl;
+        for ( size_t i = 0; i < NUM_ROOT_TASKS; ++i )
+            tl.push_back( *new( allocate_root(ctx) ) SimpleRootTask );
+        TRY();
+            spawn_root_and_wait(tl);
+            // Give worker a chance to throw exception
+            WaitForException();
+        CATCH_AND_ASSERT();
+        return NULL;
+    }
+};
+
+/** Allocates and spawns a bunch of roots, each of which allocates and spawns a group
+    of roots sharing one isolated context; each of these in turn spawns a bunch of
+    children, one of which throws a test exception in a worker thread. **/
+void Test5 () {
+    ResetGlobals();
+    tbb::task_list  tl;
+    for ( size_t i = 0; i < NUM_ROOTS_IN_GROUP; ++i )
+        tl.push_back( *new( tbb::task::allocate_root() ) RootsGroupLauncherTask );
+    TRY();
+        tbb::task::spawn_root_and_wait(tl);
+    CATCH_AND_ASSERT();
+    ASSERT (!exceptionCaught, "unexpected exception intercepted");
+    if ( g_SolitaryException )  {
+        intptr_t  num_tasks_expected = NUM_ROOTS_IN_GROUP * (1 + NUM_ROOT_TASKS * (1 + NUM_CHILD_TASKS));
+        intptr_t  min_num_tasks_executed = num_tasks_expected - NUM_ROOT_TASKS * (NUM_CHILD_TASKS + 1);
+        ASSERT (g_CurStat.Executed() >= min_num_tasks_executed, "Too few tasks executed");
+    }
+    ASSERT_TEST_POSTCOND();
+} // void Test5 ()
+
+class ThrowingRootLauncherTask : public TaskBase {
+    tbb::task* do_execute () {
+        tbb::task_group_context  ctx (tbb::task_group_context::bound);
+        SimpleRootTask &r = *new( allocate_root(ctx) ) SimpleRootTask(false);
+        TRY();
+            spawn_root_and_wait(r);
+        CATCH();
+        ASSERT (!exceptionCaught, "unexpected exception intercepted");
+        ThrowTestException(NUM_CHILD_TASKS);
+        g_TaskWasCancelled |= is_cancelled();
+        return NULL;
+    }
+};
+
+class BoundHierarchyLauncherTask : public TaskBase {
+    bool m_Recover;
+
+    void alloc_roots ( tbb::task_group_context& ctx, tbb::task_list& tl ) {
+        for ( size_t i = 0; i < NUM_ROOT_TASKS; ++i )
+            tl.push_back( *new( allocate_root(ctx) ) ThrowingRootLauncherTask );
+    }
+
+    tbb::task* do_execute () {
+        tbb::task_group_context  ctx (tbb::task_group_context::isolated);
+        tbb::task_list tl;
+        alloc_roots(ctx, tl);
+        TRY();
+            spawn_root_and_wait(tl);
+        CATCH_AND_ASSERT();
+        ASSERT (exceptionCaught, "no exception occurred");
+        ASSERT (!tl.empty(), "task list was cleared somehow");
+        if ( g_SolitaryException )
+            ASSERT (g_TaskWasCancelled, "No tasks were cancelled despite the exception");
+        if ( m_Recover ) {
+            // Test task_group_context::unbind and task_group_context::reset methods
+            g_ThrowException = false;
+            exceptionCaught = false;
+            tl.clear();
+            alloc_roots(ctx, tl);
+            ctx.reset();
+            try {
+                spawn_root_and_wait(tl);
+            }
+            catch (...) {
+                exceptionCaught = true;
+            }
+            ASSERT (!exceptionCaught, "unexpected exception occurred");
+        }
+        return NULL;
+    }
+public:
+    BoundHierarchyLauncherTask ( bool recover = false ) : m_Recover(recover) {}
+
+}; // class BoundHierarchyLauncherTask
+
+//! Test for bound contexts forming 2 level tree. Exception is thrown on the 1st (root) level.
+/** Allocates and spawns a root that spawns a bunch of 2nd level roots sharing
+    the same isolated context, each of which in turn spawns a single 3rd level
+    root with a bound context, and these 3rd level roots finally spawn bunches of
+    leaves. Leaves do not generate exceptions. The test exception is generated
+    by one of the 2nd level roots. **/
+void Test6 () {
+    ResetGlobals();
+    BoundHierarchyLauncherTask &r = *new( tbb::task::allocate_root() ) BoundHierarchyLauncherTask;
+    TRY();
+        tbb::task::spawn_root_and_wait(r);
+    CATCH_AND_ASSERT();
+    ASSERT (!exceptionCaught, "unexpected exception intercepted");
+    // After the first of the branches (ThrowingRootLauncherTask) completes, 
+    // the rest of the task tree may be collapsed before having a chance to execute leaves.
+    // A number of branches running concurrently with the first one will be able to spawn leaves though.
+    /// \todo: If additional checkpoints are added to the scheduler, the following assertion must be weakened
+    intptr_t  num_tasks_expected = 1 + NUM_ROOT_TASKS * (2 + NUM_CHILD_TASKS);
+    intptr_t  min_num_tasks_created = 1 + g_NumThreads * 2 + NUM_CHILD_TASKS;
+    // 2 stands for BoundHierarchyLauncherTask and SimpleRootTask
+    // 1 corresponds to BoundHierarchyLauncherTask 
+    intptr_t  min_num_tasks_executed = 2 + 1 + NUM_CHILD_TASKS;
+    ASSERT (g_CurStat.Existed() <= num_tasks_expected, "Number of expected tasks is calculated incorrectly");
+    ASSERT (g_CurStat.Existed() >= min_num_tasks_created, "Too few tasks created");
+    ASSERT (g_CurStat.Executed() >= min_num_tasks_executed, "Too few tasks executed");
+    ASSERT_TEST_POSTCOND();
+} // void Test6 ()
+
+//! Tests task_group_context::unbind and task_group_context::reset methods.
+/** Allocates and spawns a root that spawns a bunch of 2nd level roots sharing
+    the same isolated context, each of which in turn spawns a single 3rd level
+    root with a bound context, and these 3rd level roots finally spawn bunches of
+    leaves. Leaves do not generate exceptions. The test exception is generated
+    by one of the 2nd level roots. **/
+void Test7 () {
+    ResetGlobals();
+    BoundHierarchyLauncherTask &r = *new( tbb::task::allocate_root() ) BoundHierarchyLauncherTask;
+    TRY();
+        tbb::task::spawn_root_and_wait(r);
+    CATCH_AND_ASSERT();
+    ASSERT (!exceptionCaught, "unexpected exception intercepted");
+    ASSERT_TEST_POSTCOND();
+} // void Test7 ()
+
+class BoundHierarchyLauncherTask2 : public TaskBase {
+    tbb::task* do_execute () {
+        tbb::task_group_context  ctx;
+        tbb::task_list  tl;
+        for ( size_t i = 0; i < NUM_ROOT_TASKS; ++i )
+            tl.push_back( *new( allocate_root(ctx) ) RootLauncherTask(tbb::task_group_context::bound) );
+        TRY();
+            spawn_root_and_wait(tl);
+        CATCH_AND_ASSERT();
+        // Exception must be intercepted by RootLauncherTask
+        ASSERT (!exceptionCaught, "unexpected exception intercepted");
+        return NULL;
+    }
+}; // class BoundHierarchyLauncherTask2
+
+//! Test for bound contexts forming 2 level tree. Exception is thrown in the 2nd (outer) level.
+/** Allocates and spawns a root that spawns a bunch of 2nd level roots sharing
+    the same isolated context, each of which in turn spawns a single 3rd level
+    root with a bound context, and these 3rd level roots finally spawn bunches of
+    leaves. The test exception is generated by one of the leaves. **/
+void Test8 () {
+    ResetGlobals();
+    BoundHierarchyLauncherTask2 &r = *new( tbb::task::allocate_root() ) BoundHierarchyLauncherTask2;
+    TRY();
+        tbb::task::spawn_root_and_wait(r);
+    CATCH_AND_ASSERT();
+    ASSERT (!exceptionCaught, "unexpected exception intercepted");
+    if ( g_SolitaryException )  {
+        intptr_t  num_tasks_expected = 1 + NUM_ROOT_TASKS * (2 + NUM_CHILD_TASKS);
+        intptr_t  min_num_tasks_created = 1 + g_NumThreads * (2 + NUM_CHILD_TASKS);
+        intptr_t  min_num_tasks_executed = num_tasks_expected - (NUM_CHILD_TASKS + 1);
+        ASSERT (g_CurStat.Existed() <= num_tasks_expected, "Number of expected tasks is calculated incorrectly");
+        ASSERT (g_CurStat.Existed() >= min_num_tasks_created, "Too few tasks created");
+        ASSERT (g_CurStat.Executed() >= min_num_tasks_executed, "Too few tasks executed");
+    }
+    ASSERT_TEST_POSTCOND();
+} // void Test8 ()
+
+template<typename T>
+void ThrowMovableException ( intptr_t threshold, const T& data ) {
+    if ( !IsThrowingThread() )
+        return; 
+    if ( !g_SolitaryException ) {
+#if __TBB_ATOMICS_CODEGEN_BROKEN
+        g_ExceptionsThrown = g_ExceptionsThrown + 1;
+#else
+        ++g_ExceptionsThrown;
+#endif
+        throw tbb::movable_exception<T>(data);
+    }
+    while ( g_CurStat.Existed() < threshold )
+        __TBB_Yield();
+    if ( g_ExceptionsThrown.compare_and_swap(1, 0) == 0 )
+        throw tbb::movable_exception<T>(data);
+}
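+
+// Illustrative sketch (not part of the original TBB test): the round trip that
+// ThrowMovableException() above triggers. A tbb::movable_exception<T> carries a copy
+// of its payload; the scheduler moves it from the throwing worker thread to the thread
+// waiting on the algorithm, where it surfaces as a tbb::tbb_exception whose concrete
+// type can be recovered with dynamic_cast. The Example* name below is hypothetical.
+inline void ExampleMovableExceptionRoundTrip () {
+    try {
+        throw tbb::movable_exception<int>( 42 );
+    } catch ( tbb::tbb_exception& e ) {
+        tbb::movable_exception<int>* me = dynamic_cast<tbb::movable_exception<int>*>(&e);
+        if ( me )
+            (void)me->data();   // the payload travels with the exception object
+    }
+}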
+
+const int g_IntExceptionData = -375;
+const std::string g_StringExceptionData = "My test string";
+
+// Exception data class implementing minimal requirements of tbb::movable_exception 
+class ExceptionData {
+    const ExceptionData& operator = ( const ExceptionData& src );
+    explicit ExceptionData ( int n ) : m_Int(n), m_String(g_StringExceptionData) {}
+public:
+    ExceptionData ( const ExceptionData& src ) : m_Int(src.m_Int), m_String(src.m_String) {}
+    ~ExceptionData () {}
+
+    int m_Int;
+    std::string m_String;
+
+    // Simple way to provide an instance when all initializing constructors are private
+    // and to avoid memory reclamation problems.
+    static ExceptionData s_data;
+};
+
+ExceptionData ExceptionData::s_data(g_IntExceptionData);
+
+typedef tbb::movable_exception<int> SolitaryMovableException;
+typedef tbb::movable_exception<ExceptionData> MultipleMovableException;
+
+class LeafTaskWithMovableExceptions : public TaskBase {
+    bool m_IntAsData;
+
+    tbb::task* do_execute () {
+        Harness::ConcurrencyTracker ct;
+        WaitUntilConcurrencyPeaks();
+        if ( g_SolitaryException )
+            ThrowMovableException<int>(NUM_CHILD_TASKS/2, g_IntExceptionData);
+        else
+            ThrowMovableException<ExceptionData>(NUM_CHILD_TASKS/2, ExceptionData::s_data);
+        return NULL;
+    }
+};
+
+void CheckException ( tbb::tbb_exception& e ) {
+    ASSERT (strcmp(e.name(), (g_SolitaryException ? typeid(SolitaryMovableException) 
+                                                   : typeid(MultipleMovableException)).name() ) == 0, 
+                                                   "Unexpected original exception name");
+    ASSERT (strcmp(e.what(), "tbb::movable_exception") == 0, "Unexpected original exception info ");
+    if ( g_SolitaryException ) {
+        SolitaryMovableException& me = dynamic_cast<SolitaryMovableException&>(e);
+        ASSERT (me.data() == g_IntExceptionData, "Unexpected solitary movable_exception data");
+    }
+    else {
+        MultipleMovableException& me = dynamic_cast<MultipleMovableException&>(e);
+        ASSERT (me.data().m_Int == g_IntExceptionData, "Unexpected multiple movable_exception int data");
+        ASSERT (me.data().m_String == g_StringExceptionData, "Unexpected multiple movable_exception string data");
+    }
+}
+
+void CheckException () {
+    try {
+        throw;
+    } catch ( tbb::tbb_exception& e ) {
+        CheckException(e);
+    }
+    catch ( ... ) {
+    }
+}
+
+//! Test for movable_exception behavior and external exception recording.
+/** Allocates a root task that spawns a bunch of children, one or several of which throw 
+    a movable exception in a worker or master thread (depending on the global settings).
+    The test also checks the correctness of multiple rethrowing of the pending exception. **/
+void TestMovableException () {
+    ResetGlobals();
+    tbb::task_group_context ctx;
+    tbb::empty_task *r = new( tbb::task::allocate_root() ) tbb::empty_task;
+    ASSERT (!g_CurStat.Existing() && !g_CurStat.Existed() && !g_CurStat.Executed(), 
+            "something wrong with the task accounting");
+    r->set_ref_count(NUM_CHILD_TASKS + 1);
+    for ( int i = 0; i < NUM_CHILD_TASKS; ++i )
+        r->spawn( *new( r->allocate_child() ) LeafTaskWithMovableExceptions );
+    TRY()
+        r->wait_for_all();
+    } catch ( ... ) {
+        ASSERT (!ctx.is_group_execution_cancelled(), "");
+        CheckException();
+        try {
+            throw;
+        } catch ( tbb::tbb_exception& e ) {
+            CheckException(e);
+            g_ExceptionCaught = exceptionCaught = true;
+        }
+        catch ( ... ) {
+            g_ExceptionCaught = true;
+            g_UnknownException = unknownException = true;
+        }
+        ctx.register_pending_exception();
+        ASSERT (ctx.is_group_execution_cancelled(), "After exception registration the context must be in the cancelled state");
+    }
+    r->destroy(*r);
+    ASSERT_EXCEPTION();
+    ASSERT_TEST_POSTCOND();
+
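+    // The context still holds the exception registered above, so waiting on a fresh
+    // root allocated in it must rethrow that exception once more.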
+    r = new( tbb::task::allocate_root(ctx) ) tbb::empty_task;
+    r->set_ref_count(1);
+    g_ExceptionCaught = g_UnknownException = false;
+    try {
+        r->wait_for_all();
+    } catch ( tbb::tbb_exception& e ) {
+        CheckException(e);
+        g_ExceptionCaught = true;
+    }
+    catch ( ... ) {
+        g_ExceptionCaught = true;
+        g_UnknownException = true;
+    }
+    ASSERT (g_ExceptionCaught, "no exception occurred");
+    ASSERT (__TBB_EXCEPTION_TYPE_INFO_BROKEN || !g_UnknownException, "unknown exception was caught");
+    r->destroy(*r);
+} // void TestMovableException ()
+
+#endif /* TBB_USE_EXCEPTIONS */
+
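+// Helper that runs T as a new root bound to the supplied context, so that the
+// context can be cancelled from another task executing concurrently with it.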
+template<class T>
+class CtxLauncherTask : public tbb::task {
+    tbb::task_group_context &m_Ctx;
+
+    tbb::task* execute () {
+        tbb::task::spawn_root_and_wait( *new( tbb::task::allocate_root(m_Ctx) ) T );
+        return NULL;
+    }
+public:
+    CtxLauncherTask ( tbb::task_group_context& ctx ) : m_Ctx(ctx) {}
+};
+
+//! Test for cancelling a task hierarchy from outside (from a task running in parallel with it).
+void TestCancelation () {
+    ResetGlobals();
+    g_ThrowException = false;
+    tbb::task_group_context  ctx;
+    tbb::task_list  tl;
+    tl.push_back( *new( tbb::task::allocate_root() ) CtxLauncherTask<SimpleRootTask>(ctx) );
+    tl.push_back( *new( tbb::task::allocate_root() ) CancellatorTask(ctx, NUM_CHILD_TASKS / 4) );
+    TRY();
+        tbb::task::spawn_root_and_wait(tl);
+    CATCH_AND_FAIL();
+    ASSERT (g_CurStat.Executed() <= g_ExecutedAtCatch + g_NumThreads, "Too many tasks were executed after cancellation");
+    ASSERT_TEST_POSTCOND();
+} // void TestCancelation ()
+
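+// Task that recursively creates and destroys nested task_group_contexts so that
+// their destruction races with cancellation propagated by a concurrent CancellatorTask.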
+class CtxDestroyerTask : public tbb::task {
+    int m_nestingLevel;
+
+    tbb::task* execute () {
+        ASSERT ( m_nestingLevel >= 0 && m_nestingLevel < MaxNestingDepth, "Wrong nesting level. The test is broken" );
+        tbb::task_group_context  ctx;
+        tbb::task *t = new( tbb::task::allocate_root(ctx) ) tbb::empty_task;
+        int level = ++m_nestingLevel;
+        if ( level < MaxNestingDepth ) {
+            execute();
+        }
+        else {
+            if ( !CancellatorTask::WaitUntilReady() )
+                REPORT( "Warning: missing wakeup\n" );
+            ++g_CurExecuted;
+        }
+        if ( ctx.is_group_execution_cancelled() )
+            ++s_numCancelled;
+        t->destroy(*t);
+        return NULL;
+    }
+public:
+    CtxDestroyerTask () : m_nestingLevel(0) { s_numCancelled = 0; }
+
+    static const int MaxNestingDepth = 256;
+    static int s_numCancelled;
+};
+
+int CtxDestroyerTask::s_numCancelled = 0;
+
+//! Test for data race between cancellation propagation and context destruction.
+/** If the data race ever occurs, an assertion inside TBB will be triggered. **/
+void TestCtxDestruction () {
+    for ( size_t i = 0; i < 10; ++i ) {
+        tbb::task_group_context  ctx;
+        tbb::task_list  tl;
+        ResetGlobals();
+        g_BoostExecutedCount = false;
+        g_ThrowException = false;
+        CancellatorTask::Reset();
+
+        tl.push_back( *new( tbb::task::allocate_root() ) CtxLauncherTask<CtxDestroyerTask>(ctx) );
+        tl.push_back( *new( tbb::task::allocate_root() ) CancellatorTask(ctx, 1) );
+        tbb::task::spawn_root_and_wait(tl);
+        ASSERT( g_CurExecuted == 1, "Test is broken" );
+        ASSERT( CtxDestroyerTask::s_numCancelled <= CtxDestroyerTask::MaxNestingDepth, "Test is broken" );
+    }
+} // void TestCtxDestruction()
+
+#include <algorithm>
+#include "harness_barrier.h"
+
+class CtxConcurrentDestroyer : NoAssign, Harness::NoAfterlife {
+    static const int ContextsPerThread = 512;
+
+    static int s_Concurrency;
+    static int s_NumContexts;
+    static tbb::task_group_context** s_Contexts;
+    static char* s_Buffer;
+    static Harness::SpinBarrier s_Barrier;
+    static Harness::SpinBarrier s_ExitBarrier;
+
+    struct Shuffler {
+        void operator() () const { std::random_shuffle(s_Contexts, s_Contexts + s_NumContexts); }
+    };
+public:
+    static void Init ( int p ) {
+        s_Concurrency = p;
+        s_NumContexts = p * ContextsPerThread;
+        s_Contexts = new tbb::task_group_context*[s_NumContexts];
+        s_Buffer = new char[s_NumContexts * sizeof(tbb::task_group_context)];
+        s_Barrier.initialize( p );
+        s_ExitBarrier.initialize( p );
+    }
+    static void Uninit () {
+        for ( int i = 0; i < s_NumContexts; ++i ) {
+            tbb::internal::context_list_node_t &node = s_Contexts[i]->my_node;
+            ASSERT( !node.my_next && !node.my_prev, "Destroyed context was written to during context chain update" );
+        }
+        delete [] s_Contexts;
+        delete [] s_Buffer;
+    }
+
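+    // Each thread placement-constructs its own batch of contexts in the shared
+    // buffer; the Shuffler run at the barrier permutes the pointer array, so threads
+    // then destroy contexts created by other threads, stressing concurrent updates
+    // of the global context list.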
+    void operator() ( int id ) const {
+        int begin = ContextsPerThread * id,
+            end = begin + ContextsPerThread;
+        for ( int i = begin; i < end; ++i )
+            s_Contexts[i] = new( s_Buffer + i * sizeof(tbb::task_group_context) ) tbb::task_group_context;
+        s_Barrier.wait( Shuffler() );
+        for ( int i = begin; i < end; ++i ) {
+            s_Contexts[i]->tbb::task_group_context::~task_group_context();
+            memset( s_Contexts[i], 0, sizeof(tbb::task_group_context) );
+        }
+        s_ExitBarrier.wait();
+    }
+}; // class CtxConcurrentDestroyer
+
+int CtxConcurrentDestroyer::s_Concurrency;
+int CtxConcurrentDestroyer::s_NumContexts;
+tbb::task_group_context** CtxConcurrentDestroyer::s_Contexts;
+char* CtxConcurrentDestroyer::s_Buffer;
+Harness::SpinBarrier CtxConcurrentDestroyer::s_Barrier;
+Harness::SpinBarrier CtxConcurrentDestroyer::s_ExitBarrier;
+
+void TestConcurrentCtxDestruction () {
+    REMARK( "TestConcurrentCtxDestruction\n" );
+    CtxConcurrentDestroyer::Init(g_NumThreads);
+    NativeParallelFor( g_NumThreads, CtxConcurrentDestroyer() );
+    CtxConcurrentDestroyer::Uninit();
+}
+
+void RunTests () {
+    REMARK ("Number of threads %d\n", g_NumThreads);
+    tbb::task_scheduler_init init (g_NumThreads);
+    g_Master = Harness::CurrentTid();
+#if TBB_USE_EXCEPTIONS
+    Test1();
+    Test2();
+    Test3();
+    Test4();
+    Test5();
+    Test6();
+    Test7();
+    Test8();
+    TestMovableException();
+#endif /* TBB_USE_EXCEPTIONS */
+    TestCancelation();
+    TestCtxDestruction();
+#if !RML_USE_WCRM
+    TestConcurrentCtxDestruction();
+#endif
+}
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+
+int TestMain () {
+    REMARK ("Using %s", TBB_USE_CAPTURED_EXCEPTION ? "tbb:captured_exception" : "exact exception propagation");
+    MinThread = min(NUM_ROOTS_IN_GROUP, min(tbb::task_scheduler_init::default_num_threads(), max(2, MinThread)));
+    MaxThread = min(NUM_ROOTS_IN_GROUP, max(MinThread, min(tbb::task_scheduler_init::default_num_threads(), MaxThread)));
+    ASSERT (NUM_ROOTS_IN_GROUP < NUM_ROOT_TASKS, "Fix defines");
+#if __TBB_TASK_GROUP_CONTEXT
+#if TBB_USE_EXCEPTIONS
+    // Test0 always runs on one thread
+    Test0();
+#endif /* TBB_USE_EXCEPTIONS */
+    g_SolitaryException = 0;
+    for ( g_NumThreads = MinThread; g_NumThreads <= MaxThread; ++g_NumThreads )
+        RunTests();
+    return Harness::Done;
+#else
+    return Harness::Skipped;
+#endif /* __TBB_TASK_GROUP_CONTEXT */
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_enumerable_thread_specific.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_enumerable_thread_specific.cpp
new file mode 100644 (file)
index 0000000..5d1921f
--- /dev/null
@@ -0,0 +1,1021 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/enumerable_thread_specific.h"
+#include "tbb/task_scheduler_init.h"
+#include "tbb/parallel_for.h"
+#include "tbb/parallel_reduce.h"
+#include "tbb/blocked_range.h"
+#include "tbb/tick_count.h"
+#include "tbb/tbb_allocator.h"
+#include "tbb/tbb_thread.h"
+#include "tbb/atomic.h"
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <cstring>
+#include <vector>
+#include <deque>
+#include <list>
+#include <map>
+#include <utility>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+#include "harness_assert.h"
+#include "harness.h"
+
+#if __TBB_GCC_WARNING_SUPPRESSION_ENABLED
+#pragma GCC diagnostic ignored "-Wuninitialized"
+#endif
+
+static tbb::atomic<int> construction_counter;
+static tbb::atomic<int> destruction_counter;
+
+const int REPETITIONS = 10;
+const int N = 100000;
+const int VALID_NUMBER_OF_KEYS = 100;
+const double EXPECTED_SUM = (REPETITIONS + 1) * N;
+
+//! A minimal class that occupies N bytes. 
+/** Defines default and copy constructor, and allows implicit operator&.
+    Hides operator=. */
+template<size_t N=tbb::internal::NFS_MaxLineSize>
+class minimal: NoAssign {
+private:
+    int my_value;
+    bool is_constructed;
+    char pad[N-sizeof(int) - sizeof(bool)];
+public:
+    minimal() : NoAssign(), my_value(0) { ++construction_counter; is_constructed = true; }
+    minimal( const minimal &m ) : NoAssign(), my_value(m.my_value) { ++construction_counter; is_constructed = true; }
+    ~minimal() { ++destruction_counter; ASSERT(is_constructed, NULL); is_constructed = false; }
+    void set_value( const int i ) { ASSERT(is_constructed, NULL); my_value = i; }
+    int value( ) const { ASSERT(is_constructed, NULL); return my_value; }
+};
+
+//
+// A helper class that simplifies writing the tests since minimal does not 
+// define = or + operators.
+//
+
+template< typename T >
+struct test_helper {
+   static inline void init(T &e) { e = static_cast<T>(0); }  
+   static inline void sum(T &e, const int addend ) { e += static_cast<T>(addend); }
+   static inline void sum(T &e, const double addend ) { e += static_cast<T>(addend); }
+   static inline void set(T &e, const int value ) { e = static_cast<T>(value); }
+   static inline double get(const T &e ) { return static_cast<double>(e); }
+};
+
+template<size_t N>
+struct test_helper<minimal<N> > {
+   static inline void init(minimal<N> &sum) { sum.set_value( 0 ); }  
+   static inline void sum(minimal<N> &sum, const int addend ) { sum.set_value( sum.value() + addend); }
+   static inline void sum(minimal<N> &sum, const double addend ) { sum.set_value( sum.value() + static_cast<int>(addend)); }
+   static inline void sum(minimal<N> &sum, const minimal<N> &addend ) { sum.set_value( sum.value() + addend.value()); }
+   static inline void set(minimal<N> &v, const int value ) { v.set_value( static_cast<int>(value) ); }
+   static inline double get(const minimal<N> &sum ) { return static_cast<double>(sum.value()); }
+};
+
+//! Tag class used to make certain constructors hard to invoke accidentally.
+struct SecretTagType {} SecretTag;
+
+//// functors and routines for initialization and combine
+
+// Addition
+
+template <typename T>
+struct FunctorAddCombineRef {
+    T operator()(const T& left, const T& right) const {
+        return left+right;
+    }
+};
+
+template <size_t N>
+struct FunctorAddCombineRef<minimal<N> > {
+    minimal<N> operator()(const minimal<N>& left, const minimal<N>& right) const {
+        minimal<N> result;
+        result.set_value( left.value() + right.value() ); 
+        return result;
+    }
+};
+
+//! Counts instances of FunctorFinit
+static tbb::atomic<int> FinitCounter;
+
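+// Initialization functor returning Value; its instances are counted so the tests
+// can verify that enumerable_thread_specific releases every copy it makes.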
+template <typename T, int Value>
+struct FunctorFinit {
+    FunctorFinit( const FunctorFinit& ) {++FinitCounter;}
+    FunctorFinit( SecretTagType ) {++FinitCounter;}
+    ~FunctorFinit() {--FinitCounter;}
+    T operator()() { return Value; }
+};
+
+template <size_t N, int Value>
+struct FunctorFinit<minimal<N>,Value> {
+    FunctorFinit( const FunctorFinit& ) {++FinitCounter;}
+    FunctorFinit( SecretTagType ) {++FinitCounter;}
+    ~FunctorFinit() {--FinitCounter;}
+    minimal<N> operator()() {   
+        minimal<N> result;
+        result.set_value( Value );
+        return result;
+    }
+};
+
+template <typename T>
+struct FunctorAddCombine {
+    T operator()(T left, T right ) const {
+        return FunctorAddCombineRef<T>()( left, right );
+    }
+};
+
+template <typename T>
+T my_combine_ref( const T &left, const T &right) { 
+    return FunctorAddCombineRef<T>()( left, right );
+}
+
+template <typename T>
+T my_combine( T left, T right) { return my_combine_ref(left,right); }
+
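+// Functor for combine_each(): accumulates every thread-local value it is passed
+// into the referenced result.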
+template <typename T>
+class combine_one_helper {
+public:
+    combine_one_helper(T& _result) : my_result(_result) {}
+    void operator()(const T& new_bit) { test_helper<T>::sum(my_result, new_bit); }
+    combine_one_helper& operator=(const combine_one_helper& other) { 
+        test_helper<T>::set(my_result, test_helper<T>::get(other.my_result));
+        return *this; 
+    }
+private:
+    T& my_result;
+};
+
+//// end functors and routines
+
+template< typename T >
+void run_serial_scalar_tests(const char *test_name) {
+    tbb::tick_count t0;
+    T sum;
+    test_helper<T>::init(sum);
+
+    REMARK("Testing serial %s... ", test_name);  
+    for (int t = -1; t < REPETITIONS; ++t) {
+        if (Verbose && t == 0) t0 = tbb::tick_count::now(); 
+        for (int i = 0; i < N; ++i) {
+            test_helper<T>::sum(sum,1); 
+        }
+    }
+    double result_value = test_helper<T>::get(sum);
+    ASSERT( EXPECTED_SUM == result_value, NULL);
+    REMARK("done\nserial %s, 0, %g, %g\n", test_name, result_value, ( tbb::tick_count::now() - t0).seconds());
+}
+
+
+template <typename T>
+class parallel_scalar_body: NoAssign {
+    
+    tbb::enumerable_thread_specific<T> &sums;
+public:
+
+    parallel_scalar_body ( tbb::enumerable_thread_specific<T> &_sums ) : sums(_sums) { }
+
+    void operator()( const tbb::blocked_range<int> &r ) const {
+        for (int i = r.begin(); i != r.end(); ++i) 
+            test_helper<T>::sum( sums.local(), 1 );
+    }
+   
+};
+
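+// Exercises every way of reading an ets back without combine(): plain and const
+// iterators, range()/const_range(), copy construction and assignment (including
+// across the two key-mapping policies), finit-initialized and static instances.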
+template< typename T >
+void run_parallel_scalar_tests_nocombine(const char *test_name) {
+
+    typedef tbb::enumerable_thread_specific<T> ets_type;
+
+    // We assume that static_sums is zero-initialized or has a default constructor that zeros it.
+    static ets_type static_sums = ets_type( T() );
+
+    T exemplar;
+    test_helper<T>::init(exemplar);
+    T exemplar23;
+    test_helper<T>::set(exemplar23,23);
+
+    for (int p = MinThread; p <= MaxThread; ++p) { 
+        REMARK("Testing parallel %s on %d thread(s)... ", test_name, p); 
+        tbb::task_scheduler_init init(p);
+        tbb::tick_count t0;
+
+        T iterator_sum;
+        test_helper<T>::init(iterator_sum);
+
+        T finit_ets_sum;
+        test_helper<T>::init(finit_ets_sum);
+
+        T const_iterator_sum; 
+        test_helper<T>::init(const_iterator_sum);
+
+        T range_sum;
+        test_helper<T>::init(range_sum);
+
+        T const_range_sum;
+        test_helper<T>::init(const_range_sum);
+
+        T cconst_sum;
+        test_helper<T>::init(cconst_sum);
+
+        T assign_sum;
+        test_helper<T>::init(assign_sum);
+
+        T cassgn_sum;
+        test_helper<T>::init(cassgn_sum);
+        T non_cassgn_sum;
+        test_helper<T>::init(non_cassgn_sum);
+
+        T static_sum;
+        test_helper<T>::init(static_sum);
+
+        for (int t = -1; t < REPETITIONS; ++t) {
+            if (Verbose && t == 0) t0 = tbb::tick_count::now(); 
+
+            static_sums.clear();
+
+            ets_type sums(exemplar);
+            FunctorFinit<T,0> my_finit(SecretTag);
+            ets_type finit_ets(my_finit);
+
+            ASSERT( sums.empty(), NULL);
+            tbb::parallel_for( tbb::blocked_range<int>( 0, N, 10000 ), parallel_scalar_body<T>( sums ) );
+            ASSERT( !sums.empty(), NULL);
+
+            ASSERT( finit_ets.empty(), NULL);
+            tbb::parallel_for( tbb::blocked_range<int>( 0, N, 10000 ), parallel_scalar_body<T>( finit_ets ) );
+            ASSERT( !finit_ets.empty(), NULL);
+
+            ASSERT(static_sums.empty(), NULL);
+            tbb::parallel_for( tbb::blocked_range<int>( 0, N, 10000 ), parallel_scalar_body<T>( static_sums ) );
+            ASSERT( !static_sums.empty(), NULL);
+
+            // use iterator
+            typename ets_type::size_type size = 0;
+            for ( typename ets_type::iterator i = sums.begin(); i != sums.end(); ++i ) {
+                 ++size;
+                 test_helper<T>::sum(iterator_sum, *i);
+            }
+            ASSERT( sums.size() == size, NULL);
+
+            // use const_iterator
+            for ( typename ets_type::const_iterator i = sums.begin(); i != sums.end(); ++i ) {
+                 test_helper<T>::sum(const_iterator_sum, *i);
+            }
+           
+            // use range_type
+            typename ets_type::range_type r = sums.range();  
+            for ( typename ets_type::range_type::const_iterator i = r.begin(); i != r.end(); ++i ) {
+                 test_helper<T>::sum(range_sum, *i);
+            }
+           
+            // use const_range_type
+            typename ets_type::const_range_type cr = sums.range();  
+            for ( typename ets_type::const_range_type::iterator i = cr.begin(); i != cr.end(); ++i ) {
+                 test_helper<T>::sum(const_range_sum, *i);
+            }
+
+            // test copy constructor, with TLS-cached locals
+            typedef typename tbb::enumerable_thread_specific<T, tbb::cache_aligned_allocator<T>, tbb::ets_key_per_instance> cached_ets_type;
+
+            cached_ets_type cconst(sums); 
+
+            for ( typename cached_ets_type::const_iterator i = cconst.begin(); i != cconst.end(); ++i ) {
+                 test_helper<T>::sum(cconst_sum, *i);
+            }
+           
+            // test assignment
+            ets_type assigned;
+            assigned = sums;
+
+            for ( typename ets_type::const_iterator i = assigned.begin(); i != assigned.end(); ++i ) {
+                 test_helper<T>::sum(assign_sum, *i);
+            }
+
+            // test assign to and from cached locals
+            cached_ets_type cassgn;
+            cassgn = sums;
+            for ( typename cached_ets_type::const_iterator i = cassgn.begin(); i != cassgn.end(); ++i ) {
+                 test_helper<T>::sum(cassgn_sum, *i);
+            }
+
+            ets_type non_cassgn;
+            non_cassgn = cassgn;
+            for ( typename ets_type::const_iterator i = non_cassgn.begin(); i != non_cassgn.end(); ++i ) {
+                 test_helper<T>::sum(non_cassgn_sum, *i);
+            }
+
+            // test finit-initialized ets
+            for(typename ets_type::const_iterator i = finit_ets.begin(); i != finit_ets.end(); ++i) {
+                test_helper<T>::sum(finit_ets_sum, *i);
+            }
+
+            // test static ets
+            for(typename ets_type::const_iterator i = static_sums.begin(); i != static_sums.end(); ++i) {
+                test_helper<T>::sum(static_sum, *i);
+            }
+
+        }
+
+        ASSERT( EXPECTED_SUM == test_helper<T>::get(iterator_sum), NULL);
+        ASSERT( EXPECTED_SUM == test_helper<T>::get(const_iterator_sum), NULL);
+        ASSERT( EXPECTED_SUM == test_helper<T>::get(range_sum), NULL);
+        ASSERT( EXPECTED_SUM == test_helper<T>::get(const_range_sum), NULL);
+
+        ASSERT( EXPECTED_SUM == test_helper<T>::get(cconst_sum), NULL);
+        ASSERT( EXPECTED_SUM == test_helper<T>::get(assign_sum), NULL);
+        ASSERT( EXPECTED_SUM == test_helper<T>::get(cassgn_sum), NULL);
+        ASSERT( EXPECTED_SUM == test_helper<T>::get(non_cassgn_sum), NULL);
+        ASSERT( EXPECTED_SUM == test_helper<T>::get(finit_ets_sum), NULL);
+        ASSERT( EXPECTED_SUM == test_helper<T>::get(static_sum), NULL);
+
+        REMARK("done\nparallel %s, %d, %g, %g\n", test_name, p, test_helper<T>::get(iterator_sum), 
+                                                      ( tbb::tick_count::now() - t0).seconds());
+    }
+}
+
+template< typename T >
+void run_parallel_scalar_tests(const char *test_name) {
+
+    typedef tbb::enumerable_thread_specific<T> ets_type;
+
+    // We assume that static_sums is zero-initialized or has a default constructor that zeros it.
+    static ets_type static_sums = ets_type( T() );
+
+    T exemplar;
+    test_helper<T>::init(exemplar);
+
+    run_parallel_scalar_tests_nocombine<T>(test_name);
+
+    for (int p = MinThread; p <= MaxThread; ++p) { 
+        REMARK("Testing parallel %s on %d thread(s)... ", test_name, p); 
+        tbb::task_scheduler_init init(p);
+        tbb::tick_count t0;
+
+        T combine_sum;
+        test_helper<T>::init(combine_sum);
+
+        T combine_ref_sum;
+        test_helper<T>::init(combine_ref_sum);
+
+        T combine_one_sum;
+        test_helper<T>::init(combine_one_sum);
+
+        T static_sum;
+        test_helper<T>::init(static_sum);
+
+        for (int t = -1; t < REPETITIONS; ++t) {
+            if (Verbose && t == 0) t0 = tbb::tick_count::now(); 
+
+            static_sums.clear();
+
+            ets_type sums(exemplar);
+
+            ASSERT( sums.empty(), NULL);
+            tbb::parallel_for( tbb::blocked_range<int>( 0, N, 10000 ), parallel_scalar_body<T>( sums ) );
+            ASSERT( !sums.empty(), NULL);
+
+            ASSERT(static_sums.empty(), NULL);
+            tbb::parallel_for( tbb::blocked_range<int>( 0, N, 10000 ), parallel_scalar_body<T>( static_sums ) );
+            ASSERT( !static_sums.empty(), NULL);
+
+
+            // Use combine
+            test_helper<T>::sum(combine_sum, sums.combine(my_combine<T>));
+            test_helper<T>::sum(combine_ref_sum, sums.combine(my_combine_ref<T>));
+            test_helper<T>::sum(static_sum, static_sums.combine(my_combine<T>));
+
+            combine_one_helper<T> my_helper(combine_one_sum);
+            sums.combine_each(my_helper);
+        }
+
+        ASSERT( EXPECTED_SUM == test_helper<T>::get(combine_sum), NULL);
+        ASSERT( EXPECTED_SUM == test_helper<T>::get(combine_ref_sum), NULL);
+        ASSERT( EXPECTED_SUM == test_helper<T>::get(static_sum), NULL);
+
+        REMARK("done\nparallel combine %s, %d, %g, %g\n", test_name, p, test_helper<T>::get(combine_sum), 
+                                                      ( tbb::tick_count::now() - t0).seconds());
+    }
+}
+
+template <typename T>
+class parallel_vector_for_body: NoAssign {
+    
+    tbb::enumerable_thread_specific< std::vector<T, tbb::tbb_allocator<T> > > &locals;
+public:
+
+    parallel_vector_for_body ( tbb::enumerable_thread_specific< std::vector<T, tbb::tbb_allocator<T> > > &_locals ) : locals(_locals) { }
+
+    void operator()( const tbb::blocked_range<int> &r ) const {
+        T one;
+        test_helper<T>::set(one, 1);
+
+        for (int i = r.begin(); i < r.end(); ++i) {
+            locals.local().push_back( one );
+        }
+    }
+   
+};
+
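+// parallel_reduce body that sums all elements of every thread-local vector and
+// counts how many locals (one per participating thread) were visited.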
+template <typename R, typename T>
+struct parallel_vector_reduce_body {
+
+    T sum;    
+    size_t count;    
+
+    parallel_vector_reduce_body ( ) : count(0) { test_helper<T>::init(sum); }
+    parallel_vector_reduce_body ( parallel_vector_reduce_body<R, T> &, tbb::split ) : count(0) {  test_helper<T>::init(sum); }
+
+    void operator()( const R &r ) {
+        for (typename R::iterator ri = r.begin(); ri != r.end(); ++ri) {
+            const std::vector< T, tbb::tbb_allocator<T>  > &v = *ri; 
+            ++count;
+            for (typename std::vector<T, tbb::tbb_allocator<T> >::const_iterator vi = v.begin(); vi != v.end(); ++vi) {
+                test_helper<T>::sum(sum, *vi);
+            }
+        }
+    }
+
+    void join( const parallel_vector_reduce_body &b ) {
+        test_helper<T>::sum(sum,b.sum);
+        count += b.count;
+    }
+   
+};
+
+template< typename T >
+void run_parallel_vector_tests(const char *test_name) {
+    tbb::tick_count t0;
+    typedef std::vector<T, tbb::tbb_allocator<T> > container_type;
+
+    for (int p = MinThread; p <= MaxThread; ++p) {
+        REMARK("Testing parallel %s on %d thread(s)... ", test_name, p);
+        tbb::task_scheduler_init init(p);
+
+        T sum;
+        test_helper<T>::init(sum);
+
+        for (int t = -1; t < REPETITIONS; ++t) {
+            if (Verbose && t == 0) t0 = tbb::tick_count::now(); 
+            typedef typename tbb::enumerable_thread_specific< container_type > ets_type;
+            ets_type vs;
+
+            ASSERT( vs.empty(), NULL);
+            tbb::parallel_for ( tbb::blocked_range<int> (0, N, 10000), parallel_vector_for_body<T>( vs ) );
+            ASSERT( !vs.empty(), NULL);
+
+            // copy construct
+            ets_type vs2(vs); // this causes an assertion failure, related to allocators...
+
+            // assign
+            ets_type vs3;
+            vs3 = vs;
+
+            parallel_vector_reduce_body< typename tbb::enumerable_thread_specific< std::vector< T, tbb::tbb_allocator<T>  > >::const_range_type, T > pvrb;
+            tbb::parallel_reduce ( vs.range(1), pvrb );
+
+            test_helper<T>::sum(sum, pvrb.sum);
+
+            ASSERT( vs.size() == pvrb.count, NULL);
+
+            tbb::flattened2d<ets_type> fvs = flatten2d(vs);
+            size_t ccount = fvs.size();
+            size_t elem_cnt = 0;
+            for(typename tbb::flattened2d<ets_type>::const_iterator i = fvs.begin(); i != fvs.end(); ++i) {
+                ++elem_cnt;
+            };
+            ASSERT(ccount == elem_cnt, NULL);
+
+            elem_cnt = 0;
+            for(typename tbb::flattened2d<ets_type>::iterator i = fvs.begin(); i != fvs.end(); ++i) {
+                ++elem_cnt;
+            };
+            ASSERT(ccount == elem_cnt, NULL);
+        }
+
+        double result_value = test_helper<T>::get(sum);
+        ASSERT( EXPECTED_SUM == result_value, NULL);
+        REMARK("done\nparallel %s, %d, %g, %g\n", test_name, p, result_value, ( tbb::tick_count::now() - t0).seconds());
+    }
+}
+
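+// Same as run_parallel_vector_tests, but copy-constructs and assigns between ETS
+// instances that use different key-mapping policies (ets_no_key vs ets_key_per_instance).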
+template<typename T>
+void run_cross_type_vector_tests(const char *test_name) {
+    tbb::tick_count t0;
+    typedef std::vector<T, tbb::tbb_allocator<T> > container_type;
+
+    for (int p = MinThread; p <= MaxThread; ++p) {
+        REMARK("Testing parallel %s on %d thread(s)... ", test_name, p);
+        tbb::task_scheduler_init init(p);
+
+        T sum;
+        test_helper<T>::init(sum);
+
+        for (int t = -1; t < REPETITIONS; ++t) {
+            if (Verbose && t == 0) t0 = tbb::tick_count::now(); 
+            typedef typename tbb::enumerable_thread_specific< container_type, tbb::cache_aligned_allocator<container_type>, tbb::ets_no_key > ets_nokey_type;
+            typedef typename tbb::enumerable_thread_specific< container_type, tbb::cache_aligned_allocator<container_type>, tbb::ets_key_per_instance > ets_tlskey_type;
+            ets_nokey_type vs;
+
+            ASSERT( vs.empty(), NULL);
+            tbb::parallel_for ( tbb::blocked_range<int> (0, N, 10000), parallel_vector_for_body<T>( vs ) );
+            ASSERT( !vs.empty(), NULL);
+
+            // copy construct
+            ets_tlskey_type vs2(vs);
+
+            // assign
+            ets_nokey_type vs3;
+            vs3 = vs2;
+
+            parallel_vector_reduce_body< typename tbb::enumerable_thread_specific< std::vector< T, tbb::tbb_allocator<T>  > >::const_range_type, T > pvrb;
+            tbb::parallel_reduce ( vs3.range(1), pvrb );
+
+            test_helper<T>::sum(sum, pvrb.sum);
+
+            ASSERT( vs3.size() == pvrb.count, NULL);
+
+            tbb::flattened2d<ets_nokey_type> fvs = flatten2d(vs3);
+            size_t ccount = fvs.size();
+            size_t elem_cnt = 0;
+            for(typename tbb::flattened2d<ets_nokey_type>::const_iterator i = fvs.begin(); i != fvs.end(); ++i) {
+                ++elem_cnt;
+            };
+            ASSERT(ccount == elem_cnt, NULL);
+
+            elem_cnt = 0;
+            for(typename tbb::flattened2d<ets_nokey_type>::iterator i = fvs.begin(); i != fvs.end(); ++i) {
+                ++elem_cnt;
+            };
+            ASSERT(ccount == elem_cnt, NULL);
+        }
+
+        double result_value = test_helper<T>::get(sum);
+        ASSERT( EXPECTED_SUM == result_value, NULL);
+        REMARK("done\nparallel %s, %d, %g, %g\n", test_name, p, result_value, ( tbb::tick_count::now() - t0).seconds());
+    }
+}
+
+template< typename T >
+void run_serial_vector_tests(const char *test_name) {
+    tbb::tick_count t0;
+    T sum;
+    test_helper<T>::init(sum);
+    T one;
+    test_helper<T>::set(one, 1);
+
+    REMARK("Testing serial %s... ", test_name);
+    for (int t = -1; t < REPETITIONS; ++t) {
+        if (Verbose && t == 0) t0 = tbb::tick_count::now(); 
+        std::vector<T, tbb::tbb_allocator<T> > v; 
+        for (int i = 0; i < N; ++i) {
+            v.push_back( one );
+        }
+        for (typename std::vector<T, tbb::tbb_allocator<T> >::const_iterator i = v.begin(); i != v.end(); ++i) 
+            test_helper<T>::sum(sum, *i); 
+    }
+
+    double result_value = test_helper<T>::get(sum);
+    ASSERT( EXPECTED_SUM == result_value, NULL);
+    REMARK("done\nserial %s, 0, %g, %g\n", test_name, result_value, ( tbb::tick_count::now() - t0).seconds());
+}
+
+const size_t line_size = tbb::internal::NFS_MaxLineSize;
+
+void 
+run_serial_tests() {
+    run_serial_scalar_tests<int>("int");
+    run_serial_scalar_tests<double>("double");
+    run_serial_scalar_tests<minimal<> >("minimal<>");
+    run_serial_vector_tests<int>("std::vector<int, tbb::tbb_allocator<int> >");
+    run_serial_vector_tests<double>("std::vector<double, tbb::tbb_allocator<double> >");
+}
+
+void 
+run_parallel_tests() {
+    run_parallel_scalar_tests<int>("int");
+    run_parallel_scalar_tests<double>("double");
+    run_parallel_scalar_tests_nocombine<minimal<> >("minimal<>");
+    run_parallel_vector_tests<int>("std::vector<int, tbb::tbb_allocator<int> >");
+    run_parallel_vector_tests<double>("std::vector<double, tbb::tbb_allocator<double> >");
+}
+
+void
+run_cross_type_tests() {
+    // cross-type scalar tests are part of run_serial_scalar_tests
+    run_cross_type_vector_tests<int>("std::vector<int, tbb::tbb_allocator<int> >");
+    run_cross_type_vector_tests<double>("std::vector<double, tbb::tbb_allocator<double> >");
+}
+
+typedef tbb::enumerable_thread_specific<minimal<line_size> > flogged_ets;
+
+class set_body {
+    flogged_ets *a;
+
+public:
+    set_body( flogged_ets*_a ) : a(_a) { }
+
+    void operator() ( ) const {
+        for (int i = 0; i < VALID_NUMBER_OF_KEYS; ++i) {
+            a[i].local().set_value(i + 1);
+        }
+    }
+};
+
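+// Spawns max_threads native threads, each of which creates and sets one local value
+// in every enumerable_thread_specific in the array, then joins and deletes them.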
+void do_tbb_threads( int max_threads, flogged_ets a[] ) {
+    std::vector< tbb::tbb_thread * > threads;
+
+    for (int p = 0; p < max_threads; ++p) { 
+        threads.push_back( new tbb::tbb_thread ( set_body( a ) ) ); 
+    }
+
+    for (int p = 0; p < max_threads; ++p) {
+        threads[p]->join();
+    }
+
+    for(int p = 0; p < max_threads; ++p) {
+        delete threads[p];
+    }
+}
+
+void
+flog_key_creation_and_deletion() {
+    const int FLOG_REPETITIONS = 100;
+
+    for (int p = MinThread; p <= MaxThread; ++p) { 
+        REMARK("Testing repeated deletes on %d threads... ", p);
+
+        for (int j = 0; j < FLOG_REPETITIONS; ++j) {
+            construction_counter = 0;
+            destruction_counter = 0;
+
+            // allocates VALID_NUMBER_OF_KEYS enumerable_thread_specific objects; no locals are constructed yet
+            flogged_ets* a = new flogged_ets[VALID_NUMBER_OF_KEYS];
+            ASSERT(int(construction_counter) == 0, NULL);   // no exemplars or actual locals have been constructed
+            ASSERT(int(destruction_counter) == 0, NULL);    // and none have been destroyed
+
+            // causes p * VALID_NUMBER_OF_KEYS minimals to be created
+            do_tbb_threads(p, a); 
+
+            for (int i = 0; i < VALID_NUMBER_OF_KEYS; ++i) {
+                int pcnt = 0;
+                for ( flogged_ets::iterator tli = a[i].begin(); tli != a[i].end(); ++tli ) {
+                    ASSERT( (*tli).value() == i+1, NULL );
+                    ++pcnt;
+                }
+                ASSERT( pcnt == p, NULL);  // should be one local per thread.
+            }
+            delete[] a;
+        }
+
+        ASSERT( int(construction_counter) == (p)*VALID_NUMBER_OF_KEYS, NULL );
+        ASSERT( int(destruction_counter) == (p)*VALID_NUMBER_OF_KEYS, NULL );
+
+        REMARK("done\nTesting repeated clears on %d threads... ", p);
+
+        construction_counter = 0;
+        destruction_counter = 0;
+
+        // allocates VALID_NUMBER_OF_KEYS enumerable_thread_specific objects; locals are created by the threads below
+        flogged_ets* a = new flogged_ets[VALID_NUMBER_OF_KEYS];
+        for (int j = 0; j < FLOG_REPETITIONS; ++j) {
+
+            // causes p * VALID_NUMBER_OF_KEYS minimals to be created
+            do_tbb_threads(p, a);
+
+            for (int i = 0; i < VALID_NUMBER_OF_KEYS; ++i) {
+                for ( flogged_ets::iterator tli = a[i].begin(); tli != a[i].end(); ++tli ) {
+                    ASSERT( (*tli).value() == i+1, NULL );
+                }
+                a[i].clear();
+                ASSERT( static_cast<int>(a[i].end() - a[i].begin()) == 0, NULL );
+            }
+
+        }
+
+        delete[] a;
+
+        ASSERT( int(construction_counter) == (FLOG_REPETITIONS*p)*VALID_NUMBER_OF_KEYS, NULL );
+        ASSERT( int(destruction_counter) == (FLOG_REPETITIONS*p)*VALID_NUMBER_OF_KEYS, NULL );
+
+        REMARK("done\n");
+    }
+
+}
+
+template <typename inner_container>
+void 
+flog_segmented_interator() {
+
+    bool found_error = false;
+    typedef typename inner_container::value_type T;
+    typedef std::vector< inner_container > nested_vec;
+    inner_container my_inner_container;
+    my_inner_container.clear();
+    nested_vec my_vec;
+
+    // simple nested vector (neither level empty)
+    const int maxval = 10;
+    for(int i=0; i < maxval; i++) {
+        my_vec.push_back(my_inner_container);
+        for(int j = 0; j < maxval; j++) {
+            my_vec.at(i).push_back((T)(maxval * i + j));
+        }
+    }
+
+    tbb::internal::segmented_iterator<nested_vec, T> my_si(my_vec);
+
+    T ii;
+    for(my_si=my_vec.begin(), ii=0; my_si != my_vec.end(); ++my_si, ++ii) {
+        if((*my_si) != ii) {
+            found_error = true;
+            REMARK( "*my_si=%d\n", int(*my_si));
+        }
+    }
+
+    // outer level empty
+    my_vec.clear();
+    for(my_si=my_vec.begin(); my_si != my_vec.end(); ++my_si) {
+        found_error = true;
+    }
+
+    // inner levels empty
+    my_vec.clear();
+    for(int i =0; i < maxval; ++i) {
+        my_vec.push_back(my_inner_container);
+    }
+    for(my_si = my_vec.begin(); my_si != my_vec.end(); ++my_si) {
+        found_error = true;
+    }
+
+    // every other inner container is empty
+    my_vec.clear();
+    for(int i=0; i < maxval; ++i) {
+        my_vec.push_back(my_inner_container);
+        if(i%2) {
+            for(int j = 0; j < maxval; ++j) {
+                my_vec.at(i).push_back((T)(maxval * (i/2) + j));
+            }
+        }
+    }
+    for(my_si = my_vec.begin(), ii=0; my_si != my_vec.end(); ++my_si, ++ii) {
+        if((*my_si) != ii) {
+            found_error = true;
+            REMARK("*my_si=%d, ii=%d\n", (int)(*my_si), (int)ii);
+        }
+    }
+
+    tbb::internal::segmented_iterator<nested_vec, const T> my_csi(my_vec);
+    for(my_csi=my_vec.begin(), ii=0; my_csi != my_vec.end(); ++my_csi, ++ii) {
+        if((*my_csi) != ii) {
+            found_error = true;
+            REMARK( "*my_csi=%d\n", int(*my_csi));
+        }
+    }
+
+    // outer level empty
+    my_vec.clear();
+    for(my_csi=my_vec.begin(); my_csi != my_vec.end(); ++my_csi) {
+        found_error = true;
+    }
+
+    // inner levels empty
+    my_vec.clear();
+    for(int i =0; i < maxval; ++i) {
+        my_vec.push_back(my_inner_container);
+    }
+    for(my_csi = my_vec.begin(); my_csi != my_vec.end(); ++my_csi) {
+        found_error = true;
+    }
+
+    // every other inner container is empty
+    my_vec.clear();
+    for(int i=0; i < maxval; ++i) {
+        my_vec.push_back(my_inner_container);
+        if(i%2) {
+            for(int j = 0; j < maxval; ++j) {
+                my_vec.at(i).push_back((T)(maxval * (i/2) + j));
+            }
+        }
+    }
+    for(my_csi = my_vec.begin(), ii=0; my_csi != my_vec.end(); ++my_csi, ++ii) {
+        if((*my_csi) != ii) {
+            found_error = true;
+            REMARK("*my_csi=%d, ii=%d\n", (int)(*my_csi), (int)ii);
+        }
+    }
+
+
+    if(found_error) REPORT("segmented_iterator failed\n");
+}
+
+template <typename Key, typename Val>
+void
+flog_segmented_iterator_map() {
+   typedef typename std::map<Key, Val> my_map;
+   typedef std::vector< my_map > nested_vec;
+   my_map my_inner_container;
+   my_inner_container.clear();
+   nested_vec my_vec;
+   my_vec.clear();
+   bool found_error = false;
+
+   // simple nested vector (neither level empty)
+   const int maxval = 4;
+   for(int i=0; i < maxval; i++) {
+       my_vec.push_back(my_inner_container);
+       for(int j = 0; j < maxval; j++) {
+           my_vec.at(i).insert(std::make_pair<Key,Val>(maxval * i + j, 2*(maxval*i + j)));
+       }
+   }
+
+   tbb::internal::segmented_iterator<nested_vec, std::pair<const Key, Val> > my_si(my_vec);
+   Key ii;
+   for(my_si=my_vec.begin(), ii=0; my_si != my_vec.end(); ++my_si, ++ii) {
+       if(((*my_si).first != ii) || ((*my_si).second != 2*ii)) {
+           found_error = true;
+           REMARK( "ii=%d, (*my_si).first=%d, second=%d\n",ii, int((*my_si).first), int((*my_si).second));
+       }
+   }
+
+   tbb::internal::segmented_iterator<nested_vec, const std::pair<const Key, Val> > my_csi(my_vec);
+   for(my_csi=my_vec.begin(), ii=0; my_csi != my_vec.end(); ++my_csi, ++ii) {
+       if(((*my_csi).first != ii) || ((*my_csi).second != 2*ii)) {
+           found_error = true;
+           REMARK( "ii=%d, (*my_csi).first=%d, second=%d\n",ii, int((*my_csi).first), int((*my_csi).second));
+       }
+   }
+}
+
+void
+run_segmented_iterator_tests() {
+   // only the following containers can be used with the segmented iterator.
+   REMARK("Running Segmented Iterator Tests\n");
+   flog_segmented_interator<std::vector< int > >();
+   flog_segmented_interator<std::vector< double > >();
+   flog_segmented_interator<std::deque< int > >();
+   flog_segmented_interator<std::deque< double > >();
+   flog_segmented_interator<std::list< int > >();
+   flog_segmented_interator<std::list< double > >();
+
+   flog_segmented_iterator_map<int, int>();
+   flog_segmented_iterator_map<int, double>(); 
+}
+
+template <typename T>
+void
+run_assign_and_copy_constructor_test(const char *test_name) {
+    REMARK("Testing assignment and copy construction for %s\n", test_name);
+
+    // test initializer with exemplar
+    T initializer0;
+    test_helper<T>::init(initializer0);
+    T initializer7;
+    test_helper<T>::set(initializer7,7);
+    tbb::enumerable_thread_specific<T> create1(initializer7);
+    (void) create1.local();  // create an initialized value
+    ASSERT(7 == test_helper<T>::get(create1.local()), NULL);
+
+    // test copy construction with exemplar initializer
+    create1.clear();
+    tbb::enumerable_thread_specific<T> copy1(create1);
+    (void) copy1.local();
+    ASSERT(7 == test_helper<T>::get(copy1.local()), NULL);
+
+    // test copy assignment with exemplar initializer
+    create1.clear();
+    tbb::enumerable_thread_specific<T> assign1(initializer0);
+    assign1 = create1;
+    (void) assign1.local();
+    ASSERT(7 == test_helper<T>::get(assign1.local()), NULL);
+
+    // test creation with finit function
+    FunctorFinit<T,7> my_finit7(SecretTag);
+    tbb::enumerable_thread_specific<T> create2(my_finit7);
+    (void) create2.local();
+    ASSERT(7 == test_helper<T>::get(create2.local()), NULL);
+
+    // test copy construction with function initializer
+    create2.clear();
+    tbb::enumerable_thread_specific<T> copy2(create2);
+    (void) copy2.local();
+    ASSERT(7 == test_helper<T>::get(copy2.local()), NULL);
+
+    // test copy assignment with function initializer
+    create2.clear();
+    FunctorFinit<T,0> my_finit(SecretTag);
+    tbb::enumerable_thread_specific<T> assign2(my_finit);
+    assign2 = create2;
+    (void) assign2.local();
+    ASSERT(7 == test_helper<T>::get(assign2.local()), NULL);
+}
+
+void
+run_assignment_and_copy_constructor_tests() {
+    REMARK("Running assignment and copy constructor tests\n");
+    run_assign_and_copy_constructor_test<int>("int");
+    run_assign_and_copy_constructor_test<double>("double");
+    // Try class sizes that are close to a cache line in size, in order to check padding calculations.
+    run_assign_and_copy_constructor_test<minimal<line_size-1> >("minimal<line_size-1>");
+    run_assign_and_copy_constructor_test<minimal<line_size> >("minimal<line_size>");
+    run_assign_and_copy_constructor_test<minimal<line_size+1> >("minimal<line_size+1>");
+    ASSERT(FinitCounter==0, NULL);
+}
+
+// Class with no default constructor
+class HasNoDefaultConstructor {
+    HasNoDefaultConstructor();
+public:
+    HasNoDefaultConstructor( SecretTagType ) {}
+};
+
+// Initialization functor for a HasNoDefaultConstructor
+struct HasNoDefaultConstructorFinit {
+    HasNoDefaultConstructor operator()() {
+        return HasNoDefaultConstructor(SecretTag);
+    }
+};
+
+struct HasNoDefaultConstructorCombine {
+    HasNoDefaultConstructor operator()( HasNoDefaultConstructor, HasNoDefaultConstructor ) {
+        return HasNoDefaultConstructor(SecretTag);
+    }
+};
+
+//! Test situations where only default constructor or copy constructor is required.
+void TestInstantiation() {
+    // Test instantiation is possible when copy constructor is not required.
+    tbb::enumerable_thread_specific<NoCopy> ets1;
+
+    // Test instantiation when default constructor is not required, because exemplar is provided.
+    HasNoDefaultConstructor x(SecretTag);
+    tbb::enumerable_thread_specific<HasNoDefaultConstructor> ets2(x);
+    ets2.combine(HasNoDefaultConstructorCombine());
+
+    // Test instantiation when default constructor is not required, because init function is provided.
+    HasNoDefaultConstructorFinit f;
+    tbb::enumerable_thread_specific<HasNoDefaultConstructor> ets3(f);
+    ets3.combine(HasNoDefaultConstructorCombine());
+}
+
+int TestMain () {
+    TestInstantiation();
+    run_segmented_iterator_tests();
+    flog_key_creation_and_deletion();
+
+    if (MinThread == 0) {
+        run_serial_tests();
+        MinThread = 1;
+    }
+    if (MaxThread > 0) {
+        run_parallel_tests();
+        run_cross_type_tests();
+    }
+
+    run_assignment_and_copy_constructor_tests();
+
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_fast_random.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_fast_random.cpp
new file mode 100644 (file)
index 0000000..fbece77
--- /dev/null
@@ -0,0 +1,162 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+/**
+    The test checks that, for random numbers generated over ranges [0, r) with r
+    running from MinThread to MaxThread and with different seeds, the observed
+    probability of each value in the range deviates from the ideal uniform
+    distribution by no more than a factor of AcceptableDeviation.
+**/
+
+#include "harness_inject_scheduler.h"
+
+#define HARNESS_DEFAULT_MIN_THREADS 2
+#define HARNESS_DEFAULT_MAX_THREADS 32
+
+#define TEST_TOTAL_SEQUENCE 0
+
+#include "harness.h"
+#include "tbb/atomic.h"
+
+//! Coefficient defining tolerable deviation from ideal random distribution
+const double AcceptableDeviation = 2.1;
+//! Tolerable probability of failure to achieve tolerable distribution
+const double AcceptableProbabilityOfOutliers = 1e-6;
+//! Coefficient defining the length of the random number series used to estimate the distribution
+/** Number of random values generated per range element, i.e. the larger the range,
+    the longer the series of random values. **/
+const uintptr_t SeriesBaseLen = 100;
+//! Number of random number series to generate
+const uintptr_t NumSeries = 100;
+//! Number of different seeds to try for the random number generator
+const uintptr_t NumSeeds = 100;
+
+tbb::atomic<uintptr_t> NumHighOutliers;
+tbb::atomic<uintptr_t> NumLowOutliers;
+
+inline void CheckProbability ( double probability, double expectedProbability, int index, int numIndices ) {
+    double lowerBound = expectedProbability / AcceptableDeviation,
+           upperBound = expectedProbability * AcceptableDeviation;
+    if ( probability < lowerBound ) {
+        if ( !NumLowOutliers )
+            REMARK( "Warning: Probability %.3f of hitting index %d among %d elements is out of acceptable range (%.3f - %.3f)\n",
+                    probability, index, numIndices, lowerBound, upperBound );
+        ++NumLowOutliers;
+    }
+    else if ( probability > upperBound ) {
+        if ( !NumHighOutliers )
+            REMARK( "Warning: Probability %.3f of hitting index %d among %d elements is out of acceptable range (%.3f - %.3f)\n",
+                    probability, index, numIndices, lowerBound, upperBound );
+        ++NumHighOutliers;
+    }
+}
+
+struct CheckDistributionBody {
+    void operator() ( int id ) const {
+        uintptr_t randomRange = id + MinThread;
+        uintptr_t *curHits = new uintptr_t[randomRange]
+#if TEST_TOTAL_SEQUENCE
+                , *totalHits = new uintptr_t[randomRange]
+#endif
+        ;
+        double expectedProbability = 1./randomRange;
+        // Loop through different seeds
+        for ( uintptr_t i = 0; i < NumSeeds; ++i ) {
+            // Seed value is selected in two ways, the first of which mimics 
+            // the one used by the TBB task scheduler
+            void* seed = i % 2 ? (char*)&curHits + i * 16 : (void*)(i * 8);
+            tbb::internal::FastRandom random( (unsigned)(uintptr_t)seed );
+            memset( curHits, 0, randomRange * sizeof(uintptr_t) );
+#if TEST_TOTAL_SEQUENCE
+            memset( totalHits, 0, randomRange * sizeof(uintptr_t) );
+#endif
+            const uintptr_t seriesLen = randomRange * SeriesBaseLen,
+                            experimentLen = NumSeries * seriesLen;
+            uintptr_t *curSeries = new uintptr_t[seriesLen],  // circular buffer
+                       randsGenerated = 0;
+            // Initialize statistics
+            while ( randsGenerated < seriesLen ) {
+                uintptr_t idx = random.get() % randomRange;
+                ++curHits[idx];
+#if TEST_TOTAL_SEQUENCE
+                ++totalHits[idx];
+#endif
+                curSeries[randsGenerated++] = idx;
+            }
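+            // curHits is a sliding histogram over the most recent seriesLen samples:
+            // each iteration below first validates the per-index probabilities, then
+            // replaces the oldest sample in the circular buffer with a fresh one.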
+            while ( randsGenerated < experimentLen ) {
+                for ( uintptr_t j = 0; j < randomRange; ++j ) {
+                    CheckProbability( double(curHits[j])/seriesLen, expectedProbability, j, randomRange );
+#if TEST_TOTAL_SEQUENCE
+                    CheckProbability( double(totalHits[j])/randsGenerated, expectedProbability, j, randomRange );
+#endif
+                }
+                --curHits[curSeries[randsGenerated % seriesLen]];
+                int idx = random.get() % randomRange;
+                ++curHits[idx];
+#if TEST_TOTAL_SEQUENCE
+                ++totalHits[idx];
+#endif
+                curSeries[randsGenerated++ % seriesLen] = idx;
+            }
+            delete [] curSeries;
+        }
+        delete [] curHits;
+#if TEST_TOTAL_SEQUENCE
+        delete [] totalHits;
+#endif
+    }
+};
+
+#include "tbb/tbb_thread.h"
+
+int TestMain () {
+    ASSERT( AcceptableDeviation < 100, NULL );
+    MinThread = max(MinThread, 2);
+    MaxThread = max(MinThread, MaxThread);
+    double NumChecks = double(NumSeeds) * (MaxThread - MinThread + 1) * (MaxThread + MinThread) / 2.0 * (SeriesBaseLen * NumSeries - SeriesBaseLen);
+    REMARK( "Number of distribution quality checks %g\n", NumChecks );
+    NumLowOutliers = NumHighOutliers = 0;
+    // Parallelism is used in this test only to speed up the long serial checks;
+    // essentially it is a loop over random-number ranges. Ideally tbb::parallel_for
+    // could be used to parallelize the outermost loop in CheckDistributionBody, but
+    // it is not used here to avoid relying on the library being tested.
+    int P = tbb::tbb_thread::hardware_concurrency();
+    while ( MinThread <= MaxThread ) {
+        NativeParallelFor( min(P, MaxThread - MinThread + 1), CheckDistributionBody() );
+        MinThread += P;
+    }
+    double observedProbabilityOfOutliers = (NumLowOutliers + NumHighOutliers) / NumChecks;
+    if ( observedProbabilityOfOutliers > AcceptableProbabilityOfOutliers ) {
+        if ( NumLowOutliers )
+            REPORT( "Warning: %d cases of too low probability of a given number detected\n", (int)NumLowOutliers );
+        if ( NumHighOutliers )
+            REPORT( "Warning: %d cases of too high probability of a given number detected\n", (int)NumHighOutliers );
+        ASSERT( observedProbabilityOfOutliers <= AcceptableProbabilityOfOutliers, NULL );
+    }
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_halt.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_halt.cpp
new file mode 100644 (file)
index 0000000..511d215
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include <cstdio>
+#include <cstdlib>
+#include <cassert>
+#include <utility>
+#include "tbb/task.h"
+#include "tbb/task_scheduler_init.h"
+#include "tbb/tick_count.h"
+#include "tbb/parallel_for.h"
+#include "tbb/blocked_range.h"
+#include "tbb/mutex.h"
+#include "tbb/spin_mutex.h"
+#include "tbb/queuing_mutex.h"
+#include "harness.h"
+
+using namespace std;
+using namespace tbb;
+
+///////////////////// Parallel methods ////////////////////////
+
+// *** Serial shared by mutexes *** //
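+// Threads repeatedly acquire the mutex and bump a shared counter until it reaches
+// SharedN; the work is serial by construction, so excessive wall time indicates
+// threads being stalled rather than doing useful work.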
+int SharedI = 1, SharedN;
+template<typename M>
+class SharedSerialFibBody: NoAssign {
+    M &mutex;
+public:
+    SharedSerialFibBody( M &m ) : mutex( m ) {}
+    //! main loop
+    void operator()( const blocked_range<int>& /*range*/ ) const {
+        for(;;) {
+            typename M::scoped_lock lock( mutex );
+            if(SharedI >= SharedN) break;
+            volatile double sum = 7.3; 
+            sum *= 11.17;
+            ++SharedI;
+        }
+    }
+};
+
+//! Root function
+template<class M>
+void SharedSerialFib(int n)
+{
+    SharedI = 1; 
+    SharedN = n; 
+    M mutex;
+    parallel_for( blocked_range<int>(0,4,1), SharedSerialFibBody<M>( mutex ) );
+}
+
+/////////////////////////// Main ////////////////////////////////////////////////////
+
+double Tsum = 0; int Tnum = 0;
+
+typedef void (*MeasureFunc)(int);
+//! Time func over the range [2..n] and warn if the run takes much longer than the running average (halt detection)
+void Measure(const char *name, MeasureFunc func, int n)
+{
+    tick_count t0;
+    tick_count::interval_t T;
+    REMARK("%s",name);
+    t0 = tick_count::now();
+    for(int number = 2; number <= n; number++)
+        func(number);
+    T = tick_count::now() - t0;
+    double avg = Tnum? Tsum/Tnum : 1;
+    if (avg == 0.0) avg = 1;
+    if(avg * 100 < T.seconds()) {
+        REPORT("Warning: halting detected (%g sec, av: %g)\n", T.seconds(), avg);
+        ASSERT(avg * 1000 > T.seconds(), "Too long halting period");
+    } else {
+        Tsum += T.seconds(); Tnum++;
+    }
+    REMARK("\t- in %f msec\n", T.seconds()*1000);
+}
+
+int TestMain () {
+    MinThread = max(2, MinThread);
+    int NumbersCount = 100;
+    short recycle = 100;
+    do {
+        for(int threads = MinThread; threads <= MaxThread; threads++) {
+            task_scheduler_init scheduler_init(threads);
+            REMARK("Threads number is %d\t", threads);
+            Measure("Shared serial (wrapper mutex)\t", SharedSerialFib<mutex>, NumbersCount);
+            //sum = Measure("Shared serial (spin_mutex)", SharedSerialFib<tbb::spin_mutex>, NumbersCount);
+            //sum = Measure("Shared serial (queuing_mutex)", SharedSerialFib<tbb::queuing_mutex>, NumbersCount);
+        }
+    } while(--recycle);
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_handle_perror.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_handle_perror.cpp
new file mode 100644 (file)
index 0000000..60a047d
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Program for basic correctness of handle_perror, which is internal
+// to the TBB shared library.
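+// Judging by the checks below, handle_perror(error_code, msg) is expected to
+// throw std::runtime_error whose what() string is "<msg>: <strerror(error_code)>".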
+
+#include <cerrno>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <stdexcept>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+#include "../tbb/tbb_misc.h"
+#include "harness.h"
+
+#if TBB_USE_EXCEPTIONS
+
+static void TestHandlePerror() {
+    bool caught = false;
+    try {
+        tbb::internal::handle_perror( EAGAIN, "apple" );
+    } catch( std::runtime_error& e ) {
+#if TBB_USE_EXCEPTIONS
+        REMARK("caught runtime_exception('%s')\n",e.what());
+        ASSERT( memcmp(e.what(),"apple: ",7)==0, NULL );
+        ASSERT( strstr(e.what(),"unavailable")!=NULL, "bad error message?" ); 
+#endif /* TBB_USE_EXCEPTIONS */
+        caught = true;
+    }
+    ASSERT(caught,NULL);
+}
+
+int TestMain () {
+    TestHandlePerror();
+    return Harness::Done;
+}
+
+#else /* !TBB_USE_EXCEPTIONS */
+
+int TestMain () {
+    return Harness::Skipped;
+}
+
+#endif /* TBB_USE_EXCEPTIONS */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_inits_loop.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_inits_loop.cpp
new file mode 100644 (file)
index 0000000..1836ee2
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#if __APPLE__
+
+#define HARNESS_CUSTOM_MAIN 1
+#include "harness.h"
+#include <cstdlib>
+#include "tbb/task_scheduler_init.h"
+
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <signal.h>
+#include <errno.h>
+
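+// exec_test() below forks a child that re-executes this binary with argument "0"
+// (apparently so that ParseCommandLine sets MinThread to 0 and the child takes the
+// task_scheduler_init branch of main), waits for it with waitpid, and decodes any
+// abnormal exit; it returns true on error and false on success.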
+bool exec_test(const char *self) {
+    int status = 1;
+    pid_t p = fork();
+    if(p < 0) {
+        REPORT("fork error: errno=%d: %s\n", errno, strerror(errno));
+        return true;
+    }
+    else if(p) { // parent
+        if(waitpid(p, &status, 0) != p) {
+            REPORT("wait error: errno=%d: %s\n", errno, strerror(errno));
+            return true;
+        }
+        if(WIFEXITED(status)) {
+            if(!WEXITSTATUS(status)) return false; // ok
+            else REPORT("child has exited with return code 0x%x\n", WEXITSTATUS(status));
+        } else {
+            REPORT("child error 0x%x:%s%s ", status, WIFSIGNALED(status)?" signalled":"",
+                WIFSTOPPED(status)?" stopped":"");
+            if(WIFSIGNALED(status))
+                REPORT("%s%s", sys_siglist[WTERMSIG(status)], WCOREDUMP(status)?" core dumped":"");
+            if(WIFSTOPPED(status))
+                REPORT("with %d stop-code", WSTOPSIG(status));
+            REPORT("\n");
+        }
+    }
+    else { // child
+        // re-executing via exec reproduces the error more often
+        execl(self, self, "0", NULL);
+        REPORT("exec fails %s: %d: %s\n", self, errno, strerror(errno));
+        exit(2);
+    }
+    return true;
+}
+
+HARNESS_EXPORT
+int main( int argc, char * argv[] ) {
+    MinThread = 3000;
+    ParseCommandLine( argc, argv );
+    if( MinThread <= 0 ) {
+        tbb::task_scheduler_init init( 2 ); // an even number is required to trigger the error
+    } else {
+        for(int i = 0; i<MinThread; i++) {
+            if(exec_test(argv[0])) {
+                REPORT("ERROR: execution fails at %d-th iteration!\n", i);
+                exit(1);
+            }
+        }
+        REPORT("done\n");
+    }
+}
+
+#else /* !__APPLE__ */
+
+#define HARNESS_NO_PARSE_COMMAND_LINE 1
+#include "harness.h"
+
+int TestMain () {
+    return Harness::Skipped;
+}
+
+#endif /* !__APPLE__ */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_intrusive_list.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_intrusive_list.cpp
new file mode 100644 (file)
index 0000000..c4c4458
--- /dev/null
@@ -0,0 +1,156 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#define HARNESS_NO_PARSE_COMMAND_LINE 1
+#include "harness.h"
+
+#include "../tbb/intrusive_list.h"
+
+#if __TBB_ARENA_PER_MASTER
+
+using tbb::internal::intrusive_list_node;
+
+// Machine word filled with repeated pattern of FC bits
+const uintptr_t NoliMeTangere = ~uintptr_t(0)/0xFF*0xFC;
+
+struct VerificationBase : Harness::NoAfterlife {
+    uintptr_t m_Canary;
+    VerificationBase () : m_Canary(NoliMeTangere) {}
+};
+
+struct DataItemWithInheritedNodeBase : intrusive_list_node {
+    int m_Data;
+public:
+    DataItemWithInheritedNodeBase ( int value ) : m_Data(value) {}
+
+    int Data() const { return m_Data; }
+};
+
+class DataItemWithInheritedNode : public VerificationBase, public DataItemWithInheritedNodeBase {
+    friend class tbb::internal::intrusive_list<DataItemWithInheritedNode>;
+public:
+    DataItemWithInheritedNode ( int value ) : DataItemWithInheritedNodeBase(value) {}
+};
+
+struct DataItemWithMemberNodeBase {
+    int m_Data;
+public:
+    // Cannot be used by member_intrusive_list to form lists of objects derived from DataItemBase
+    intrusive_list_node m_BaseNode;
+
+    DataItemWithMemberNodeBase ( int value ) : m_Data(value) {}
+
+    int Data() const { return m_Data; }
+};
+
+class DataItemWithMemberNodes : public VerificationBase, public DataItemWithMemberNodeBase {
+public:
+    intrusive_list_node m_Node;
+
+    DataItemWithMemberNodes ( int value ) : DataItemWithMemberNodeBase(value) {}
+};
+
+typedef tbb::internal::intrusive_list<DataItemWithInheritedNode> IntrusiveList1;
+typedef tbb::internal::memptr_intrusive_list<DataItemWithMemberNodes, 
+        DataItemWithMemberNodeBase, &DataItemWithMemberNodeBase::m_BaseNode> IntrusiveList2;
+typedef tbb::internal::memptr_intrusive_list<DataItemWithMemberNodes, 
+        DataItemWithMemberNodes, &DataItemWithMemberNodes::m_Node> IntrusiveList3;
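+// The three list types above exercise the two ways of hooking items into a list
+// used here: IntrusiveList1 relies on the item inheriting from intrusive_list_node,
+// while IntrusiveList2 and IntrusiveList3 use memptr_intrusive_list with a pointer
+// to a node member, located in the base class and in the derived class respectively.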
+
+const int NumElements = 256 * 1024;
+
+//! Iterates through the list forward and backward checking the validity of values stored by the list nodes
+template<class List, class Iterator>
+void CheckListNodes ( List& il, int valueStep ) {
+    int i;
+    Iterator it = il.begin();
+    for ( i = valueStep - 1; it != il.end(); ++it, i += valueStep ) {
+        ASSERT( it->Data() == i, "Unexpected node value while iterating forward" );
+        ASSERT( (*it).m_Canary == NoliMeTangere, "Memory corruption" );
+    }
+    ASSERT( i == NumElements + valueStep - 1, "Wrong number of list elements while iterating forward" );
+    it = il.end();
+    for ( i = NumElements - 1, it--; it != il.end(); --it, i -= valueStep ) {
+        ASSERT( (*it).Data() == i, "Unexpected node value while iterating backward" );
+        ASSERT( it->m_Canary == NoliMeTangere, "Memory corruption" );
+    }
+    ASSERT( i == -1, "Wrong number of list elements while iterating backward" );
+}
+
+template<class List, class Item>
+void TestListOperations () {
+    typedef typename List::iterator iterator;
+    List il;
+    for ( int i = NumElements - 1; i >= 0; --i )
+        il.push_front( *new Item(i) );
+    CheckListNodes<const List, typename List::const_iterator>( il, 1 );
+    iterator it = il.begin();
+    for ( ; it != il.end(); ++it ) {
+        Item &item = *it;
+        it = il.erase( it );
+        delete &item;
+    }
+    CheckListNodes<List, iterator>( il, 2 );
+    for ( it = il.begin(); it != il.end(); ++it ) {
+        Item &item = *it;
+        il.remove( *it++ );
+        delete &item;
+    }
+    CheckListNodes<List, iterator>( il, 4 );
+}
+
+#include "harness_bad_expr.h"
+
+template<class List, class Item>
+void TestListAssertions () {
+#if TRY_BAD_EXPR_ENABLED
+    tbb::set_assertion_handler( AssertionFailureHandler );
+    List il1, il2;
+    Item n1(1), n2(2), n3(3);
+    il1.push_front(n1);
+    TRY_BAD_EXPR( il2.push_front(n1), "only one intrusive list" );
+    TRY_BAD_EXPR( il1.push_front(n1), "only one intrusive list" );
+    il2.push_front(n2);
+    TRY_BAD_EXPR( il1.remove(n3), "not in the list" );
+    tbb::set_assertion_handler( NULL );
+#endif /* TRY_BAD_EXPR_ENABLED */
+}
+#endif /* __TBB_ARENA_PER_MASTER */
+
+int TestMain () {
+#if __TBB_ARENA_PER_MASTER
+    TestListOperations<IntrusiveList1, DataItemWithInheritedNode>();
+    TestListOperations<IntrusiveList2, DataItemWithMemberNodes>();
+    TestListOperations<IntrusiveList3, DataItemWithMemberNodes>();
+    TestListAssertions<IntrusiveList1, DataItemWithInheritedNode>();
+    TestListAssertions<IntrusiveList2, DataItemWithMemberNodes>();
+    TestListAssertions<IntrusiveList3, DataItemWithMemberNodes>();
+    return Harness::Done;
+#else
+    return Harness::Skipped;
+#endif /* __TBB_ARENA_PER_MASTER */
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_ittnotify.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_ittnotify.cpp
new file mode 100644 (file)
index 0000000..c92e45b
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#if !TBB_USE_THREADING_TOOLS
+    #define TBB_USE_THREADING_TOOLS 1
+#endif
+
+#include "harness.h"
+
+#if DO_ITT_NOTIFY
+
+#include "tbb/spin_mutex.h"
+#include "tbb/spin_rw_mutex.h"
+#include "tbb/queuing_rw_mutex.h"
+#include "tbb/queuing_mutex.h"
+#include "tbb/mutex.h"
+#include "tbb/recursive_mutex.h"
+#include "tbb/parallel_for.h"
+#include "tbb/blocked_range.h"
+#include "tbb/task_scheduler_init.h"
+
+
+#include "../tbb/itt_notify.h"
+
+
+template<typename M>
+class WorkEmulator: NoAssign {
+    M& m_mutex;
+    static volatile size_t s_anchor;
+public:
+    void operator()( tbb::blocked_range<size_t>& range ) const {
+        for( size_t i=range.begin(); i!=range.end(); ++i ) {
+            typename M::scoped_lock lock(m_mutex);
+            for ( size_t j = 0; j!=range.end(); ++j )
+                s_anchor = (s_anchor - i) / 2 + (s_anchor + j) / 2;
+        }
+    }
+    WorkEmulator( M& mutex ) : m_mutex(mutex) {}
+};
+
+template<typename M>
+volatile size_t WorkEmulator<M>::s_anchor = 0;
+
+
+template<class M>
+void Test( const char * name ) {
+    REMARK("%s time = ",name);
+    M mtx;
+    tbb::profiling::set_name(mtx, name);
+
+    const int n = 10000;
+    tbb::parallel_for( tbb::blocked_range<size_t>(0,n,n/100), WorkEmulator<M>(mtx) );
+}
+
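+    // TEST_MUTEX(type, name) expands to Test<tbb::type>( name ): it labels the mutex
+    // for the threading tools via tbb::profiling::set_name and then runs contended
+    // work under it with parallel_for.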
+    #define TEST_MUTEX(type, name)  Test<tbb::type>( name )
+
+#endif /* !DO_ITT_NOTIFY */
+
+int TestMain () {
+#if DO_ITT_NOTIFY
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        REMARK( "testing with %d workers\n", p );
+        tbb::task_scheduler_init init( p );
+        TEST_MUTEX( spin_mutex, "Spin Mutex" );
+        TEST_MUTEX( queuing_mutex, "Queuing Mutex" );
+        TEST_MUTEX( queuing_rw_mutex, "Queuing RW Mutex" );
+        TEST_MUTEX( spin_rw_mutex, "Spin RW Mutex" );
+    }
+    return Harness::Done;
+#else /* !DO_ITT_NOTIFY */
+    return Harness::Skipped;
+#endif /* !DO_ITT_NOTIFY */
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_lambda.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_lambda.cpp
new file mode 100644 (file)
index 0000000..8958877
--- /dev/null
@@ -0,0 +1,240 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#define NOMINMAX
+#include "tbb/tbb.h"
+#include "tbb/combinable.h"
+#include <cstdio>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <list>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+using namespace std;
+using namespace tbb;
+
+typedef pair<int,int> max_element_t;
+
+void f(int val, int *arr, int start, int stop) {
+    for (int i=start; i<=stop; ++i) {
+        arr[i] = val;
+    }
+}
+
+#include "harness.h"
+
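+// Classic fork-join recursion: when lambdas are available, each call runs
+// Fib(n-1) and Fib(n-2) as task_group tasks and waits for both before summing.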
+int Fib(int n) {
+    if( n<2 ) {
+        return n;
+    } else {
+        int x=0, y=0;
+        task_group g;
+#if __TBB_LAMBDAS_PRESENT
+        g.run( [&]{x=Fib(n-1);} ); // spawn a task
+        g.run( [&]{y=Fib(n-2);} ); // spawn another task
+        g.wait();                  // wait for both tasks to complete
+#endif
+        return x+y;
+    }
+}
+
+#include "harness_report.h"
+#include "harness_assert.h"
+
+int TestMain () {
+#if __TBB_LAMBDAS_PRESENT
+    const int N = 1000;
+    const int Grainsize = N/1000;
+    int a[N];
+    ASSERT( MinThread>=1, "Error: Number of threads must be positive.\n");
+
+    for(int p=MinThread; p<=MaxThread; ++p) {
+        task_scheduler_init init(p);
+
+        REMARK("Running lambda expression tests on %d threads...\n", p);
+
+        //test parallel_for
+        REMARK("Testing parallel_for... ");
+        parallel_for(blocked_range<int>(0,N,Grainsize),
+                     [&] (blocked_range<int>& r) {
+                         for (int i=r.begin(); i!=r.end(); ++i)    a[i] = i;
+                     });
+        ASSERT(a[0]==0 && a[N-1]==N-1, "parallel_for w/lambdas failed.\n");
+        REMARK("passed.\n");
+
+        //test parallel_reduce
+        REMARK("Testing parallel_reduce... ");
+        int sum = parallel_reduce(blocked_range<int>(0,N,Grainsize), int(0),
+                                  [&] (blocked_range<int>& r, int current_sum) -> int {
+                                      for (int i=r.begin(); i!=r.end(); ++i)
+                                          current_sum += a[i]*(1000-i);
+                                      return current_sum;
+                                  },
+                                  [] (const int x1, const int x2) {
+                                      return x1+x2;
+                                  } );
+
+        max_element_t max_el =
+            parallel_reduce(blocked_range<int>(0,N,Grainsize), make_pair(a[0], 0),
+                            [&] (blocked_range<int>& r, max_element_t current_max)
+                            -> max_element_t {
+                                for (int i=r.begin(); i!=r.end(); ++i)
+                                    if (a[i]>current_max.first)
+                                        current_max = make_pair(a[i], i);
+                                return current_max;
+                            },
+                            [] (const max_element_t x1, const max_element_t x2) {
+                                return (x1.first>x2.first)?x1:x2;
+                            });
+        ASSERT(sum==166666500 && max_el.first==999 && max_el.second==999,
+               "parallel_reduce w/lambdas failed.\n");
+        REMARK("passed.\n");
+
+        //test parallel_do
+        REMARK("Testing parallel_do... ");
+        list<int> s;
+        s.push_back(0);
+
+        parallel_do(s.begin(), s.end(),
+                    [&](int foo, parallel_do_feeder<int>& feeder) {
+                        if (foo == 42) return;
+                        else if (foo>42) {
+                            s.push_back(foo-3);
+                            feeder.add(foo-3);
+                        } else {
+                            s.push_back(foo+5);
+                            feeder.add(foo+5);
+                        }
+                    });
+        ASSERT(s.back()==42, "parallel_do w/lambda failed.\n");
+        REMARK("passed.\n");
+
+        //test parallel_invoke
+        REMARK("Testing parallel_invoke... ");
+        parallel_invoke([&]{ f(2, a, 0, N/3); },
+                        [&]{ f(1, a, N/3+1, 2*(N/3)); },
+                        [&]{ f(0, a, 2*(N/3)+1, N-1); });
+        ASSERT(a[0]==2.0 && a[N-1]==0.0, "parallel_invoke w/lambda failed.\n");
+        REMARK("passed.\n");
+
+        //test tbb_thread
+        REMARK("Testing tbb_thread... ");
+        tbb_thread::id myId;
+        tbb_thread myThread([](int x, int y) {
+                                ASSERT(x==42 && y==64, "tbb_thread w/lambda failed.\n");
+                                REMARK("passed.\n");
+                            }, 42, 64);
+        myThread.join();
+
+        // test task_group
+        REMARK("Testing task_group... ");
+        int result;
+        result = Fib(32);
+        ASSERT(result==2178309, "task_group w/lambda failed.\n");
+        REMARK("passed.\n");
+
+        // Reset array a to index values
+        parallel_for(blocked_range<int>(0,N,Grainsize),
+                     [&] (blocked_range<int>& r) {
+                         for (int i=r.begin(); i!=r.end(); ++i)    a[i] = i;
+                     });
+        // test parallel_sort
+        REMARK("Testing parallel_sort... ");
+        int pivot = 42;
+
+        // sort elements by increasing distance from the pivot
+        parallel_sort(a, a+N,
+                      [&](int x, int y) { return(abs(pivot-x) < abs(pivot-y)); });
+        ASSERT(a[0]==42 && a[N-1]==N-1, "parallel_sort w/lambda failed.\n");
+        REMARK("passed.\n");
+
+        //test combinable
+        REMARK("Testing combinable... ");
+        combinable<std::pair<int,int> > minmax_c([&]() { return std::make_pair(a[0], a[0]); } );
+
+        parallel_for(blocked_range<int>(0,N),
+                     [&] (const blocked_range<int> &r) {
+                         std::pair<int,int>& mmr = minmax_c.local();
+                         for(int i=r.begin(); i!=r.end(); ++i) {
+                             if (mmr.first > a[i]) mmr.first = a[i];
+                             if (mmr.second < a[i]) mmr.second = a[i];
+                         }
+                     });
+        minmax_c.combine_each([](std::pair<int,int> x) {
+                                  int sum;
+                                  sum = x.first + x.second;
+                              });
+        std::pair<int,int> minmax_result_c;
+        minmax_result_c =
+            minmax_c.combine([](std::pair<int,int> x, std::pair<int,int> y) {
+                                 return std::make_pair(x.first<y.first?x.first:y.first,
+                                                       x.second>y.second?x.second:y.second);
+                             });
+        ASSERT(minmax_result_c.first==0 && minmax_result_c.second==999, 
+               "combinable w/lambda failed.\n");
+        REMARK("passed.\n");
+
+        //test enumerable_thread_specific
+        REMARK("Testing enumerable_thread_specific... ");
+        enumerable_thread_specific< std::pair<int,int> > minmax_ets([&]() { return std::make_pair(a[0], a[0]); } );
+
+        parallel_for(blocked_range<int>(0,N),
+                     [&] (const blocked_range<int> &r) {
+                         std::pair<int,int>& mmr = minmax_ets.local();
+                         for(int i=r.begin(); i!=r.end(); ++i) {
+                             if (mmr.first > a[i]) mmr.first = a[i];
+                             if (mmr.second < a[i]) mmr.second = a[i];
+                         }
+                     });
+        minmax_ets.combine_each([](std::pair<int,int> x) {
+                                    int sum;
+                                    sum = x.first + x.second;
+                                });
+        std::pair<int,int> minmax_result_ets;
+        minmax_result_ets =
+            minmax_ets.combine([](std::pair<int,int> x, std::pair<int,int> y) {
+                                   return std::make_pair(x.first<y.first?x.first:y.first,
+                                                         x.second>y.second?x.second:y.second);
+                               });
+        ASSERT(minmax_result_ets.first==0 && minmax_result_ets.second==999,
+               "enumerable_thread_specific w/lambda failed.\n");
+        REMARK("passed.\n");
+    }
+    return Harness::Done;
+#else
+    return Harness::Skipped;
+#endif /* !__TBB_LAMBDAS_PRESENT */
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_atexit.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_atexit.cpp
new file mode 100644 (file)
index 0000000..ad742e2
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+/* Regression test against a bug in the TBB allocator that manifested when a
+   dynamic library called atexit or registered destructors of static objects.
+   If the allocator is not initialized yet, a deadlock can occur, because the
+   allocator library has static-object destructors of its own, registered
+   during allocator initialization, and atexit is protected by a non-recursive
+   mutex in some GLIBC versions.
+ */
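+/* How the scenario is exercised below: the _USRDLL build of this file becomes a
+   DLL whose static object Foo registers 1024 atexit handlers from its constructor,
+   which may allocate memory before main() runs; the executable build only checks
+   whether the TBB malloc proxy is loaded (isMallocOverloaded) and skips otherwise. */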
+
+#if _USRDLL
+
+#include <stdlib.h>
+
+#if _WIN32||_WIN64
+// isMallocOverloaded must be defined in the DLL so that the linker does not
+// drop the dependence on the DLL.
+extern __declspec(dllexport) bool isMallocOverloaded();
+
+bool isMallocOverloaded()
+{
+    return true;
+}
+
+#else
+
+#include <dlfcn.h>
+
+bool isMallocOverloaded()
+{
+    return dlsym(RTLD_DEFAULT, "__TBB_malloc_proxy");
+}
+
+#endif    
+
+#ifndef _PGO_INSTRUMENT
+void dummyFunction() {}
+
+class Foo {
+public:
+    Foo() {
+        // add a lot of exit handlers to cause memory allocation
+        for (int i=0; i<1024; i++)
+            atexit(dummyFunction);
+    }
+};
+
+static Foo f;
+#endif
+
+#else // _USRDLL
+#include "harness.h"
+
+#if _WIN32||_WIN64
+extern __declspec(dllimport)
+#endif
+bool isMallocOverloaded();
+
+int TestMain () {
+#ifdef _PGO_INSTRUMENT
+    REPORT("Known issue: test_malloc_atexit hangs if compiled with -prof-genx\n");
+    return Harness::Skipped;
+#else
+    return isMallocOverloaded()? Harness::Done : Harness::Skipped;
+#endif
+}
+
+#endif // _USRDLL
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_compliance.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_compliance.cpp
new file mode 100644 (file)
index 0000000..e91643b
--- /dev/null
@@ -0,0 +1,1015 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+const int MByte = 1048576; //1MB
+bool __tbb_test_errno = false;
+
+/* _WIN32_WINNT should be defined at the very beginning, 
+   because other headers might include <windows.h>
+*/
+
+#if _WIN32 || _WIN64 && !__MINGW64__
+#undef _WIN32_WINNT
+#define _WIN32_WINNT 0x0500
+#include "tbb/machine/windows_api.h"
+#include <stdio.h>
+#include "harness_report.h"
+
+void limitMem( int limit )
+{
+    static HANDLE hJob = NULL;
+    JOBOBJECT_EXTENDED_LIMIT_INFORMATION jobInfo;
+
+    jobInfo.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_PROCESS_MEMORY;
+    jobInfo.ProcessMemoryLimit = limit? limit*MByte : 2*1024LL*MByte;
+    if (NULL == hJob) {
+        if (NULL == (hJob = CreateJobObject(NULL, NULL))) {
+            REPORT("Can't assign create job object: %ld\n", GetLastError());
+            exit(1);
+        }
+        if (0 == AssignProcessToJobObject(hJob, GetCurrentProcess())) {
+            REPORT("Can't assign process to job object: %ld\n", GetLastError());
+            exit(1);
+        }
+    }
+    if (0 == SetInformationJobObject(hJob, JobObjectExtendedLimitInformation, 
+                                     &jobInfo, sizeof(jobInfo))) {
+        REPORT("Can't set limits: %ld\n", GetLastError());
+        exit(1);
+    }
+}
+// Do not test errno with static VC runtime
+#else
+#include <sys/resource.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <sys/types.h>  // uint64_t on FreeBSD, needed for rlim_t
+#include "harness_report.h"
+
+void limitMem( int limit )
+{
+    rlimit rlim;
+    rlim.rlim_cur = limit? limit*MByte : (rlim_t)RLIM_INFINITY;
+    rlim.rlim_max = (rlim_t)RLIM_INFINITY;
+    int ret = setrlimit(RLIMIT_AS,&rlim);
+    if (0 != ret) {
+        REPORT("Can't set limits: errno %d\n", errno);
+        exit(1);
+    }
+}
+#endif 
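+/* Both limitMem variants cap how much memory the process may use: the Windows
+   path assigns the process to a Job Object with a per-process memory limit,
+   while the POSIX path uses setrlimit(RLIMIT_AS); a limit of 0 lifts the
+   restriction (RLIM_INFINITY on POSIX, 2 GB on Windows). */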
+
+#define ASSERT_ERRNO(cond, msg)  ASSERT( !__tbb_test_errno || (cond), msg )
+#define CHECK_ERRNO(cond) (__tbb_test_errno && (cond))
+
+#include <time.h>
+#include <errno.h>
+#define __TBB_NO_IMPLICIT_LINKAGE 1
+#include "tbb/scalable_allocator.h"
+#include "tbb/tbb_machine.h"
+
+#define HARNESS_CUSTOM_MAIN 1
+#include "harness.h"
+#include "harness_barrier.h"
+#if __linux__
+#include <stdint.h> // uintptr_t
+#endif
+#if _WIN32 || _WIN64
+#include <malloc.h> // _aligned_(malloc|free|realloc)
+#endif
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <vector>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+const size_t COUNT_ELEM_CALLOC = 2;
+const int COUNT_TESTS = 1000;
+const int COUNT_ELEM = 25000;
+const size_t MAX_SIZE = 1000;
+const int COUNTEXPERIMENT = 10000;
+
+const char strError[]="failed";
+const char strOk[]="done";
+
+typedef unsigned int UINT;
+typedef unsigned char UCHAR;
+typedef unsigned long DWORD;
+typedef unsigned char BYTE;
+
+
+typedef void* TestMalloc(size_t size);
+typedef void* TestCalloc(size_t num, size_t size);
+typedef void* TestRealloc(void* memblock, size_t size);
+typedef void  TestFree(void* memblock);
+typedef int   TestPosixMemalign(void **memptr, size_t alignment, size_t size);
+typedef void* TestAlignedMalloc(size_t size, size_t alignment);
+typedef void* TestAlignedRealloc(void* memblock, size_t size, size_t alignment);
+typedef void  TestAlignedFree(void* memblock);
+
+TestMalloc*  Tmalloc;
+TestCalloc*  Tcalloc;
+TestRealloc* Trealloc;
+TestFree*    Tfree;
+TestAlignedFree* Taligned_free;
+// call alignment-related functions via pointers and check the result's alignment
+int   Tposix_memalign(void **memptr, size_t alignment, size_t size);
+void* Taligned_malloc(size_t size, size_t alignment);
+void* Taligned_realloc(void* memblock, size_t size, size_t alignment);
+
+// pointers to alignment-related functions used while testing
+TestPosixMemalign*  Rposix_memalign;
+TestAlignedMalloc*  Raligned_malloc;
+TestAlignedRealloc* Raligned_realloc;
+
+bool error_occurred = false;
+
+#if __APPLE__
+// Tests that use the variable are skipped on Mac OS* X
+#else
+static bool perProcessLimits = true;
+#endif
+
+const size_t POWERS_OF_2 = 20;
+
+#if __linux__  && __ia64__
+/* Can't use Intel compiler intrinsic due to internal error reported by
+   10.1 compiler */
+pthread_mutex_t counter_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+int32_t __TBB_machine_fetchadd4__TBB_full_fence (volatile void *ptr, int32_t value)
+{
+    pthread_mutex_lock(&counter_mutex);
+    int32_t result = *(int32_t*)ptr;
+    *(int32_t*)ptr = result + value;
+    pthread_mutex_unlock(&counter_mutex);
+    return result;
+}
+
+void __TBB_machine_pause(int32_t /*delay*/) {}
+
+#elif (_WIN32||_WIN64) && defined(_M_AMD64) && !__MINGW64__
+
+void __TBB_machine_pause(__int32 /*delay*/ ) {}
+
+#endif
+
+struct MemStruct
+{
+    void* Pointer;
+    UINT Size;
+
+    MemStruct() : Pointer(NULL), Size(0) {}
+    MemStruct(void* Pointer, UINT Size) : Pointer(Pointer), Size(Size) {}
+};
+
+class CMemTest: NoAssign
+{
+    UINT CountErrors;
+    bool FullLog;
+    Harness::SpinBarrier *limitBarrier;
+    static bool firstTime;
+
+public:
+    CMemTest(Harness::SpinBarrier *limitBarrier, bool isVerbose=false) :
+        CountErrors(0), limitBarrier(limitBarrier)
+        {
+            srand((UINT)time(NULL));
+            FullLog=isVerbose;
+            rand();
+        }
+    void InvariantDataRealloc(bool aligned); //realloc does not change data
+    void NULLReturn(UINT MinSize, UINT MaxSize, int total_threads); // NULL pointer + check errno
+    void UniquePointer(); // unique pointer - check with padding
+    void AddrArifm(); // unique pointer - check with pointer arithmetic
+    bool ShouldReportError();
+    void Free_NULL(); // call free with a NULL parameter
+    void Zerofilling(); // check if arrays are zero-filled
+    void TestAlignedParameters();
+    void RunAllTests(int total_threads);
+    ~CMemTest() {}
+};
+
+class Limit {
+    int limit;
+public:
+    Limit(int limit) : limit(limit) {}
+    void operator() () const {
+        limitMem(limit);
+    }
+};
+
+int argC;
+char** argV;
+
+struct RoundRobin: NoAssign {
+    const long number_of_threads;
+    mutable CMemTest test;
+
+    RoundRobin( long p, Harness::SpinBarrier *limitBarrier, bool verbose ) :
+        number_of_threads(p), test(limitBarrier, verbose) {}
+    void operator()( int /*id*/ ) const 
+        {
+            test.RunAllTests(number_of_threads);
+        }
+};
+
+bool CMemTest::firstTime = true;
+
+static void setSystemAllocs()
+{
+    Tmalloc=malloc;
+    Trealloc=realloc;
+    Tcalloc=calloc;
+    Tfree=free;
+#if (_WIN32 || _WIN64) && !__MINGW64__ && !__MINGW32__
+    Raligned_malloc=_aligned_malloc;
+    Raligned_realloc=_aligned_realloc;
+    Taligned_free=_aligned_free;
+    Rposix_memalign=0;
+#elif  __APPLE__ || __sun || __MINGW64__ || __MINGW32__ //  Mac OS X, MinGW, and Solaris don't have posix_memalign
+    Raligned_malloc=0;
+    Raligned_realloc=0;
+    Taligned_free=0;
+    Rposix_memalign=0;
+#else 
+    Raligned_malloc=0;
+    Raligned_realloc=0;
+    Taligned_free=0;
+    Rposix_memalign=posix_memalign;
+#endif
+}
+
+// check that realloc works as free and as malloc
+void ReallocParam()
+{
+    const int ITERS = 1000;
+    int i;
+    void *bufs[ITERS];
+
+    bufs[0] = Trealloc(NULL, 30*MByte);
+    ASSERT(bufs[0], "Can't get memory to start the test.");
+  
+    for (i=1; i<ITERS; i++)
+    {
+        bufs[i] = Trealloc(NULL, 30*MByte);
+        if (NULL == bufs[i])
+            break;
+    }
+    ASSERT(i<ITERS, "Limits should be decreased for the test to work.");
+  
+    Trealloc(bufs[0], 0);
+    /* There is a race for the free space between different threads at
+       this point, so the test has to run sequentially.
+    */
+    bufs[0] = Trealloc(NULL, 30*MByte);
+    ASSERT(bufs[0], NULL);
+  
+    for (int j=0; j<i; j++)
+        Trealloc(bufs[j], 0);
+}
+
+HARNESS_EXPORT
+int main(int argc, char* argv[]) {
+    argC=argc;
+    argV=argv;
+    MaxThread = MinThread = 1;
+    Tmalloc=scalable_malloc;
+    Trealloc=scalable_realloc;
+    Tcalloc=scalable_calloc;
+    Tfree=scalable_free;
+    Rposix_memalign=scalable_posix_memalign;
+    Raligned_malloc=scalable_aligned_malloc;
+    Raligned_realloc=scalable_aligned_realloc;
+    Taligned_free=scalable_aligned_free;
+
+    // check if we were called to test standard behavior
+    for (int i=1; i< argc; i++) {
+        if (strcmp((char*)*(argv+i),"-s")==0)
+        {
+            setSystemAllocs();
+            argC--;
+            break;
+        }
+    }
+
+    ParseCommandLine( argC, argV );
+#if __linux__
+    /* According to man pthreads,
+       "NPTL threads do not share resource limits (fixed in kernel 2.6.10)".
+       Use per-thread limits on affected systems.
+     */
+    if ( LinuxKernelVersion() < 2*1000000 + 6*1000 + 10)
+        perProcessLimits = false;
+#endif    
+    //-------------------------------------
+#if __APPLE__
+    /* Skipped because memory limits are not enforced under Mac OS X. */
+#else
+    limitMem(200);
+    ReallocParam();
+    limitMem(0);
+#endif
+    
+// For Linux and dynamic runtimes, errno is used to check the allocator functions.
+// Check whether the library was compiled with /MD(d), in which case errno can be used.
+#if _MSC_VER 
+#if defined(_MT) && defined(_DLL) // check errno only if the test itself was compiled with /MD(d)
+    #pragma comment(lib, "version.lib")
+    char*  version_info_block = NULL;
+    int version_info_block_size; 
+    LPVOID comments_block = NULL;
+    UINT comments_block_size;
+#ifdef _DEBUG
+#define __TBBMALLOCDLL "tbbmalloc_debug.dll"
+#else  //_DEBUG
+#define __TBBMALLOCDLL "tbbmalloc.dll"
+#endif //_DEBUG
+    version_info_block_size = GetFileVersionInfoSize( __TBBMALLOCDLL, (LPDWORD)&version_info_block_size );
+    if( version_info_block_size 
+        && ((version_info_block = (char*)malloc(version_info_block_size)) != NULL)
+        && GetFileVersionInfo(  __TBBMALLOCDLL, NULL, version_info_block_size, version_info_block )
+        && VerQueryValue( version_info_block, "\\StringFileInfo\\000004b0\\Comments", &comments_block, &comments_block_size )
+        && strstr( (char*)comments_block, "/MD" )
+        ){
+            __tbb_test_errno = true;
+     }
+     if( version_info_block ) free( version_info_block );
+#endif // defined(_MT) && defined(_DLL)
+#else  // _MSC_VER
+    __tbb_test_errno = true;
+#endif // _MSC_VER
+
+    for( int p=MaxThread; p>=MinThread; --p ) {
+        REMARK("testing with %d threads\n", p );
+        Harness::SpinBarrier *barrier = new Harness::SpinBarrier(p);
+        NativeParallelFor( p, RoundRobin(p, barrier, Verbose) );
+        delete barrier;
+    }
+    if( !error_occurred ) 
+        REPORT("done\n");
+    return 0;
+}
+
+struct TestStruct
+{
+    DWORD field1:2;
+    DWORD field2:6;
+    double field3;
+    UCHAR field4[100];
+    TestStruct* field5;
+//  std::string field6;
+    std::vector<int> field7;
+    double field8;
+    bool IsZero() {
+        int wordSz = sizeof(TestStruct) / sizeof(intptr_t);
+        int tailSz = sizeof(TestStruct) % sizeof(intptr_t);
+
+        intptr_t *buf =(intptr_t*)this;
+        char *bufTail =(char*) (buf+wordSz);
+
+        for (int i=0; i<wordSz; i++)
+            if (buf[i]) return false;
+        for (int i=0; i<tailSz; i++)
+            if (bufTail[i]) return false;
+        return true;
+    }
+};
+
+int Tposix_memalign(void **memptr, size_t alignment, size_t size)
+{
+    int ret = Rposix_memalign(memptr, alignment, size);
+    if (0 == ret)
+        ASSERT(0==((uintptr_t)*memptr & (alignment-1)),
+               "allocation result should be aligned");
+    return ret;
+}
+void* Taligned_malloc(size_t size, size_t alignment)
+{
+    void *ret = Raligned_malloc(size, alignment);
+    if (0 != ret)
+        ASSERT(0==((uintptr_t)ret & (alignment-1)),
+               "allocation result should be aligned");
+    return ret;
+}
+void* Taligned_realloc(void* memblock, size_t size, size_t alignment)
+{
+    void *ret = Raligned_realloc(memblock, size, alignment);
+    if (0 != ret)
+        ASSERT(0==((uintptr_t)ret & (alignment-1)),
+               "allocation result should be aligned");
+    return ret;
+}
+
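+// Picks a random power-of-two alignment between sizeof(void*) and
+// sizeof(void*) << (POWERS_OF_2 - 1).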
+inline size_t choose_random_alignment() {
+    return sizeof(void*)<<(rand() % POWERS_OF_2);
+}
+
+void CMemTest::InvariantDataRealloc(bool aligned)
+{
+    size_t size, sizeMin;
+    CountErrors=0;
+    if (FullLog) REPORT("\nInvariant data by realloc....");
+    UCHAR* pchar;
+    sizeMin=size=rand()%MAX_SIZE+10;
+    pchar = aligned?
+        (UCHAR*)Taligned_realloc(NULL,size,choose_random_alignment())
+        : (UCHAR*)Trealloc(NULL,size);
+    if (NULL == pchar)
+        return;
+    for (size_t k=0; k<size; k++)
+        pchar[k]=(UCHAR)k%255+1;
+    for (int i=0; i<COUNTEXPERIMENT; i++)
+    {
+        size=rand()%MAX_SIZE+10;
+        UCHAR *pcharNew = aligned?
+            (UCHAR*)Taligned_realloc(pchar,size, choose_random_alignment())
+            : (UCHAR*)Trealloc(pchar,size);
+        if (NULL == pcharNew)
+            continue;
+        pchar = pcharNew;
+        sizeMin=size<sizeMin ? size : sizeMin;
+        for (size_t k=0; k<sizeMin; k++)
+            if (pchar[k] != (UCHAR)k%255+1)
+            {
+                CountErrors++;
+                if (ShouldReportError())
+                {
+                    REPORT("stand '%c', must stand '%c'\n",pchar[k],(UCHAR)k%255+1);
+                    REPORT("error: data changed (at %llu, SizeMin=%llu)\n",
+                           (long long unsigned)k,(long long unsigned)sizeMin);
+                }
+            }
+    }
+    if (aligned)
+        Taligned_realloc(pchar,0,choose_random_alignment());
+    else
+        Trealloc(pchar,0);
+    if (CountErrors) REPORT("%s\n",strError);
+    else if (FullLog) REPORT("%s\n",strOk);
+    error_occurred |= ( CountErrors>0 ) ;
+    //REPORT("end check\n");
+}
+
+struct PtrSize {
+    void  *ptr;
+    size_t size;
+};
+
+static int cmpAddrs(const void *p1, const void *p2)
+{
+    const PtrSize *a = (const PtrSize *)p1;
+    const PtrSize *b = (const PtrSize *)p2;
+
+    return a->ptr < b->ptr ? -1 : ( a->ptr == b->ptr ? 0 : 1);
+}
+
+void CMemTest::AddrArifm()
+{
+    PtrSize *arr = (PtrSize*)Tmalloc(COUNT_ELEM*sizeof(PtrSize));
+
+    if (FullLog) REPORT("\nUnique pointer using Address arithmetics\n");
+    if (FullLog) REPORT("malloc....");
+    ASSERT(arr, NULL);
+    for (int i=0; i<COUNT_ELEM; i++)
+    {
+        arr[i].size=rand()%MAX_SIZE;
+        arr[i].ptr=Tmalloc(arr[i].size);
+    }
+    qsort(arr, COUNT_ELEM, sizeof(PtrSize), cmpAddrs);
+
+    for (int i=0; i<COUNT_ELEM-1; i++)
+    {
+        if (NULL!=arr[i].ptr && NULL!=arr[i+1].ptr)
+            ASSERT((uintptr_t)arr[i].ptr+arr[i].size <= (uintptr_t)arr[i+1].ptr,
+                   "intersection detected");
+    }
+    //----------------------------------------------------------------
+    if (FullLog) REPORT("realloc....");
+    for (int i=0; i<COUNT_ELEM; i++)
+    {
+        size_t count=arr[i].size*2;
+        void *tmpAddr=Trealloc(arr[i].ptr,count);
+        if (NULL!=tmpAddr) {
+            arr[i].ptr = tmpAddr;
+            arr[i].size = count;
+        } else if (count==0) { // because realloc(..., 0) works as free
+            arr[i].ptr = NULL;
+            arr[i].size = 0;
+        }
+    }
+    qsort(arr, COUNT_ELEM, sizeof(PtrSize), cmpAddrs);
+
+    for (int i=0; i<COUNT_ELEM-1; i++)
+    {
+        if (NULL!=arr[i].ptr && NULL!=arr[i+1].ptr)
+            ASSERT((uintptr_t)arr[i].ptr+arr[i].size <= (uintptr_t)arr[i+1].ptr,
+                   "intersection detected");
+    }
+    for (int i=0; i<COUNT_ELEM; i++)
+    {
+        Tfree(arr[i].ptr);
+    }
+    //-------------------------------------------
+    if (FullLog) REPORT("calloc....");
+    for (int i=0; i<COUNT_ELEM; i++)
+    {
+        arr[i].size=rand()%MAX_SIZE;
+        arr[i].ptr=Tcalloc(arr[i].size,1);
+    }
+    qsort(arr, COUNT_ELEM, sizeof(PtrSize), cmpAddrs);
+
+    for (int i=0; i<COUNT_ELEM-1; i++)
+    {
+        if (NULL!=arr[i].ptr && NULL!=arr[i+1].ptr)
+            ASSERT((uintptr_t)arr[i].ptr+arr[i].size <= (uintptr_t)arr[i+1].ptr,
+                   "intersection detected");
+    }
+    for (int i=0; i<COUNT_ELEM; i++)
+    {
+        Tfree(arr[i].ptr);
+    }
+    Tfree(arr);
+}
+
+void CMemTest::Zerofilling()
+{
+    TestStruct* TSMas;
+    size_t CountElement;
+    CountErrors=0;
+    if (FullLog) REPORT("\nzeroings elements of array....");
+    //test struct
+    for (int i=0; i<COUNTEXPERIMENT; i++)
+    {
+        CountElement=rand()%MAX_SIZE;
+        TSMas=(TestStruct*)Tcalloc(CountElement,sizeof(TestStruct));
+        if (NULL == TSMas)
+            continue;
+        for (size_t j=0; j<CountElement; j++)
+        {
+            if (!(TSMas+j)->IsZero())
+            {
+                CountErrors++;
+                if (ShouldReportError()) REPORT("detect nonzero element at TestStruct\n");
+            }
+        }
+        Tfree(TSMas);
+    }
+    if (CountErrors) REPORT("%s\n",strError);
+    else if (FullLog) REPORT("%s\n",strOk);
+    error_occurred |= ( CountErrors>0 ) ;
+}
+
+#if !__APPLE__
+void CMemTest::NULLReturn(UINT MinSize, UINT MaxSize, int total_threads)
+{
+    // number of 1024-byte allocations that guarantees hitting NULL under the memory limit
+    const int MAXNUM_1024 = (200+50)*1024;
+
+    std::vector<MemStruct> PointerList;
+    void *tmp;
+    CountErrors=0;
+    int CountNULL, num_1024;
+    if (FullLog) REPORT("\nNULL return & check errno:\n");
+    UINT Size;
+    Limit limit_200M(200*total_threads), no_limit(0);
+    void **buf_1024 = (void**)Tmalloc(MAXNUM_1024*sizeof(void*));
+
+    ASSERT(buf_1024, NULL);
+    /* We must have space for pointers when memory limit is hit. 
+       Reserve enough for the worst case. 
+    */
+    PointerList.reserve(200*MByte/MinSize);
+
+    /* There is a bug in the specific version of GLIBC (2.5-12) shipped
+       with RHEL5 that makes the test misbehave on Intel64 and IPF systems
+       when the setrlimit-related part is enabled.
+       Switching to GLIBC 2.5-18 from RHEL5.1 resolved the issue.
+     */
+    if (perProcessLimits)
+        limitBarrier->wait(limit_200M);
+    else
+        limitMem(200);
+
+    /* Regression test against a bug where the allocator dereferenced NULL
+       when memory was exhausted.
+    */
+    for (num_1024=0; num_1024<MAXNUM_1024; num_1024++) {
+        buf_1024[num_1024] = Tcalloc(1024, 1);
+        if (! buf_1024[num_1024]) {
+            ASSERT_ERRNO(errno == ENOMEM, NULL);
+            break;
+        }
+    }
+    for (int i=0; i<num_1024; i++)
+        Tfree(buf_1024[i]);
+    Tfree(buf_1024);
+
+    do {
+        Size=rand()%(MaxSize-MinSize)+MinSize;
+        tmp=Tmalloc(Size);
+        if (tmp != NULL)
+        {
+            memset(tmp, 0, Size);
+            PointerList.push_back(MemStruct(tmp, Size));
+        }
+    } while(tmp != NULL);
+    ASSERT_ERRNO(errno == ENOMEM, NULL);
+    if (FullLog) REPORT("\n");
+
+    // preparation complete, now running tests
+    // malloc
+    if (FullLog) REPORT("malloc....");
+    CountNULL = 0;
+    while (CountNULL==0)
+        for (int j=0; j<COUNT_TESTS; j++)
+        {
+            Size=rand()%(MaxSize-MinSize)+MinSize;
+            errno = ENOMEM+j+1;
+            tmp=Tmalloc(Size);
+            if (tmp == NULL)
+            {
+                CountNULL++;
+                if ( CHECK_ERRNO(errno != ENOMEM) ) {
+                    CountErrors++;
+                    if (ShouldReportError()) REPORT("NULL returned, error: errno (%d) != ENOMEM\n", errno);
+                }
+            }
+            else
+            {
+                // Technically, if malloc returns a non-NULL pointer, it is allowed to set errno anyway.
+                // However, on most systems it does not set errno.
+                bool known_issue = false;
+#if __linux__
+                if( CHECK_ERRNO(errno==ENOMEM) ) known_issue = true;
+#endif /* __linux__ */
+                if ( CHECK_ERRNO(errno != ENOMEM+j+1) && !known_issue) {
+                    CountErrors++;
+                    if (ShouldReportError()) REPORT("error: errno changed to %d though valid pointer was returned\n", errno);
+                }
+                memset(tmp, 0, Size);
+                PointerList.push_back(MemStruct(tmp, Size));
+            }
+        }
+    if (FullLog) REPORT("end malloc\n");
+    if (CountErrors) REPORT("%s\n",strError);
+    else if (FullLog) REPORT("%s\n",strOk);
+    error_occurred |= ( CountErrors>0 ) ;
+
+    CountErrors=0;
+    //calloc
+    if (FullLog) REPORT("calloc....");
+    CountNULL = 0;
+    while (CountNULL==0)
+        for (int j=0; j<COUNT_TESTS; j++)
+        {
+            Size=rand()%(MaxSize-MinSize)+MinSize;
+            errno = ENOMEM+j+1;
+            tmp=Tcalloc(COUNT_ELEM_CALLOC,Size);  
+            if (tmp == NULL)
+            {
+                CountNULL++;
+                if ( CHECK_ERRNO(errno != ENOMEM) ){
+                    CountErrors++;
+                    if (ShouldReportError()) REPORT("NULL returned, error: errno(%d) != ENOMEM\n", errno);
+                }
+            }
+            else
+            {
+                // Technically, if calloc returns a non-NULL pointer, it is allowed to set errno anyway.
+                // However, on most systems it does not set errno.
+                bool known_issue = false;
+#if __linux__
+                if( CHECK_ERRNO(errno==ENOMEM) ) known_issue = true;
+#endif /* __linux__ */
+                if ( CHECK_ERRNO(errno != ENOMEM+j+1) && !known_issue ) {
+                    CountErrors++;
+                    if (ShouldReportError()) REPORT("error: errno changed to %d though valid pointer was returned\n", errno);
+                }      
+                PointerList.push_back(MemStruct(tmp, Size));
+            }
+        }
+    if (FullLog) REPORT("end calloc\n");
+    if (CountErrors) REPORT("%s\n",strError);
+    else if (FullLog) REPORT("%s\n",strOk);
+    error_occurred |= ( CountErrors>0 ) ;
+    CountErrors=0;
+    if (FullLog) REPORT("realloc....");
+    CountNULL = 0;
+    if (PointerList.size() > 0)
+        while (CountNULL==0)
+            for (size_t i=0; i<(size_t)COUNT_TESTS && i<PointerList.size(); i++)
+            {
+                errno = 0;
+                tmp=Trealloc(PointerList[i].Pointer,PointerList[i].Size*2);
+                if (PointerList[i].Pointer == tmp) // the same place
+                {
+                    bool known_issue = false;
+#if __linux__
+                    if( errno==ENOMEM ) known_issue = true;
+#endif /* __linux__ */
+                    if (errno != 0 && !known_issue) {
+                        CountErrors++;
+                        if (ShouldReportError()) REPORT("valid pointer returned, error: errno not kept\n");
+                    }      
+                    PointerList[i].Size *= 2;
+                }
+                else if (tmp != PointerList[i].Pointer && tmp != NULL) // another place
+                {
+                    bool known_issue = false;
+#if __linux__
+                    if( errno==ENOMEM ) known_issue = true;
+#endif /* __linux__ */
+                    if (errno != 0 && !known_issue) {
+                        CountErrors++;
+                        if (ShouldReportError()) REPORT("valid pointer returned, error: errno not kept\n");
+                    }
+                    // the newly allocated area has to be zeroed
+                    memset((char*)tmp + PointerList[i].Size, 0, PointerList[i].Size);
+                    PointerList[i].Pointer = tmp;
+                    PointerList[i].Size *= 2;
+                }
+                else if (tmp == NULL)
+                {
+                    CountNULL++;
+                    if ( CHECK_ERRNO(errno != ENOMEM) )
+                    {
+                        CountErrors++;
+                        if (ShouldReportError()) REPORT("NULL returned, error: errno(%d) != ENOMEM\n", errno);
+                    }
+                    // check data integrity
+                    BYTE *zer=(BYTE*)PointerList[i].Pointer;
+                    for (UINT k=0; k<PointerList[i].Size; k++)
+                        if (zer[k] != 0)
+                        {
+                            CountErrors++;
+                            if (ShouldReportError()) REPORT("NULL returned, error: data changed\n");
+                        }
+                }
+            }
+    if (FullLog) REPORT("realloc end\n");
+    if (CountErrors) REPORT("%s\n",strError);
+    else if (FullLog) REPORT("%s\n",strOk);
+    error_occurred |= ( CountErrors>0 ) ;
+    for (UINT i=0; i<PointerList.size(); i++)
+    {
+        Tfree(PointerList[i].Pointer);
+    }
+
+    if (perProcessLimits)
+        limitBarrier->wait(no_limit);
+    else
+        limitMem(0);
+}
+#endif /* #if __APPLE__ */
+
+void CMemTest::UniquePointer()
+{
+    CountErrors=0;
+    int **MasPointer = (int **)Tmalloc(sizeof(int*)*COUNT_ELEM);
+    size_t *MasCountElem = (size_t*)Tmalloc(sizeof(size_t)*COUNT_ELEM);
+    if (FullLog) REPORT("\nUnique pointer using 0\n");
+    ASSERT(MasCountElem && MasPointer, NULL);
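+    // Strategy: allocate COUNT_ELEM blocks, zero them, then increment every element once;
+    // if an element is already nonzero before the increment, two allocations overlap.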
+    //
+    //-------------------------------------------------------
+    //malloc
+    for (int i=0; i<COUNT_ELEM; i++)
+    {
+        MasCountElem[i]=rand()%MAX_SIZE;
+        MasPointer[i]=(int*)Tmalloc(MasCountElem[i]*sizeof(int));
+        if (NULL == MasPointer[i])
+            MasCountElem[i]=0;
+        for (UINT j=0; j<MasCountElem[i]; j++)
+            *(MasPointer[i]+j)=0;
+    }
+    if (FullLog) REPORT("malloc....");
+    for (UINT i=0; i<COUNT_ELEM-1; i++)
+    {
+        for (UINT j=0; j<MasCountElem[i]; j++)
+        {
+            if (*(*(MasPointer+i)+j)!=0)
+            {
+                CountErrors++;
+                if (ShouldReportError()) REPORT("error, detect 1 with 0x%p\n",(*(MasPointer+i)+j));
+            }
+            *(*(MasPointer+i)+j)+=1;
+        }
+    }
+    if (CountErrors) REPORT("%s\n",strError);
+    else if (FullLog) REPORT("%s\n",strOk);
+    error_occurred |= ( CountErrors>0 ) ;
+    //----------------------------------------------------------
+    //calloc
+    for (int i=0; i<COUNT_ELEM; i++)
+        Tfree(MasPointer[i]);
+    CountErrors=0;
+    for (long i=0; i<COUNT_ELEM; i++)
+    {
+        MasPointer[i]=(int*)Tcalloc(MasCountElem[i]*sizeof(int),2);
+        if (NULL == MasPointer[i])
+            MasCountElem[i]=0;
+    }
+    if (FullLog) REPORT("calloc....");
+    for (int i=0; i<COUNT_ELEM-1; i++)
+    {
+        for (UINT j=0; j<*(MasCountElem+i); j++)
+        {
+            if (*(*(MasPointer+i)+j)!=0)
+            {
+                CountErrors++;
+                if (ShouldReportError()) REPORT("error, detect 1 with 0x%p\n",(*(MasPointer+i)+j));
+            }
+            *(*(MasPointer+i)+j)+=1;
+        }
+    }
+    if (CountErrors) REPORT("%s\n",strError);
+    else if (FullLog) REPORT("%s\n",strOk);
+    error_occurred |= ( CountErrors>0 ) ;
+    //---------------------------------------------------------
+    //realloc
+    CountErrors=0;
+    for (int i=0; i<COUNT_ELEM; i++)
+    {
+        MasCountElem[i]*=2;
+        *(MasPointer+i)=
+            (int*)Trealloc(*(MasPointer+i),MasCountElem[i]*sizeof(int));
+        if (NULL == MasPointer[i])
+            MasCountElem[i]=0;
+        for (UINT j=0; j<MasCountElem[i]; j++)
+            *(*(MasPointer+i)+j)=0;
+    }
+    if (FullLog) REPORT("realloc....");
+    for (int i=0; i<COUNT_ELEM-1; i++)
+    {
+        for (UINT j=0; j<*(MasCountElem+i); j++)
+        {
+            if (*(*(MasPointer+i)+j)!=0)
+            {
+                CountErrors++;
+            }
+            *(*(MasPointer+i)+j)+=1;
+        }
+    }
+    if (CountErrors) REPORT("%s\n",strError);
+    else if (FullLog) REPORT("%s\n",strOk);
+    error_occurred |= ( CountErrors>0 ) ;
+    for (int i=0; i<COUNT_ELEM; i++)
+        Tfree(MasPointer[i]);
+    Tfree(MasCountElem);
+    Tfree(MasPointer);
+}
+
+bool CMemTest::ShouldReportError()
+{
+    if (FullLog)
+        return true;
+    else
+        if (firstTime) {
+            firstTime = false;
+            return true;
+        } else
+            return false;
+}
+
+void CMemTest::Free_NULL()
+{
+    CountErrors=0;
+    if (FullLog) REPORT("\ncall free with parameter NULL....");
+    errno = 0;
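+    // free(NULL) must be a harmless no-op; the test additionally expects errno to stay 0.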
+    for (int i=0; i<COUNTEXPERIMENT; i++)
+    {
+        Tfree(NULL);
+        if (errno != 0)
+        {
+            CountErrors++;
+            if (ShouldReportError()) REPORT("error is found by a call free with parameter NULL\n");
+        }
+    }
+    if (CountErrors) REPORT("%s\n",strError);
+    else if (FullLog) REPORT("%s\n",strOk);
+    error_occurred |= ( CountErrors>0 ) ;
+}
+
+void CMemTest::TestAlignedParameters()
+{
+    void *memptr;
+    int ret;
+
+    if (Rposix_memalign) {
+        // alignment isn't power of 2
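+        // (bad_align & (bad_align-1)) != 0 exactly when bad_align is not a power of two;
+        // such alignments must be rejected with EINVAL.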
+        for (int bad_align=3; bad_align<16; bad_align++)
+            if (bad_align&(bad_align-1)) {
+                ret = Tposix_memalign(NULL, bad_align, 100);
+                ASSERT(EINVAL==ret, NULL);
+            }
+    
+        memptr = &ret;
+        ret = Tposix_memalign(&memptr, 5*sizeof(void*), 100);
+        ASSERT(memptr == &ret,
+               "memptr should not be changed after unsuccesful call");
+        ASSERT(EINVAL==ret, NULL);
+    
+        // alignment is power of 2, but not a multiple of sizeof(void *),
+        // we expect that sizeof(void*) > 2
+        ret = Tposix_memalign(NULL, 2, 100);
+        ASSERT(EINVAL==ret, NULL);
+    }
+    if (Raligned_malloc) {
+        // alignment isn't power of 2
+        for (int bad_align=3; bad_align<16; bad_align++)
+            if (bad_align&(bad_align-1)) {
+                memptr = Taligned_malloc(100, bad_align);
+                ASSERT(NULL==memptr, NULL);
+                ASSERT_ERRNO(EINVAL==errno, NULL);
+            }
+    
+        // size is zero
+        memptr = Taligned_malloc(0, 16);
+        ASSERT(NULL==memptr, "size is zero, so must return NULL");
+        ASSERT_ERRNO(EINVAL==errno, NULL);
+    }
+    if (Taligned_free) {
+        // NULL pointer is OK to free
+        errno = 0;
+        Taligned_free(NULL);
+        /* As free has no return value, strictly speaking we can't check for errors here;
+           still, implementations that were checked leave errno untouched, so we assert it.
+        */
+        ASSERT_ERRNO(0==errno, NULL);
+    }
+    if (Raligned_realloc) {
+        for (int i=1; i<20; i++) {
+            // checks that calls work correctly in presence of non-zero errno
+            errno = i;
+            void *ptr = Taligned_malloc(i*10, 128);
+            ASSERT(NULL!=ptr, NULL);
+            ASSERT_ERRNO(0!=errno, NULL);
+            // if size is zero and pointer is not NULL, works like free
+            memptr = Taligned_realloc(ptr, 0, 64);
+            ASSERT(NULL==memptr, NULL);
+            ASSERT_ERRNO(0!=errno, NULL);
+        }
+        // alignment isn't power of 2
+        for (int bad_align=3; bad_align<16; bad_align++)
+            if (bad_align&(bad_align-1)) {
+                void *ptr = &bad_align;
+                memptr = Taligned_realloc(&ptr, 100, bad_align);
+                ASSERT(NULL==memptr, NULL);
+                ASSERT(&bad_align==ptr, NULL);
+                ASSERT_ERRNO(EINVAL==errno, NULL);
+            }
+    }
+}
+
+void CMemTest::RunAllTests(int total_threads)
+{
+    Zerofilling();
+    Free_NULL();
+    InvariantDataRealloc(/*aligned=*/false);
+    if (Raligned_realloc)
+        InvariantDataRealloc(/*aligned=*/true);
+    TestAlignedParameters();
+#if __APPLE__
+    REPORT("Known issue: some tests are skipped on Mac OS* X\n");
+#else
+    UniquePointer();
+    AddrArifm();
+#if !__TBB_MIC_NATIVE
+    NULLReturn(1*MByte,100*MByte,total_threads);
+#endif
+#endif
+    if (FullLog) REPORT("All tests ended\nclearing memory...");
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_init_shutdown.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_init_shutdown.cpp
new file mode 100644 (file)
index 0000000..92a69bc
--- /dev/null
@@ -0,0 +1,150 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/scalable_allocator.h"
+#include "tbb/atomic.h"
+#include "tbb/aligned_space.h"
+#include "../tbb/tbb_assert_impl.h"
+
+#if _WIN64 && defined(_M_AMD64) && !__MINGW64__
+void __TBB_machine_pause(__int32 /*delay*/ ) {}
+#elif  __linux__ && __ia64__
+#include <pthread.h>
+
+pthread_mutex_t counter_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+int32_t __TBB_machine_fetchadd4__TBB_full_fence (volatile void *ptr, int32_t value)
+{
+    pthread_mutex_lock(&counter_mutex);
+    int32_t result = *(int32_t*)ptr;
+    *(int32_t*)ptr = result + value;
+    pthread_mutex_unlock(&counter_mutex);
+    return result;
+}
+
+void __TBB_machine_pause(int32_t /*delay*/) {}
+#endif
+
+#include "harness.h"
+#include "harness_barrier.h"
+
+tbb::atomic<int> FinishedTasks;
+const int MaxTasks = 16;
+
+/*--------------------------------------------------------------------*/
+// The regression test against a bug triggered when malloc initialization
+// and thread shutdown were called simultaneously, in which case the
+// Windows dynamic loader lock and the allocator initialization/termination lock
+// were taken in a different order.
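+// Half of the tasks call scalable_malloc right after the barrier (forcing lazy allocator
+// initialization) while the other half exit immediately, so initialization races with
+// thread shutdown.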
+
+class TestFunc1 {
+    Harness::SpinBarrier* my_barr;
+public:
+    TestFunc1 (Harness::SpinBarrier& barr) : my_barr(&barr) {}
+    void operator() (bool do_malloc) const {
+        my_barr->wait();
+        if (do_malloc) scalable_malloc(10);
+        ++FinishedTasks;
+    }
+};
+
+typedef NativeParallelForTask<bool,TestFunc1> TestTask1;
+
+void Test1 () {
+    int NTasks = min(MaxTasks, max(2, MaxThread));
+    Harness::SpinBarrier barr(NTasks);
+    TestFunc1 tf(barr);
+    FinishedTasks = 0;
+    tbb::aligned_space<TestTask1,MaxTasks> tasks;
+
+    for(int i=0; i<NTasks; ++i) {
+        TestTask1* t = tasks.begin()+i;
+        new(t) TestTask1(i%2==0, tf);
+        t->start();
+    }
+    
+    Harness::Sleep(1000); // wait a second :)
+    ASSERT( FinishedTasks==NTasks, "Some threads appear to deadlock" );
+
+    for(int i=0; i<NTasks; ++i) {
+        TestTask1* t = tasks.begin()+i;
+        t->wait_to_finish();
+        t->~TestTask1();
+    }
+}
+
+/*--------------------------------------------------------------------*/
+// The regression test against a bug when cross-thread deallocation
+// caused livelock at thread shutdown.
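+// One thread allocates a block; a second thread spins until the pointer is published,
+// frees it (a cross-thread deallocation), and only then lets the first thread shut down.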
+
+void* ptr = NULL;
+
+class TestFunc2a {
+    Harness::SpinBarrier* my_barr;
+public:
+    TestFunc2a (Harness::SpinBarrier& barr) : my_barr(&barr) {}
+    void operator() (int) const {
+        ptr = scalable_malloc(8);
+        my_barr->wait();
+        ++FinishedTasks;
+    }
+};
+
+typedef NativeParallelForTask<int,TestFunc2a> TestTask2a;
+
+class TestFunc2b: NoAssign {
+    Harness::SpinBarrier* my_barr;
+    TestTask2a& my_ward;
+public:
+    TestFunc2b (Harness::SpinBarrier& barr, TestTask2a& t) : my_barr(&barr), my_ward(t) {}
+    void operator() (int) const {
+        tbb::internal::spin_wait_while_eq(ptr, (void*)NULL);
+        scalable_free(ptr);
+        my_barr->wait();
+        my_ward.wait_to_finish();
+        ++FinishedTasks;
+    }
+};
+void Test2() {
+    Harness::SpinBarrier barr(2);
+    TestFunc2a func2a(barr);
+    TestTask2a t2a(0, func2a);
+    TestFunc2b func2b(barr, t2a);
+    NativeParallelForTask<int,TestFunc2b> t2b(1, func2b);
+    FinishedTasks = 0;
+    t2a.start(); t2b.start();
+    Harness::Sleep(1000); // wait a second :)
+    ASSERT( FinishedTasks==2, "Threads appear to deadlock" );
+    t2b.wait_to_finish(); // t2a is monitored by t2b
+}
+
+int TestMain () {
+    Test1(); // requires malloc initialization so should be first
+    Test2();
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_lib_unload.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_lib_unload.cpp
new file mode 100644 (file)
index 0000000..fb8bc00
--- /dev/null
@@ -0,0 +1,150 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+
+#include <cstdlib>
+#if _WIN32 || _WIN64
+#include "tbb/machine/windows_api.h"
+#else
+#include <dlfcn.h>
+#endif
+#include "tbb/tbb_stddef.h"
+#define HARNESS_NO_PARSE_COMMAND_LINE 1
+#include "harness.h"
+#include "harness_memory.h"
+
+#if TBB_USE_DEBUG
+#define SUFFIX1 "_debug"
+#define SUFFIX2
+#else
+#define SUFFIX1
+#define SUFFIX2 "_debug"
+#endif /* TBB_USE_DEBUG */
+
+#if _WIN32||_WIN64
+#define PREFIX
+#define EXT ".dll"
+#else
+#define PREFIX "lib"
+#if __APPLE__
+#define EXT ".dylib"
+#elif __linux__
+#define EXT __TBB_STRING(.so.TBB_COMPATIBLE_INTERFACE_VERSION)
+#elif __FreeBSD__ || __sun || _AIX
+#define EXT ".so"
+#else
+#error Unknown OS
+#endif
+#endif
+
+// Form the names of the TBB memory allocator binaries.
+#define MALLOCLIB_NAME1 PREFIX "tbbmalloc" SUFFIX1 EXT
+#define MALLOCLIB_NAME2 PREFIX "tbbmalloc" SUFFIX2 EXT
+
+#if _WIN32 || _WIN64
+#define LIBRARY_HANDLE HMODULE
+#define LOAD_LIBRARY(name) LoadLibrary((name))
+#else
+#define LIBRARY_HANDLE void*
+#define LOAD_LIBRARY(name) dlopen((name), RTLD_NOW|RTLD_GLOBAL)
+#endif
+
+struct Run {
+    void operator()( int /*id*/ ) const {
+        void* (*malloc_ptr)(std::size_t);
+        void (*free_ptr)(void*);
+
+        const char* actual_name;
+        LIBRARY_HANDLE lib = LOAD_LIBRARY(actual_name = MALLOCLIB_NAME1);
+        if (!lib)      lib = LOAD_LIBRARY(actual_name = MALLOCLIB_NAME2);
+        if (!lib) {
+            REPORT("Can't load " MALLOCLIB_NAME1 " or " MALLOCLIB_NAME2 "\n");
+            exit(1);
+        }
+#if _WIN32 || _WIN64
+        // casts at both sides are to soothe MinGW compiler
+        (void *&)malloc_ptr = (void*)GetProcAddress(lib, "scalable_malloc");
+        (void *&)free_ptr = (void*)GetProcAddress(lib, "scalable_free");
+#else
+        (void *&)malloc_ptr = dlsym(lib, "scalable_malloc");
+        (void *&)free_ptr = dlsym(lib, "scalable_free");
+#endif
+        if (!malloc_ptr || !free_ptr)  {
+            REPORT("Can't find scalable_(malloc|free) in %s \n", actual_name);
+            exit(1);
+        }
+
+        void *p = malloc_ptr(100);
+        memset(p, 1, 100);
+        free_ptr(p);
+
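+        // The library handle is released below; the checks verify that tbbmalloc keeps
+        // itself pinned in memory, presumably so that outstanding allocations made
+        // through it remain usable after the nominal unload.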
+#if _WIN32 || _WIN64
+        BOOL ret = FreeLibrary(lib);
+        ASSERT(ret, "FreeLibrary must be successful");
+        ASSERT(GetModuleHandle(actual_name),  
+               "allocator library must not be unloaded");
+#else
+        int ret = dlclose(lib);
+        ASSERT(ret == 0, "dlclose must be successful");
+        ASSERT(dlsym(RTLD_DEFAULT, "scalable_malloc"),  
+               "allocator library must not be unloaded");
+#endif
+    }
+};
+
+int TestMain () {
+    int i;
+    std::ptrdiff_t memory_leak;
+
+    // warm-up run
+    NativeParallelFor( 1, Run() );
+    /* The 1st call to GetMemoryUsage() allocates some memory,
+       but memory consumption seems to stabilize after it.
+     */
+    GetMemoryUsage();
+    std::size_t memory_in_use = GetMemoryUsage();
+    ASSERT(memory_in_use == GetMemoryUsage(), 
+           "Memory consumption should not increase after 1st GetMemoryUsage() call");
+
+    // expect that memory consumption stabilized after several runs
+    for (i=0; i<3; i++) {
+        std::size_t memory_in_use = GetMemoryUsage();
+        for (int j=0; j<10; j++)
+            NativeParallelFor( 1, Run() );
+        memory_leak = GetMemoryUsage() - memory_in_use;
+        if (memory_leak == 0)  // possibly too strong?
+            break;
+    }
+    if(3==i) {
+        // not stabilized, could be leak
+        REPORT( "Error: memory leak of up to %ld bytes\n", static_cast<long>(memory_leak));
+        exit(1);
+    }
+
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_overload.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_overload.cpp
new file mode 100644 (file)
index 0000000..8b1d68c
--- /dev/null
@@ -0,0 +1,276 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+
+#if __linux__
+#define MALLOC_REPLACEMENT_AVAILABLE 1
+#elif _WIN32 && !__MINGW32__ && !__MINGW64__
+#define MALLOC_REPLACEMENT_AVAILABLE 2
+#include "tbb/tbbmalloc_proxy.h"
+#endif
+
+#if MALLOC_REPLACEMENT_AVAILABLE
+
+#if _WIN32 || _WIN64
+// As the test is intentionally built with /EHs-, suppress VS2005's multiple
+// warnings like C4530: C++ exception handler used, but unwind semantics are not enabled
+#if defined(_MSC_VER) && !__INTEL_COMPILER
+/* ICC 10.1 and 11.0 generate code that uses std::_Raise_handler,
+   but it's only defined in libcpmt(d), which the test isn't linked with.
+ */
+#define _HAS_EXCEPTIONS 0
+#endif
+// to use strdup and putenv w/o warnings
+#define _CRT_NONSTDC_NO_DEPRECATE 1
+#endif
+#include "harness_report.h"
+#include "harness_assert.h"
+#include <stdlib.h>
+#include <string.h>
+#include <malloc.h>
+#include <stdio.h>
+#include <new>
+
+#if __linux__
+#include <dlfcn.h>
+#include <unistd.h> // for sysconf
+#include <stdint.h> // for uintptr_t
+
+#elif _WIN32
+#include <stddef.h>
+#if __MINGW32__
+#include <unistd.h>
+#else
+typedef unsigned __int16 uint16_t;
+typedef unsigned __int32 uint32_t;
+typedef unsigned __int64 uint64_t;
+#endif
+
+#endif /* OS selection */
+
+#if _WIN32
+// On Windows, the tricky way of printing "done" is necessary to create a
+// dependence on msvcpXX.dll, for the sake of a regression test.
+// On Linux, C++ RTL headers are undesirable because they break strict ANSI mode.
+#if defined(_MSC_VER) && _MSC_VER >= 1300 && _MSC_VER <= 1310 && !defined(__INTEL_COMPILER)
+/* Fixes a compilation error reported by VS2003 for the exception class
+   when _HAS_EXCEPTIONS is 0:
+   bad_cast, which inherits from exception, is not in the std namespace.
+*/
+using namespace std;
+#endif
+#include <string>
+#endif
+
+
+template<typename T>
+static inline T alignDown(T arg, uintptr_t alignment) {
+    return T( (uintptr_t)arg  & ~(alignment-1));
+}
+template<typename T>
+static inline bool isAligned(T arg, uintptr_t alignment) {
+    return 0==((uintptr_t)arg &  (alignment-1));
+}
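+// Both helpers assume that alignment is a power of two, e.g. isAligned(p, 16)
+// simply tests the low four bits of the address.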
+
+/* Below is part of MemoryAllocator.cpp. */
+
+class BackRefIdx { // composite index to backreference array
+private:
+    uint16_t master;      // index in BackRefMaster
+    uint16_t largeObj:1;  // is this object "large"?
+    uint16_t offset  :15; // offset from beginning of BackRefBlock
+public:
+    BackRefIdx() : master((uint16_t)-1) {}
+    bool isInvalid() { return master == (uint16_t)-1; }
+    bool isLargeObject() const { return largeObj; }
+    uint16_t getMaster() const { return master; }
+    uint16_t getOffset() const { return offset; }
+
+    // only newBackRef can modify BackRefIdx
+    static BackRefIdx newBackRef(bool largeObj);
+};
+
+struct LargeMemoryBlock {
+    LargeMemoryBlock *next,          // ptrs in list of cached blocks
+                     *prev;
+    uintptr_t         age;           // age of block while in cache
+    size_t            objectSize;    // the size requested by a client
+    size_t            unalignedSize; // the size requested from getMemory
+    bool              fromMapMemory;
+    BackRefIdx        backRefIdx;    // cached here, used copy is in LargeObjectHdr
+};
+
+struct LargeObjectHdr {
+    LargeMemoryBlock *memoryBlock;
+    /* Have to duplicate it here from CachedObjectHdr, 
+       as backreference must be checked without further pointer dereference.
+       Points to LargeObjectHdr. */
+    BackRefIdx       backRefIdx;
+};
+
+/*
+ * Objects of this size and larger are considered large objects.
+ */
+const uint32_t minLargeObjectSize = 8065;
+
+/* end of inclusion from MemoryAllocator.cpp */
+
+/* Correct only for large blocks, i.e. not smaller than minLargeObjectSize */
+static bool scalableMallocLargeBlock(void *object, size_t size)
+{
+    ASSERT(size >= minLargeObjectSize, NULL);
+#if MALLOC_REPLACEMENT_AVAILABLE == 2
+    // Check that _msize works correctly
+    ASSERT(_msize(object) >= size, NULL);
+#endif
+
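+    // Layout of a large allocation: the LargeObjectHdr sits immediately before the user
+    // pointer and must point back to a LargeMemoryBlock located below it whose objectSize
+    // matches the size the client requested.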
+    LargeMemoryBlock *lmb = ((LargeObjectHdr*)object-1)->memoryBlock;
+    return uintptr_t(lmb)<uintptr_t(((LargeObjectHdr*)object-1)) && lmb->objectSize==size;
+}
+
+struct BigStruct {
+    char f[minLargeObjectSize];
+};
+
+int main(int , char *[]) {
+    void *ptr, *ptr1;
+
+#if MALLOC_REPLACEMENT_AVAILABLE == 1
+    if (NULL == dlsym(RTLD_DEFAULT, "scalable_malloc")) {
+        REPORT("libtbbmalloc not found\nfail\n");
+        return 1;
+    }
+#endif
+
+/* On Windows, memory block size returned by _msize() is sometimes used 
+   to calculate the size for an extended block. Substituting _msize, 
+   scalable_msize initially returned 0 for regions not allocated by the scalable 
+   allocator, which led to incorrect memory reallocation and subsequent crashes.
+   It was found that adding a new environment variable triggers the error.
+*/
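+/* The code below reproduces that pattern: it registers a fresh environment variable
+   with putenv() and then checks that PATH is still intact. */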
+    ASSERT(getenv("PATH"), "We assume that PATH is set everywhere.");
+    char *pathCopy = strdup(getenv("PATH"));
+    const char *newEnvName = "__TBBMALLOC_OVERLOAD_REGRESSION_TEST_FOR_REALLOC_AND_MSIZE";
+    char *newEnv = (char*)malloc(3 + strlen(newEnvName));
+    
+    ASSERT(!getenv(newEnvName), "Environment variable should not be used before.");
+    strcpy(newEnv, newEnvName);
+    strcat(newEnv, "=1");
+    int r = putenv(newEnv);
+    ASSERT(!r, NULL);
+    char *path = getenv("PATH");
+    ASSERT(path && 0==strcmp(path, pathCopy), "Environment was changed erroneously.");
+    free(pathCopy);
+    free(newEnv);
+
+    ptr = malloc(minLargeObjectSize);
+    ASSERT(ptr!=NULL && scalableMallocLargeBlock(ptr, minLargeObjectSize), NULL);
+    free(ptr);
+
+    ptr = calloc(minLargeObjectSize, 2);
+    ASSERT(ptr!=NULL && scalableMallocLargeBlock(ptr, minLargeObjectSize*2), NULL);
+    ptr1 = realloc(ptr, minLargeObjectSize*10);
+    ASSERT(ptr1!=NULL && scalableMallocLargeBlock(ptr1, minLargeObjectSize*10), NULL);
+    free(ptr1);
+
+#if MALLOC_REPLACEMENT_AVAILABLE == 1
+
+    int ret = posix_memalign(&ptr, 1024, 3*minLargeObjectSize);
+    ASSERT(0==ret && ptr!=NULL && scalableMallocLargeBlock(ptr, 3*minLargeObjectSize), NULL);
+    free(ptr);
+
+    ptr = memalign(128, 4*minLargeObjectSize);
+    ASSERT(ptr!=NULL && scalableMallocLargeBlock(ptr, 4*minLargeObjectSize), NULL);
+    free(ptr);
+
+    ptr = valloc(minLargeObjectSize);
+    ASSERT(ptr!=NULL && scalableMallocLargeBlock(ptr, minLargeObjectSize), NULL);
+    free(ptr);
+
+    long memoryPageSize = sysconf(_SC_PAGESIZE);
+    int sz = 1024*minLargeObjectSize;
+    ptr = pvalloc(sz);
+    ASSERT(ptr!=NULL &&                // align size up to the page size
+           scalableMallocLargeBlock(ptr, ((sz-1) | (memoryPageSize-1)) + 1), NULL);
+    free(ptr);
+
+    struct mallinfo info = mallinfo();
+    // right now mallinfo is zero-initialized
+    ASSERT(!info.arena && !info.ordblks && !info.smblks && !info.hblks 
+           && !info.hblkhd && !info.usmblks && !info.fsmblks 
+           && !info.uordblks && !info.fordblks && !info.keepcost, NULL);
+
+#elif MALLOC_REPLACEMENT_AVAILABLE == 2
+
+    ptr = _aligned_malloc(minLargeObjectSize,16);
+    ASSERT(ptr!=NULL && scalableMallocLargeBlock(ptr, minLargeObjectSize), NULL);
+
+    ptr1 = _aligned_realloc(ptr, minLargeObjectSize*10,16);
+    ASSERT(ptr1!=NULL && scalableMallocLargeBlock(ptr1, minLargeObjectSize*10), NULL);
+    _aligned_free(ptr1);
+
+#endif
+
+    BigStruct *f = new BigStruct;
+    ASSERT(f!=NULL && scalableMallocLargeBlock(f, sizeof(BigStruct)), NULL);
+    delete f;
+
+    f = new BigStruct[10];
+    ASSERT(f!=NULL && scalableMallocLargeBlock(f, 10*sizeof(BigStruct)), NULL);
+    delete []f;
+
+    f = new(std::nothrow) BigStruct;
+    ASSERT(f!=NULL && scalableMallocLargeBlock(f, sizeof(BigStruct)), NULL);
+    delete f;
+
+    f = new(std::nothrow) BigStruct[2];
+    ASSERT(f!=NULL && scalableMallocLargeBlock(f, 2*sizeof(BigStruct)), NULL);
+    delete []f;
+
+#if _WIN32
+    std::string stdstring = "done";
+    const char* s = stdstring.c_str();
+#else
+    const char* s = "done";
+#endif
+    REPORT("%s\n", s);
+    return 0;
+}
+
+#define HARNESS_NO_PARSE_COMMAND_LINE 1
+#define HARNESS_CUSTOM_MAIN 1
+#include "harness.h"
+
+#else  /* !MALLOC_REPLACEMENT_AVAILABLE */
+#include <stdio.h>
+
+int main(int , char *[]) {
+    printf("skip\n");
+    return 0;
+}
+#endif /* !MALLOC_REPLACEMENT_AVAILABLE */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_pure_c.c b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_pure_c.c
new file mode 100644 (file)
index 0000000..a92c481
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#ifdef __cplusplus
+#error For testing purpose, this file should be compiled with a C compiler, not C++
+#endif /*__cplusplus */
+
+#include "tbb/scalable_allocator.h"
+#include <stdio.h>
+#include <assert.h>
+
+/*
+ *  The test checks that scalable_allocator.h and its functions
+ *  can be used from pure C programs; some regression checks are also done.
+ */
+
+int main(void) {
+    size_t i, j;
+    void *p1, *p2;
+    for( i=0; i<=1<<16; ++i) {
+        p1 = scalable_malloc(i);
+        if( !p1 )
+            printf("Warning: there should be memory but scalable_malloc returned NULL\n");
+        scalable_free(p1);
+    }
+    p1 = p2 = NULL;
+    for( i=1024*1024; ; i/=2 )
+    {
+        scalable_free(p1);
+        p1 = scalable_realloc(p2, i);
+        p2 = scalable_calloc(i, 32);
+        if (p2) {
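+            /* spot-check that the calloc-ed memory is zero-initialized */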
+            if (i<sizeof(size_t)) {
+                for (j=0; j<i; j++)
+                    assert(0==*((char*)p2+j));
+            } else {
+                for (j=0; j<i; j+=sizeof(size_t))
+                    assert(0==*((size_t*)p2+j));
+            }
+        }
+        scalable_free(p2);
+        p2 = scalable_malloc(i);
+        if (i==0) break;
+    }
+    for( i=1; i<1024*1024; i*=2 )
+    {
+        scalable_free(p1);
+        p1 = scalable_realloc(p2, i);
+        p2 = scalable_malloc(i);
+    }
+    scalable_free(p1);
+    scalable_free(p2);
+    printf("done\n");
+    return 0;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_regression.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_regression.cpp
new file mode 100644 (file)
index 0000000..7e25440
--- /dev/null
@@ -0,0 +1,150 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#define HARNESS_NO_PARSE_COMMAND_LINE 1
+
+#include <stdio.h>
+#include "tbb/scalable_allocator.h"
+
+class minimalAllocFree {
+public:
+    void operator()(int size) const {
+        tbb::scalable_allocator<char> a;
+        char* str = a.allocate( size );
+        a.deallocate( str, size );
+    }
+};
+
+#include "harness.h"
+
+template<typename Body, typename Arg>
+void RunThread(const Body& body, const Arg& arg) {
+    NativeParallelForTask<Arg,Body> job(arg, body);
+    job.start();
+    job.wait_to_finish();
+}
+
+/*--------------------------------------------------------------------*/
+// The regression test against bug #1518 where thread boot strap allocations "leaked"
+
+#include "harness_memory.h"
+
+bool TestBootstrapLeak() {
+    /* In bug 1518, each thread leaked ~384 bytes.
+       Initially, the scalable allocator maps 1MB, so it is necessary to take out most of this space.
+       1MB is chunked into 16K blocks; of those, one block is for thread boot strap, and one more
+       should be reserved for the test body. That leaves 62 blocks, each able to serve 15 objects of 1024 bytes.
+    */
+    const int alloc_size = 1024;
+    const int take_out_count = 15*62;
+
+    tbb::scalable_allocator<char> a;
+    char* array[take_out_count];
+    for( int i=0; i<take_out_count; ++i )
+        array[i] = a.allocate( alloc_size );
+
+    RunThread( minimalAllocFree(), alloc_size ); // for threading library to take some memory
+    size_t memory_in_use = GetMemoryUsage();
+    // Wait for memory usage data to "stabilize". The iteration limit (1000) is an arbitrary choice.
+    for( int i=0; i<1000; i++) {
+        if( GetMemoryUsage()!=memory_in_use ) {
+            memory_in_use = GetMemoryUsage();
+            i = -1;
+        }
+    }
+
+    ptrdiff_t memory_leak = 0;
+    // Notice that 16K boot strap memory block is enough to serve 42 threads.
+    const int num_thread_runs = 200;
+    for (int run=0; run<3; run++) {
+        memory_in_use = GetMemoryUsage();
+        for( int i=0; i<num_thread_runs; ++i )
+            RunThread( minimalAllocFree(), alloc_size );
+
+        memory_leak = GetMemoryUsage() - memory_in_use;
+        if (!memory_leak)
+            break;
+    }
+    if( memory_leak>0 ) { // possibly too strong?
+        REPORT( "Error: memory leak of up to %ld bytes\n", static_cast<long>(memory_leak));
+    }
+
+    for( int i=0; i<take_out_count; ++i )
+        a.deallocate( array[i], alloc_size );
+
+    return memory_leak<=0;
+}
+
+/*--------------------------------------------------------------------*/
+// The regression test against a bug with incompatible semantics of msize and realloc
+
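+// The check: fill a block up to its scalable_msize-reported capacity, grow it with
+// scalable_realloc, and verify that the original contents survive the move.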
+bool TestReallocMsize(size_t startSz) {
+    bool passed = true;
+
+    char *buf = (char*)scalable_malloc(startSz);
+    ASSERT(buf, "");
+    size_t realSz = scalable_msize(buf);
+    ASSERT(realSz>=startSz, "scalable_msize must not be less than the allocated size");
+    memset(buf, 'a', realSz-1);
+    buf[realSz-1] = 0;
+    char *buf1 = (char*)scalable_realloc(buf, 2*realSz);
+    ASSERT(buf1, "");
+    ASSERT(scalable_msize(buf1)>=2*realSz, 
+           "scalable_msize must be not less then allocated size");
+    buf1[2*realSz-1] = 0;
+    if ( strspn(buf1, "a") < realSz-1 ) {
+        REPORT( "Error: data broken for %d Bytes object.\n", startSz);
+        passed = false;
+    }
+    scalable_free(buf1);
+
+    return passed;
+}
+
+/*--------------------------------------------------------------------*/
+// The main test function
+
+int TestMain () {
+    bool passed = true;
+    // Check whether memory usage data can be obtained; if not, skip test_bootstrap_leak.
+    if( GetMemoryUsage() )
+        passed &= TestBootstrapLeak();
+
+    // TestReallocMsize runs for each power of 2 and each Fibonacci number below 64K
+    for (size_t a=1, b=1, sum=1; sum<=64*1024; ) {
+        passed &= TestReallocMsize(sum);
+        a = b;
+        b = sum;
+        sum = a+b;
+    }
+    for (size_t a=2; a<=64*1024; a*=2)
+        passed &= TestReallocMsize(a);
+    
+    ASSERT( passed, "Test failed" );
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_whitebox.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_malloc_whitebox.cpp
new file mode 100644 (file)
index 0000000..c8bc6a6
--- /dev/null
@@ -0,0 +1,346 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/scalable_allocator.h"
+#include "harness.h"
+#include "harness_barrier.h"
+
+// To avoid depending on the ITT support stuff
+#ifdef DO_ITT_NOTIFY
+#undef DO_ITT_NOTIFY
+#endif
+
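+// Whitebox approach: the allocator sources are included directly, with access specifiers
+// overridden, so the test can reach internals such as backRefMaster, StartupBlock, and
+// FreeBlockPool.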
+#define protected public
+#define private public
+#include "../tbbmalloc/frontend.cpp"
+#undef protected
+#undef private
+#include "../tbbmalloc/backend.cpp"
+#include "../tbbmalloc/backref.cpp"
+#include "../tbbmalloc/large_objects.cpp"
+#include "../tbbmalloc/tbbmalloc.cpp"
+
+const int LARGE_MEM_SIZES_NUM = 10;
+const size_t MByte = 1024*1024;
+
+class AllocInfo {
+    int *p;
+    int val;
+    int size;
+public:
+    AllocInfo() : p(NULL), val(0), size(0) {}
+    explicit AllocInfo(int size) : p((int*)scalable_malloc(size*sizeof(int))),
+                                   val(rand()), size(size) {
+        ASSERT(p, NULL);
+        for (int k=0; k<size; k++)
+            p[k] = val;
+    }
+    void check() const {
+        for (int k=0; k<size; k++)
+            ASSERT(p[k] == val, NULL);
+    }
+    void clear() {
+        scalable_free(p);
+    }
+};
+
+class TestLargeObjCache: NoAssign {
+    static Harness::SpinBarrier barrier;
+public:
+    static int largeMemSizes[LARGE_MEM_SIZES_NUM];
+
+    static void initBarrier(unsigned thrds) { barrier.initialize(thrds); }
+
+    TestLargeObjCache( ) {}
+
+    void operator()( int /*mynum*/ ) const {
+        AllocInfo allocs[LARGE_MEM_SIZES_NUM];
+
+        // push to maximal cache limit
+        for (int i=0; i<2; i++) {
+            const int sizes[] = { MByte/sizeof(int),
+                                  (MByte-2*largeBlockCacheStep)/sizeof(int) };
+            for (int q=0; q<2; q++) {
+                size_t curr = 0;
+                for (int j=0; j<LARGE_MEM_SIZES_NUM; j++, curr++)
+                    new (allocs+curr) AllocInfo(sizes[q]);
+
+                for (size_t j=0; j<curr; j++) {
+                    allocs[j].check();
+                    allocs[j].clear();
+                }
+            }
+        }
+        
+        barrier.wait();
+
+        // check caching correctness
+        for (int i=0; i<1000; i++) {
+            size_t curr = 0;
+            for (int j=0; j<LARGE_MEM_SIZES_NUM-1; j++, curr++)
+                new (allocs+curr) AllocInfo(largeMemSizes[j]);
+
+            new (allocs+curr) 
+                AllocInfo((int)(4*minLargeObjectSize +
+                                2*minLargeObjectSize*(1.*rand()/RAND_MAX)));
+            curr++;
+
+            for (size_t j=0; j<curr; j++) {
+                allocs[j].check();
+                allocs[j].clear();
+            }
+        }
+    }
+};
+
+Harness::SpinBarrier TestLargeObjCache::barrier;
+int TestLargeObjCache::largeMemSizes[LARGE_MEM_SIZES_NUM];
+
+#if MALLOC_CHECK_RECURSION
+
+class TestStartupAlloc: NoAssign {
+    static Harness::SpinBarrier init_barrier;
+    struct TestBlock {
+        void *ptr;
+        size_t sz;
+    };
+    static const int ITERS = 100;
+public:
+    TestStartupAlloc() {}
+    static void initBarrier(unsigned thrds) { init_barrier.initialize(thrds); }
+    void operator()(int) const {
+        TestBlock blocks1[ITERS], blocks2[ITERS];
+
+        init_barrier.wait();
+
+        for (int i=0; i<ITERS; i++) {
+            blocks1[i].sz = rand() % minLargeObjectSize;
+            blocks1[i].ptr = StartupBlock::allocate(blocks1[i].sz);
+            ASSERT(blocks1[i].ptr && StartupBlock::msize(blocks1[i].ptr)>=blocks1[i].sz 
+                   && 0==(uintptr_t)blocks1[i].ptr % sizeof(void*), NULL);
+            memset(blocks1[i].ptr, i, blocks1[i].sz);
+        }
+        for (int i=0; i<ITERS; i++) {
+            blocks2[i].sz = rand() % minLargeObjectSize;
+            blocks2[i].ptr = StartupBlock::allocate(blocks2[i].sz);
+            ASSERT(blocks2[i].ptr && StartupBlock::msize(blocks2[i].ptr)>=blocks2[i].sz 
+                   && 0==(uintptr_t)blocks2[i].ptr % sizeof(void*), NULL);
+            memset(blocks2[i].ptr, i, blocks2[i].sz);
+
+            for (size_t j=0; j<blocks1[i].sz; j++)
+                ASSERT(*((char*)blocks1[i].ptr+j) == i, NULL);
+            Block *block = (Block *)alignDown(blocks1[i].ptr, blockSize);
+            ((StartupBlock *)block)->free(blocks1[i].ptr);
+        }
+        for (int i=ITERS-1; i>=0; i--) {
+            for (size_t j=0; j<blocks2[i].sz; j++)
+                ASSERT(*((char*)blocks2[i].ptr+j) == i, NULL);
+            Block *block = (Block *)alignDown(blocks2[i].ptr, blockSize);
+            ((StartupBlock *)block)->free(blocks2[i].ptr);
+        }
+    }
+};
+
+Harness::SpinBarrier TestStartupAlloc::init_barrier;
+
+#endif /* MALLOC_CHECK_RECURSION */
+
+class BackRefWork: NoAssign {
+    struct TestBlock {
+        intptr_t   data;
+        BackRefIdx idx;
+    };
+    static const int ITERS = 2*BR_MAX_CNT+2;
+public:
+    BackRefWork() {}
+    void operator()(int) const {
+        TestBlock blocks[ITERS];
+
+        for (int i=0; i<ITERS; i++) {
+            blocks[i].idx = BackRefIdx::newBackRef(/*largeObj=*/false);
+            setBackRef(blocks[i].idx, &blocks[i].data);
+        }
+        for (int i=0; i<ITERS; i++)
+            ASSERT((Block*)&blocks[i].data == getBackRef(blocks[i].idx), NULL);
+        for (int i=ITERS-1; i>=0; i--)
+            removeBackRef(blocks[i].idx);
+    }
+};
+
+class FreeBlockPoolHit: NoAssign {
+    // to trigger a possible leak both for cleanup on pool overflow
+    // and for cleanup on thread termination
+    static const int ITERS = 2*FreeBlockPool::POOL_HIGH_MARK;
+public:
+    FreeBlockPoolHit() {}
+    void operator()(int) const {
+        void *objs[ITERS];
+
+        for (int i=0; i<ITERS; i++)
+            objs[i] = scalable_malloc(minLargeObjectSize-1);
+        for (int i=0; i<ITERS; i++)
+            scalable_free(objs[i]);
+
+#ifdef USE_WINTHREAD
+        // under Windows, DllMain is used to call mallocThreadShutdownNotification;
+        // as we don't use it here, we have to call the callback manually
+        mallocThreadShutdownNotification(NULL);
+#endif
+    }
+};
+
+static size_t allocatedBackRefCount()
+{
+    size_t cnt = 0;
+    for (int i=0; i<=backRefMaster->lastUsed; i++)
+        cnt += backRefMaster->backRefBl[i]->allocatedCount;
+    return cnt;
+}
+
+void TestBackRef() {
+    size_t beforeNumBackRef, afterNumBackRef;
+
+    beforeNumBackRef = allocatedBackRefCount();
+    for( int p=MaxThread; p>=MinThread; --p )
+        NativeParallelFor( p, BackRefWork() );
+    afterNumBackRef = allocatedBackRefCount();
+    ASSERT(beforeNumBackRef==afterNumBackRef, "backreference leak detected");
+
+    // lastUsed marks peak resource consumption. As we allocate below the mark,
+    // it must not move up, otherwise there is a resource leak.
+    int sustLastUsed = backRefMaster->lastUsed;
+    NativeParallelFor( 1, BackRefWork() );
+    ASSERT(sustLastUsed == backRefMaster->lastUsed, "backreference leak detected");
+    
+    // check leak of back references while per-thread small object pool is in use
+    // a warm-up run is needed to cover the bootStrapMalloc call
+    NativeParallelFor( 1, FreeBlockPoolHit() );
+    beforeNumBackRef = allocatedBackRefCount();
+    NativeParallelFor( 1, FreeBlockPoolHit() );
+    afterNumBackRef = allocatedBackRefCount();
+    ASSERT(beforeNumBackRef==afterNumBackRef, "backreference leak detected");
+}
+
+void TestObjectRecognition() {
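+    // Forge headers that mimic valid small and large objects and verify that
+    // safer_scalable_msize() recognizes them as foreign pointers and returns 0.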
+    size_t headersSize = sizeof(LargeMemoryBlock)+sizeof(LargeObjectHdr);
+    unsigned falseObjectSize = 113; // unsigned is the type expected by getObjectSize
+    size_t obtainedSize;
+    Block *auxBackRef;
+
+    ASSERT(sizeof(BackRefIdx)==4, "Unexpected size of BackRefIdx");
+    ASSERT(getObjectSize(falseObjectSize)!=falseObjectSize, "Error in test: bad choice for false object size");
+
+    void* mem = scalable_malloc(2*blockSize);
+    Block* falseBlock = (Block*)alignUp((uintptr_t)mem, blockSize);
+    falseBlock->objectSize = falseObjectSize;
+    char* falseSO = (char*)falseBlock + falseObjectSize*7;
+    ASSERT(alignDown(falseSO, blockSize)==(void*)falseBlock, "Error in test: false object offset is too big");
+
+    void* bufferLOH = scalable_malloc(2*blockSize + headersSize);
+    LargeObjectHdr* falseLO = 
+        (LargeObjectHdr*)alignUp((uintptr_t)bufferLOH + headersSize, blockSize);
+    LargeObjectHdr* headerLO = (LargeObjectHdr*)falseLO-1;
+    headerLO->memoryBlock = (LargeMemoryBlock*)bufferLOH;
+    headerLO->memoryBlock->unalignedSize = 2*blockSize + headersSize;
+    headerLO->memoryBlock->objectSize = blockSize + headersSize;
+    headerLO->backRefIdx = BackRefIdx::newBackRef(/*largeObj=*/true);
+    setBackRef(headerLO->backRefIdx, headerLO);
+    ASSERT(scalable_msize(falseLO) == blockSize + headersSize,
+           "Error in test: LOH falsification failed");
+    removeBackRef(headerLO->backRefIdx);
+
+    const int NUM_OF_IDX = BR_MAX_CNT+2;
+    BackRefIdx idxs[NUM_OF_IDX];
+    for (int cnt=0; cnt<2; cnt++) {
+        for (int master = -10; master<10; master++) {
+            falseBlock->backRefIdx.master = (uint16_t)master;
+            headerLO->backRefIdx.master = (uint16_t)master;
+        
+            for (int bl = -10; bl<BR_MAX_CNT+10; bl++) {
+                falseBlock->backRefIdx.offset = (uint16_t)bl;
+                headerLO->backRefIdx.offset = (uint16_t)bl;
+
+                for (int largeObj = 0; largeObj<2; largeObj++) {
+                    falseBlock->backRefIdx.largeObj = largeObj;
+                    headerLO->backRefIdx.largeObj = largeObj;
+
+                    obtainedSize = safer_scalable_msize(falseSO, NULL);
+                    ASSERT(obtainedSize==0, "Incorrect pointer accepted");
+                    obtainedSize = safer_scalable_msize(falseLO, NULL);
+                    ASSERT(obtainedSize==0, "Incorrect pointer accepted");
+                }
+            }
+        }
+        if (cnt == 1) {
+            for (int i=0; i<NUM_OF_IDX; i++)
+                removeBackRef(idxs[i]);
+            break;
+        }
+        for (int i=0; i<NUM_OF_IDX; i++) {
+            idxs[i] = BackRefIdx::newBackRef(/*largeObj=*/false);
+            setBackRef(idxs[i], NULL);
+        }
+    }
+    char *smallPtr = (char*)scalable_malloc(falseObjectSize);
+    obtainedSize = safer_scalable_msize(smallPtr, NULL);
+    ASSERT(obtainedSize==getObjectSize(falseObjectSize), "Correct pointer not accepted?");
+    scalable_free(smallPtr);
+
+    obtainedSize = safer_scalable_msize(mem, NULL);
+    ASSERT(obtainedSize>=2*blockSize, "Correct pointer not accepted?");
+    scalable_free(mem);
+    scalable_free(bufferLOH);
+}
+
+
+int TestMain () {
+    // the backreference machinery requires that initialization has been done
+    if(!isMallocInitialized()) doInitialization();
+    // to succeed, leak detection must be the 1st memory-intensive test
+    TestBackRef();
+
+#if MALLOC_CHECK_RECURSION
+    for( int p=MaxThread; p>=MinThread; --p ) {
+        TestStartupAlloc::initBarrier( p );
+        NativeParallelFor( p, TestStartupAlloc() );
+        ASSERT(!firstStartupBlock, "Startup heap memory leak detected");
+    }
+#endif
+
+    for (int i=0; i<LARGE_MEM_SIZES_NUM; i++)
+        TestLargeObjCache::largeMemSizes[i] = 
+            (int)(minLargeObjectSize + 2*minLargeObjectSize*(1.*rand()/RAND_MAX));
+
+    for( int p=MaxThread; p>=MinThread; --p ) {
+        TestLargeObjCache::initBarrier( p );
+        NativeParallelFor( p, TestLargeObjCache() );
+    }
+
+    TestObjectRecognition();
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_model_plugin.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_model_plugin.cpp
new file mode 100644 (file)
index 0000000..c9adf9f
--- /dev/null
@@ -0,0 +1,262 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+
+#if _WIN32 || _WIN64
+#include "tbb/machine/windows_api.h"
+#else
+#include <dlfcn.h>
+#endif
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "tbb/tbb_config.h"
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <stdexcept>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+#if TBB_USE_EXCEPTIONS
+    #include "harness_report.h"
+#endif
+
+#ifdef _USRDLL
+#include "tbb/task_scheduler_init.h"
+
+class CModel {
+public:
+    CModel(void) {};
+    static tbb::task_scheduler_init tbb_init;
+
+    void init_and_terminate( int );
+};
+
+tbb::task_scheduler_init CModel::tbb_init(1);
+
+//! Test that task::initialize and task::terminate work when doing nothing else.
+/** maxthread is treated as the "maximum" number of worker threads. */
+void CModel::init_and_terminate( int maxthread ) {
+    for( int i=0; i<200; ++i ) {
+        switch( i&3 ) {
+            default: {
+                tbb::task_scheduler_init init( rand() % maxthread + 1 );
+                break;
+            }
+            case 0: {
+                tbb::task_scheduler_init init;
+                break;
+            }
+            case 1: {
+                tbb::task_scheduler_init init( tbb::task_scheduler_init::automatic );
+                break;
+            }
+            case 2: {
+                tbb::task_scheduler_init init( tbb::task_scheduler_init::deferred );
+                init.initialize( rand() % maxthread + 1 );
+                init.terminate();
+                break;
+            }
+        }
+    }
+}
+
+extern "C"
+#if _WIN32 || _WIN64
+__declspec(dllexport)
+#endif
+void plugin_call(int maxthread)
+{
+    srand(2);
+    __TBB_TRY {
+        CModel model;
+        model.init_and_terminate(maxthread);
+    } __TBB_CATCH( std::runtime_error& error ) {
+#if TBB_USE_EXCEPTIONS
+        REPORT("ERROR: %s\n", error.what());
+#endif /* TBB_USE_EXCEPTIONS */
+    }
+}
+
+#else /* _USRDLL undefined */
+
+#define HARNESS_NO_ASSERT 1
+#include "harness.h"
+
+extern "C" void plugin_call(int);
+
+void report_error_in(const char* function_name)
+{
+#if _WIN32 || _WIN64
+    char* message;
+    int code = GetLastError();
+
+    FormatMessage(
+        FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
+        NULL, code,MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+        (char*)&message, 0, NULL );
+#else
+    char* message = (char*)dlerror();
+    int code = 0;
+#endif
+    REPORT( "%s failed with error %d: %s\n", function_name, code, message);
+
+#if _WIN32 || _WIN64
+    LocalFree(message);
+#endif
+}
+
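+// Consume almost all thread-local storage slots (keeping the last 10 so they can be
+// released), presumably so that the plugin load/unload cycle below runs with TLS
+// nearly exhausted.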
+int use_lot_of_tls() {
+    int count = 0;
+#if _WIN32 || _WIN64
+    DWORD last_handles[10];
+    DWORD result;
+    result = TlsAlloc();
+    while( result!=TLS_OUT_OF_INDEXES ) {
+        last_handles[++count%10] = result;
+        result = TlsAlloc();
+    }
+    for( int i=0; i<10; ++i )
+        TlsFree(last_handles[i]);
+#else
+    pthread_key_t last_handles[10];
+    pthread_key_t result;
+    int setspecific_dummy=10;
+    while( pthread_key_create(&result, NULL)==0 
+           && count < 4096 ) // Sun Solaris doesn't have any built-in limit, so we set something big enough
+    {
+        last_handles[++count%10] = result;
+        pthread_setspecific(result,&setspecific_dummy);
+    }
+    REMARK("Created %d keys\n", count);
+    for( int i=0; i<10; ++i )
+        pthread_key_delete(last_handles[i]);
+#endif
+    return count-10;
+}
+
+typedef void (*PLUGIN_CALL)(int);
+
+int TestMain () {
+#if !RML_USE_WCRM
+    PLUGIN_CALL my_plugin_call;
+
+    int tls_key_count = use_lot_of_tls();
+    REMARK("%d thread local objects allocated in advance\n", tls_key_count);
+
+#if _WIN32 || _WIN64
+    HMODULE hLib;
+#if __TBB_ARENA_PER_MASTER
+    hLib = LoadLibrary("irml.dll");
+    if ( !hLib )
+        hLib = LoadLibrary("irml_debug.dll");
+    if ( !hLib )
+        return Harness::Skipped; // No shared RML, skip the test
+    FreeLibrary(hLib);
+#endif /* __TBB_ARENA_PER_MASTER */
+#else /* !WIN */
+#if __APPLE__
+    #define LIBRARY_NAME(base) base".dylib"
+#else
+    #define LIBRARY_NAME(base) base".so"
+#endif
+    void* hLib;
+#if __TBB_ARENA_PER_MASTER
+#if __linux__
+    #define RML_LIBRARY_NAME(base) LIBRARY_NAME(base) ".1"
+#else
+    #define RML_LIBRARY_NAME(base) LIBRARY_NAME(base)
+#endif
+    hLib = dlopen(RML_LIBRARY_NAME("libirml"), RTLD_LAZY);
+    if ( !hLib )
+        hLib = dlopen(RML_LIBRARY_NAME("libirml_debug"), RTLD_LAZY);
+    if ( !hLib )
+        return Harness::Skipped;
+    dlclose(hLib);
+#endif /* __TBB_ARENA_PER_MASTER */
+#endif /* OS */
+    for( int i=1; i<100; ++i ) {  
+        REMARK("Iteration %d, loading plugin library...\n", i);
+#if _WIN32 || _WIN64
+        hLib = LoadLibrary("test_model_plugin_dll.dll");
+        if ( !hLib ) {
+#if !__TBB_NO_IMPLICIT_LINKAGE
+            report_error_in("LoadLibrary");
+            return -1;
+#else
+            return Harness::Skipped;
+#endif
+        }
+        my_plugin_call = (PLUGIN_CALL) GetProcAddress(hLib, "plugin_call");
+        if (my_plugin_call==NULL) {
+            report_error_in("GetProcAddress");
+            return -1;
+        }
+#else /* !WIN */
+        hLib = dlopen( LIBRARY_NAME("test_model_plugin_dll"), RTLD_LAZY );
+        if ( !hLib ) {
+#if !__TBB_NO_IMPLICIT_LINKAGE
+            report_error_in("dlopen");
+            return -1;
+#else
+            return Harness::Skipped;
+#endif
+        }
+        my_plugin_call = PLUGIN_CALL (dlsym(hLib, "plugin_call"));
+        if (my_plugin_call==NULL) {
+            report_error_in("dlsym");
+            return -1;
+        }
+#endif /* !WIN */
+
+        REMARK("Calling plugin method...\n");
+        my_plugin_call(MaxThread);
+
+        REMARK("Unloading plugin library...\n");
+#if _WIN32 || _WIN64
+        FreeLibrary(hLib);
+#else
+        dlclose(hLib);
+#endif
+    } // end for(1,100)
+
+    return Harness::Done;
+#else
+    return Harness::Skipped;
+#endif /* !RML_USE_WCRM */
+}
+
+#endif//_USRDLL
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_mutex.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_mutex.cpp
new file mode 100644 (file)
index 0000000..0e0ecb2
--- /dev/null
@@ -0,0 +1,628 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+//------------------------------------------------------------------------
+// Test TBB mutexes when used with parallel_for.h
+//
+// Usage: test_Mutex.exe [-v] nthread
+//
+// The -v option causes timing information to be printed.
+//
+// Compile with _OPENMP and -openmp
+//------------------------------------------------------------------------
+#include "tbb/spin_mutex.h"
+#include "tbb/critical_section.h"
+#include "tbb/spin_rw_mutex.h"
+#include "tbb/queuing_rw_mutex.h"
+#include "tbb/queuing_mutex.h"
+#include "tbb/mutex.h"
+#include "tbb/recursive_mutex.h"
+#include "tbb/null_mutex.h"
+#include "tbb/null_rw_mutex.h"
+#include "tbb/parallel_for.h"
+#include "tbb/blocked_range.h"
+#include "tbb/tick_count.h"
+#include "tbb/atomic.h"
+#include "harness.h"
+#include <cstdlib>
+#include <cstdio>
+#if _OPENMP
+#include "test/OpenMP_Mutex.h"
+#endif /* _OPENMP */
+#include "tbb/tbb_profiling.h"
+
+#ifndef TBB_TEST_LOW_WORKLOAD
+    #define TBB_TEST_LOW_WORKLOAD TBB_USE_THREADING_TOOLS
+#endif
+
+// This test deliberately avoids a "using tbb" statement,
+// so that the error of putting types in the wrong namespace will be caught.
+
+template<typename M>
+struct Counter {
+    typedef M mutex_type;
+    M mutex;
+    volatile long value;
+};
+
+//! Function object for use with parallel_for.h.
+template<typename C>
+struct AddOne: NoAssign {
+    C& counter;
+    /** Increments counter once for each iteration in the iteration space. */
+    void operator()( tbb::blocked_range<size_t>& range ) const {
+        for( size_t i=range.begin(); i!=range.end(); ++i ) {
+            if( i&1 ) {
+                // Try implicit acquire and explicit release
+                typename C::mutex_type::scoped_lock lock(counter.mutex);
+                counter.value = counter.value+1;
+                lock.release();
+            } else {
+                // Try explicit acquire and implicit release
+                typename C::mutex_type::scoped_lock lock;
+                lock.acquire(counter.mutex);
+                counter.value = counter.value+1;
+            }
+        }
+    }
+    AddOne( C& counter_ ) : counter(counter_) {}
+};
+
+//! Adaptor for using ISO C++0x style mutex as a TBB-style mutex.
+template<typename M>
+class TBB_MutexFromISO_Mutex {
+    M my_iso_mutex;
+public:
+    typedef TBB_MutexFromISO_Mutex mutex_type;
+
+    class scoped_lock;
+    friend class scoped_lock;
+
+    class scoped_lock {
+        mutex_type* my_mutex;
+    public:
+        scoped_lock() : my_mutex(NULL) {}
+        scoped_lock( mutex_type& m ) : my_mutex(NULL) {
+            acquire(m);
+        }
+        scoped_lock( mutex_type& m, bool is_writer ) : my_mutex(NULL) {
+            acquire(m,is_writer);
+        }
+        void acquire( mutex_type& m ) {
+            m.my_iso_mutex.lock();
+            my_mutex = &m;
+        }
+        bool try_acquire( mutex_type& m ) {
+            if( m.my_iso_mutex.try_lock() ) {
+                my_mutex = &m;
+                return true;
+            } else {
+                return false;
+            }
+        }
+        void release() {
+            my_mutex->my_iso_mutex.unlock();
+            my_mutex = NULL;
+        }
+
+        // Methods for reader-writer mutex
+        // These methods can be instantiated only if M supports lock_read() and try_lock_read().
+        
+        void acquire( mutex_type& m, bool is_writer ) {
+            if( is_writer ) m.my_iso_mutex.lock();
+            else m.my_iso_mutex.lock_read();
+            my_mutex = &m;
+        } 
+        bool try_acquire( mutex_type& m, bool is_writer ) {
+            if( is_writer ? m.my_iso_mutex.try_lock() : m.my_iso_mutex.try_lock_read() ) {
+                my_mutex = &m;
+                return true;
+            } else {
+                return false;
+            }
+        }
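+        // Upgrade and downgrade are emulated by fully releasing and re-acquiring the
+        // underlying ISO mutex, so both return false to signal that the lock was not
+        // held continuously during the transition.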
+        bool upgrade_to_writer() {
+            my_mutex->my_iso_mutex.unlock();
+            my_mutex->my_iso_mutex.lock(); 
+            return false;
+        }
+        bool downgrade_to_reader() {
+            my_mutex->my_iso_mutex.unlock();
+            my_mutex->my_iso_mutex.lock_read(); 
+            return false;
+        }
+        ~scoped_lock() {
+            if( my_mutex ) 
+                release();
+        }
+    };    
+  
+    static const bool is_recursive_mutex = M::is_recursive_mutex;
+    static const bool is_rw_mutex = M::is_rw_mutex;
+};
+
+namespace tbb {
+    namespace profiling {
+        template<typename M>
+        void set_name( const TBB_MutexFromISO_Mutex<M>&, const char* ) {}  
+    }
+}
+
+//! Generic test of a TBB mutex type M.
+/** Does not test features specific to reader-writer locks. */
+template<typename M>
+void Test( const char * name ) {
+    REMARK("%s time = ",name);
+    Counter<M> counter;
+    counter.value = 0;
+    tbb::profiling::set_name(counter.mutex, name);
+#if TBB_TEST_LOW_WORKLOAD
+    const int n = 10000;
+#else
+    const int n = 100000;
+#endif /* TBB_TEST_LOW_WORKLOAD */
+    tbb::tick_count t0 = tbb::tick_count::now();
+    tbb::parallel_for(tbb::blocked_range<size_t>(0,n,n/10),AddOne<Counter<M> >(counter));
+    tbb::tick_count t1 = tbb::tick_count::now();
+    REMARK("%g usec\n",(t1-t0).seconds());
+    if( counter.value!=n )
+        REPORT("ERROR for %s: counter.value=%ld\n",name,counter.value);
+}
+
+template<typename M, size_t N>
+struct Invariant {
+    typedef M mutex_type;
+    M mutex;
+    const char* mutex_name;
+    volatile long value[N];
+    volatile long single_value;
+    Invariant( const char* mutex_name_ ) :
+        mutex_name(mutex_name_)
+    {
+        single_value = 0;
+        for( size_t k=0; k<N; ++k )
+            value[k] = 0;
+        tbb::profiling::set_name(mutex, mutex_name_);
+    }
+    void update() {
+        for( size_t k=0; k<N; ++k )
+            ++value[k];
+    }
+    bool value_is( long expected_value ) const {
+        long tmp;
+        for( size_t k=0; k<N; ++k )
+            if( (tmp=value[k])!=expected_value ) {
+                REPORT("ERROR: %ld!=%ld\n", tmp, expected_value);
+                return false;
+            }
+        return true;
+    }
+    bool is_okay() {
+        return value_is( value[0] );
+    }
+};
+
+//! Function object for use with parallel_for.h.
+template<typename I>
+struct TwiddleInvariant: NoAssign {
+    I& invariant;
+    /** Increments counter once for each iteration in the iteration space. */
+    void operator()( tbb::blocked_range<size_t>& range ) const {
+        for( size_t i=range.begin(); i!=range.end(); ++i ) {
+            //! Every 8th access is a write access
+            bool write = (i%8)==7;
+            bool okay = true;
+            bool lock_kept = true;
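+            // Writes with i%16==7 also exercise downgrade_to_reader; reads with
+            // i%8==3 exercise upgrade_to_writer.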
+            if( (i/8)&1 ) {
+                // Try implicit acquire and explicit release
+                typename I::mutex_type::scoped_lock lock(invariant.mutex,write);
+                if( write ) {
+                    long my_value = invariant.value[0];
+                    invariant.update();
+                    if( i%16==7 ) {
+                        lock_kept = lock.downgrade_to_reader();
+                        if( !lock_kept )
+                            my_value = invariant.value[0] - 1;
+                        okay = invariant.value_is(my_value+1);
+                    }
+                } else {
+                    okay = invariant.is_okay();
+                    if( i%8==3 ) {
+                        long my_value = invariant.value[0];
+                        lock_kept = lock.upgrade_to_writer();
+                        if( !lock_kept )
+                            my_value = invariant.value[0];
+                        invariant.update();
+                        okay = invariant.value_is(my_value+1);
+                    }
+                }
+                lock.release();
+            } else {
+                // Try explicit acquire and implicit release
+                typename I::mutex_type::scoped_lock lock;
+                lock.acquire(invariant.mutex,write);
+                if( write ) {
+                    long my_value = invariant.value[0];
+                    invariant.update();
+                    if( i%16==7 ) {
+                        lock_kept = lock.downgrade_to_reader();
+                        if( !lock_kept )
+                            my_value = invariant.value[0] - 1;
+                        okay = invariant.value_is(my_value+1);
+                    }
+                } else {
+                    okay = invariant.is_okay();
+                    if( i%8==3 ) {
+                        long my_value = invariant.value[0];
+                        lock_kept = lock.upgrade_to_writer();
+                        if( !lock_kept )
+                            my_value = invariant.value[0];
+                        invariant.update();
+                        okay = invariant.value_is(my_value+1);
+                    }
+                }
+            }
+            if( !okay ) {
+                REPORT( "ERROR for %s at %ld: %s %s %s %s\n",invariant.mutex_name, long(i),
+                             write?"write,":"read,", write?(i%16==7?"downgrade,":""):(i%8==3?"upgrade,":""),
+                             lock_kept?"lock kept,":"lock not kept,", (i/8)&1?"imp/exp":"exp/imp" );
+            }
+        }
+    }
+    TwiddleInvariant( I& invariant_ ) : invariant(invariant_) {}
+};
+
+/** This test is generic so that we can test any other kinds of ReaderWriter locks we write later. */
+template<typename M>
+void TestReaderWriterLock( const char * mutex_name ) {
+    REMARK( "%s readers & writers time = ", mutex_name );
+    Invariant<M,8> invariant(mutex_name);
+#if TBB_TEST_LOW_WORKLOAD
+    const size_t n = 10000;
+#else
+    const size_t n = 500000;
+#endif /* TBB_TEST_LOW_WORKLOAD */
+    tbb::tick_count t0 = tbb::tick_count::now();
+    tbb::parallel_for(tbb::blocked_range<size_t>(0,n,n/100),TwiddleInvariant<Invariant<M,8> >(invariant));
+    tbb::tick_count t1 = tbb::tick_count::now();
+    // There is either a writer or a reader upgraded to a writer for each 4th iteration
+    long expected_value = n/4;
+    if( !invariant.value_is(expected_value) )
+        REPORT("ERROR for %s: final invariant value is wrong\n",mutex_name);
+    REMARK( "%g usec\n", (t1-t0).seconds() );
+}
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // Suppress "conditional expression is constant" warning.
+    #pragma warning( push )
+    #pragma warning( disable: 4127 )
+#endif
+
+/** Test try_acquire_reader functionality of a non-reenterable reader-writer mutex */
+template<typename M>
+void TestTryAcquireReader_OneThread( const char * mutex_name ) {
+    M tested_mutex;
+    typename M::scoped_lock lock1;
+    if( M::is_rw_mutex ) {
+        if( lock1.try_acquire(tested_mutex, false) )
+            lock1.release();
+        else
+            REPORT("ERROR for %s: try_acquire failed though it should not\n", mutex_name);
+        {
+            typename M::scoped_lock lock2(tested_mutex, false);
+            if( lock1.try_acquire(tested_mutex) )
+                REPORT("ERROR for %s: try_acquire succeeded though it should not\n", mutex_name);
+            lock2.release();
+            lock2.acquire(tested_mutex, true);
+            if( lock1.try_acquire(tested_mutex, false) )
+                REPORT("ERROR for %s: try_acquire succeeded though it should not\n", mutex_name);
+        }
+        if( lock1.try_acquire(tested_mutex, false) )
+            lock1.release();
+        else
+            REPORT("ERROR for %s: try_acquire failed though it should not\n", mutex_name);
+    }
+}
+
+/** Test try_acquire functionality of a non-reenterable mutex */
+template<typename M>
+void TestTryAcquire_OneThread( const char * mutex_name ) {
+    M tested_mutex;
+    typename M::scoped_lock lock1;
+    if( lock1.try_acquire(tested_mutex) )
+        lock1.release();
+    else
+        REPORT("ERROR for %s: try_acquire failed though it should not\n", mutex_name);
+    {
+        if( M::is_recursive_mutex ) {
+            typename M::scoped_lock lock2(tested_mutex);
+            if( lock1.try_acquire(tested_mutex) )
+                lock1.release();
+            else
+                REPORT("ERROR for %s: try_acquire on recursive lock failed though it should not\n", mutex_name);
+            // Note: on Windows both tbb::mutex and tbb::recursive_mutex are recursive.
+        } else {
+            typename M::scoped_lock lock2(tested_mutex);
+            if( lock1.try_acquire(tested_mutex) )
+                REPORT("ERROR for %s: try_acquire succeeded though it should not\n", mutex_name);
+        }
+    }
+    if( lock1.try_acquire(tested_mutex) )
+        lock1.release();
+    else
+        REPORT("ERROR for %s: try_acquire failed though it should not\n", mutex_name);
+} 
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    #pragma warning( pop )
+#endif
+
+const int RecurN = 4;
+int RecurArray[ RecurN ];
+tbb::recursive_mutex RecurMutex[ RecurN ];
+
+struct RecursiveAcquisition {
+    /** x = number being decoded in base N
+        max_lock = index of highest lock acquired so far
+        mask = bit mask; ith bit set if lock i has been acquired. */
+    void Body( size_t x, int max_lock=-1, unsigned int mask=0 ) const
+    {
+        int i = (int) (x % RecurN);
+        bool first = (mask&1U<<i)==0;
+        if( first ) {
+            // first time to acquire lock
+            if( i<max_lock ) 
+                // out of order acquisition might lead to deadlock, so stop
+                return;
+            max_lock = i;
+        }
+
+        if( (i&1)!=0 ) {
+            // acquire lock on location RecurArray[i] using explict acquire
+            tbb::recursive_mutex::scoped_lock r_lock;
+            r_lock.acquire( RecurMutex[i] );
+            int a = RecurArray[i];
+            ASSERT( (a==0)==first, "should be either a==0 if it is the first time to acquire the lock or a!=0 otherwise" );
+            ++RecurArray[i];
+            if( x ) 
+                Body( x/RecurN, max_lock, mask|1U<<i );
+            --RecurArray[i];
+            ASSERT( a==RecurArray[i], "a is not equal to RecurArray[i]" );                        
+
+            // release lock on location RecurArray[i] using explicit release; otherwise, use implicit one
+            if( (i&2)!=0 ) r_lock.release();
+        } else {
+            // acquire lock on location RecurArray[i] using implicit acquire
+            tbb::recursive_mutex::scoped_lock r_lock( RecurMutex[i] );
+            int a = RecurArray[i];
+
+            ASSERT( (a==0)==first, "should be either a==0 if it is the first time to acquire the lock or a!=0 otherwise" );
+
+            ++RecurArray[i];
+            if( x ) 
+                Body( x/RecurN, max_lock, mask|1U<<i );
+            --RecurArray[i];
+
+            ASSERT( a==RecurArray[i], "a is not equal to RecurArray[i]" );                        
+
+            // release lock on location RecurArray[i] using explicit release; otherwise, use implicit one
+            if( (i&2)!=0 ) r_lock.release();
+        }
+    }
+
+    void operator()( const tbb::blocked_range<size_t> &r ) const
+    {   
+        for( size_t x=r.begin(); x<r.end(); x++ ) {
+            Body( x );
+        }
+    }
+};
+
+/** This test is generic so that we may test other kinds of recursive mutexes.*/
+template<typename M>
+void TestRecursiveMutex( const char * mutex_name )
+{
+    for ( int i = 0; i < RecurN; ++i ) {
+        tbb::profiling::set_name(RecurMutex[i], mutex_name);
+    }
+    tbb::tick_count t0 = tbb::tick_count::now();
+    tbb::parallel_for(tbb::blocked_range<size_t>(0,10000,500), RecursiveAcquisition());
+    tbb::tick_count t1 = tbb::tick_count::now();
+    REMARK( "%s recursive mutex time = %g usec\n", mutex_name, (t1-t0).seconds() );
+}
+
+template<typename C>
+struct NullRecursive: NoAssign {
+    void recurse_till( size_t i, size_t till ) const {
+        if( i==till ) {
+            counter.value = counter.value+1;
+            return;
+        }
+        if( i&1 ) {
+            typename C::mutex_type::scoped_lock lock2(counter.mutex);
+            recurse_till( i+1, till );
+            lock2.release();
+        } else {
+            typename C::mutex_type::scoped_lock lock2;
+            lock2.acquire(counter.mutex);
+            recurse_till( i+1, till );
+        }
+    }
+
+    void operator()( tbb::blocked_range<size_t>& range ) const {
+        typename C::mutex_type::scoped_lock lock(counter.mutex);
+        recurse_till( range.begin(), range.end() );
+    }
+    NullRecursive( C& counter_ ) : counter(counter_) {
+        ASSERT( C::mutex_type::is_recursive_mutex, "Null mutex should be a recursive mutex." );
+    }
+    C& counter;
+};
+
+template<typename M>
+struct NullUpgradeDowngrade: NoAssign {
+    void operator()( tbb::blocked_range<size_t>& range ) const {
+        typename M::scoped_lock lock2;
+        for( size_t i=range.begin(); i!=range.end(); ++i ) {
+            if( i&1 ) {
+                typename M::scoped_lock lock1(my_mutex, true) ;
+                if( lock1.downgrade_to_reader()==false )
+                    REPORT("ERROR for %s: downgrade should always succeed\n", name);
+            } else {
+                lock2.acquire( my_mutex, false );
+                if( lock2.upgrade_to_writer()==false )
+                    REPORT("ERROR for %s: upgrade should always succeed\n", name);
+                lock2.release();
+            }
+        }
+    }
+
+    NullUpgradeDowngrade( M& m_, const char* n_ ) : my_mutex(m_), name(n_) {}
+    M& my_mutex;
+    const char* name;
+} ;
+
+template<typename M>
+void TestNullMutex( const char * name ) {
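+    // A null mutex performs no real locking; this only checks that the usage patterns
+    // compile and run, so the final counter value is not verified.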
+    Counter<M> counter;
+    counter.value = 0;
+    const int n = 100;
+    REMARK("%s ",name);
+    {
+        tbb::parallel_for(tbb::blocked_range<size_t>(0,n,10),AddOne<Counter<M> >(counter));
+    }
+    counter.value = 0;
+    {
+        tbb::parallel_for(tbb::blocked_range<size_t>(0,n,10),NullRecursive<Counter<M> >(counter));
+    }
+
+}
+
+template<typename M>
+void TestNullRWMutex( const char * name ) {
+    REMARK("%s ",name);
+    const int n = 100;
+    M m;
+    tbb::parallel_for(tbb::blocked_range<size_t>(0,n,10),NullUpgradeDowngrade<M>(m, name));
+}
+
+//! Test ISO C++0x compatibility portion of TBB mutex 
+template<typename M>
+void TestISO( const char * name ) {
+    typedef TBB_MutexFromISO_Mutex<M> tbb_from_iso;
+    Test<tbb_from_iso>( name );
+}
+
+//! Test ISO C++0x try_lock functionality of a non-reenterable mutex
+template<typename M>
+void TestTryAcquire_OneThreadISO( const char * name ) {
+    typedef TBB_MutexFromISO_Mutex<M> tbb_from_iso;
+    TestTryAcquire_OneThread<tbb_from_iso>( name );
+}
+
+//! Test ISO-like C++0x compatibility portion of TBB reader-writer mutex 
+template<typename M>
+void TestReaderWriterLockISO( const char * name ) {
+    typedef TBB_MutexFromISO_Mutex<M> tbb_from_iso;
+    TestReaderWriterLock<tbb_from_iso>( name );
+    TestTryAcquireReader_OneThread<tbb_from_iso>( name );
+}
+
+//! Test ISO C++0x compatibility portion of TBB recursive mutex 
+template<typename M>
+void TestRecursiveMutexISO( const char * name ) {
+    typedef TBB_MutexFromISO_Mutex<M> tbb_from_iso;
+    TestRecursiveMutex<tbb_from_iso>(name); 
+}
+
+#include "tbb/task_scheduler_init.h"
+
+int TestMain () {
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        tbb::task_scheduler_init init( p );
+        REMARK( "testing with %d workers\n", static_cast<int>(p) );
+#if TBB_TEST_LOW_WORKLOAD
+        // The amount of work is decreased in this mode to bring the length 
+        // of the runs under tools into the tolerable limits.
+        const int n = 1;
+#else
+        const int n = 3;
+#endif
+        // Run each test several times.
+        for( int i=0; i<n; ++i ) {
+            TestNullMutex<tbb::null_mutex>( "Null Mutex" );
+            TestNullMutex<tbb::null_rw_mutex>( "Null RW Mutex" );
+            TestNullRWMutex<tbb::null_rw_mutex>( "Null RW Mutex" );
+            Test<tbb::spin_mutex>( "Spin Mutex" );
+#if _OPENMP
+            Test<OpenMP_Mutex>( "OpenMP_Mutex" );
+#endif /* _OPENMP */
+            Test<tbb::queuing_mutex>( "Queuing Mutex" );
+            Test<tbb::mutex>( "Wrapper Mutex" );
+            Test<tbb::recursive_mutex>( "Recursive Mutex" );
+            Test<tbb::queuing_rw_mutex>( "Queuing RW Mutex" );
+            Test<tbb::spin_rw_mutex>( "Spin RW Mutex" );
+
+            TestTryAcquire_OneThread<tbb::spin_mutex>("Spin Mutex");
+            TestTryAcquire_OneThread<tbb::queuing_mutex>("Queuing Mutex");
+#if USE_PTHREAD 
+            // under ifdef because on Windows tbb::mutex is reenterable and the test will fail
+            TestTryAcquire_OneThread<tbb::mutex>("Wrapper Mutex");
+#endif /* USE_PTHREAD */
+            TestTryAcquire_OneThread<tbb::recursive_mutex>( "Recursive Mutex" );
+            TestTryAcquire_OneThread<tbb::spin_rw_mutex>("Spin RW Mutex"); // only tests try_acquire for writers
+            TestTryAcquire_OneThread<tbb::queuing_rw_mutex>("Queuing RW Mutex"); // only tests try_acquire for writers
+            TestTryAcquireReader_OneThread<tbb::spin_rw_mutex>("Spin RW Mutex"); 
+            TestTryAcquireReader_OneThread<tbb::queuing_rw_mutex>("Queuing RW Mutex"); 
+
+            TestReaderWriterLock<tbb::queuing_rw_mutex>( "Queuing RW Mutex" );
+            TestReaderWriterLock<tbb::spin_rw_mutex>( "Spin RW Mutex" );
+
+            TestRecursiveMutex<tbb::recursive_mutex>( "Recursive Mutex" );
+
+            // Test ISO C++0x interface  
+            TestISO<tbb::spin_mutex>( "ISO Spin Mutex" );
+            TestISO<tbb::mutex>( "ISO Mutex" );
+            TestISO<tbb::spin_rw_mutex>( "ISO Spin RW Mutex" );
+            TestISO<tbb::recursive_mutex>( "ISO Recursive Mutex" );
+            TestISO<tbb::critical_section>( "ISO Critical Section" );
+            TestTryAcquire_OneThreadISO<tbb::spin_mutex>( "ISO Spin Mutex" );
+#if USE_PTHREAD 
+            // under ifdef because on Windows tbb::mutex is reenterable and the test will fail
+            TestTryAcquire_OneThreadISO<tbb::mutex>( "ISO Mutex" );
+#endif /* USE_PTHREAD */
+            TestTryAcquire_OneThreadISO<tbb::spin_rw_mutex>( "ISO Spin RW Mutex" );
+            TestTryAcquire_OneThreadISO<tbb::recursive_mutex>( "ISO Recursive Mutex" );
+            TestTryAcquire_OneThreadISO<tbb::critical_section>( "ISO Critical Section" );
+            TestReaderWriterLockISO<tbb::spin_rw_mutex>( "ISO Spin RW Mutex" );
+            TestRecursiveMutexISO<tbb::recursive_mutex>( "ISO Recursive Mutex" );
+        }
+        REMARK( "calling destructor for task_scheduler_init\n" );
+    }
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_mutex_native_threads.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_mutex_native_threads.cpp
new file mode 100644 (file)
index 0000000..ffda4e4
--- /dev/null
@@ -0,0 +1,224 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/spin_mutex.h"
+#include "tbb/queuing_mutex.h"
+#include "tbb/queuing_rw_mutex.h"
+#include "tbb/spin_rw_mutex.h"
+#include "tbb/tick_count.h"
+#include "tbb/atomic.h"
+
+#include "harness.h"
+
+// This test deliberately avoids a "using tbb" statement,
+// so that the error of putting types in the wrong namespace will be caught.
+
+template<typename M>
+struct Counter {
+    typedef M mutex_type;
+    M mutex;
+    volatile long value; 
+    void flog_once( size_t mode );
+};
+
+template<typename M>
+void Counter<M>::flog_once(size_t mode)
+/** Increments counter once for each iteration in the iteration space. */
+{
+    if( mode&1 ) {
+        // Try implicit acquire and explicit release
+        typename mutex_type::scoped_lock lock(mutex);
+        value = value+1;
+        lock.release();
+    } else {
+        // Try explicit acquire and implicit release
+        typename mutex_type::scoped_lock lock;
+        lock.acquire(mutex);
+        value = value+1;
+    }
+}
+
+template<typename M, long N>
+struct Invariant {
+    typedef M mutex_type;
+    M mutex;
+    const char* mutex_name;
+    volatile long value[N];
+    volatile long single_value;
+    Invariant( const char* mutex_name_ ) :
+        mutex_name(mutex_name_)
+    {
+        single_value = 0;
+        for( long k=0; k<N; ++k )
+            value[k] = 0;
+    }
+    void update() {
+        for( long k=0; k<N; ++k )
+            ++value[k];
+    }
+    bool value_is( long expected_value ) const {
+        long tmp;
+        for( long k=0; k<N; ++k )
+            if( (tmp=value[k])!=expected_value ) {
+                REPORT("ERROR: %ld!=%ld\n", tmp, expected_value);
+                return false;
+            }
+        return true;
+    }
+    bool is_okay() {
+        return value_is( value[0] );
+    }
+    void flog_once( size_t mode ); 
+};
+
+template<typename M, long N>
+void Invariant<M,N>::flog_once( size_t mode )
+{
+    //! Every 8th access is a write access
+    bool write = (mode%8)==7;
+    bool okay = true;
+    bool lock_kept = true;
+    if( (mode/8)&1 ) {
+        // Try implicit acquire and explicit release
+        typename mutex_type::scoped_lock lock(mutex,write);
+        if( write ) {
+            long my_value = value[0];
+            update();
+            if( mode%16==7 ) {
+                lock_kept = lock.downgrade_to_reader();
+                if( !lock_kept )
+                    my_value = value[0] - 1;
+                okay = value_is(my_value+1);
+            }
+        } else {
+            okay = is_okay();
+            if( mode%8==3 ) {
+                long my_value = value[0];
+                lock_kept = lock.upgrade_to_writer();
+                if( !lock_kept )
+                    my_value = value[0];
+                update();
+                okay = value_is(my_value+1);
+            }
+        }
+        lock.release();
+    } else {
+        // Try explicit acquire and implicit release
+        typename mutex_type::scoped_lock lock;
+        lock.acquire(mutex,write);
+        if( write ) {
+            long my_value = value[0];
+            update();
+            if( mode%16==7 ) {
+                lock_kept = lock.downgrade_to_reader();
+                if( !lock_kept )
+                    my_value = value[0] - 1;
+                okay = value_is(my_value+1);
+            }
+        } else {
+            okay = is_okay();
+            if( mode%8==3 ) {
+                long my_value = value[0];
+                lock_kept = lock.upgrade_to_writer();
+                if( !lock_kept )
+                    my_value = value[0];
+                update();
+                okay = value_is(my_value+1);
+            }
+        }
+    }
+    if( !okay ) {
+        REPORT( "ERROR for %s at %ld: %s %s %s %s\n",mutex_name, long(mode),
+                write?"write,":"read,", write?(mode%16==7?"downgrade,":""):(mode%8==3?"upgrade,":""),
+                lock_kept?"lock kept,":"lock not kept,", (mode/8)&1?"imp/exp":"exp/imp" );
+    }
+}
+
+static tbb::atomic<size_t> Order;
+
+template<typename State, long TestSize>
+struct Work: NoAssign {
+    static const size_t chunk = 100;
+    State& state;
+    Work( State& state_ ) : state(state_) {}
+    void operator()( int ) const {
+        size_t step;
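+        // Each thread repeatedly claims a chunk of iteration indices from the shared
+        // atomic counter and processes them until the whole test range is covered.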
+        while( (step=Order.fetch_and_add<tbb::acquire>(chunk))<TestSize )
+            for( size_t i=0; i<chunk && step<TestSize; ++i, ++step ) 
+                state.flog_once(step);
+    }
+};
+
+//! Generic test of a TBB Mutex type M.
+/** Does not test features specific to reader-writer locks. */
+template<typename M>
+void Test( const char * name, int nthread ) {
+    REMARK("testing %s\n",name);
+    Counter<M> counter;
+    counter.value = 0;
+    Order = 0;
+    const long test_size = 100000;
+    tbb::tick_count t0 = tbb::tick_count::now();
+    NativeParallelFor( nthread, Work<Counter<M>, test_size>(counter) );
+    tbb::tick_count t1 = tbb::tick_count::now();
+
+    REMARK("%s time = %g usec\n",name, (t1-t0).seconds() );
+    if( counter.value!=test_size )
+        REPORT("ERROR for %s: counter.value=%ld != %ld=test_size\n",name,counter.value,test_size);
+}
+
+
+//! Generic test of TBB ReaderWriterMutex type M
+template<typename M>
+void TestReaderWriter( const char * mutex_name, int nthread ) {
+    REMARK("testing %s\n",mutex_name);
+    Invariant<M,8> invariant(mutex_name);
+    Order = 0;
+    static const long test_size = 1000000;
+    tbb::tick_count t0 = tbb::tick_count::now();
+    NativeParallelFor( nthread, Work<Invariant<M,8>, test_size>(invariant) );
+    tbb::tick_count t1 = tbb::tick_count::now();
+    // There is either a writer or a reader upgraded to a writer for each 4th iteration
+    long expected_value = test_size/4;
+    if( !invariant.value_is(expected_value) )
+        REPORT("ERROR for %s: final invariant value is wrong\n",mutex_name);
+    REMARK("%s readers & writers time = %g usec\n",mutex_name,(t1-t0).seconds());
+}
+
+int TestMain () {
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        REMARK( "testing with %d threads\n", p );
+        Test<tbb::spin_mutex>( "spin_mutex", p );
+        Test<tbb::queuing_mutex>( "queuing_mutex", p );
+        Test<tbb::queuing_rw_mutex>( "queuing_rw_mutex", p );
+        Test<tbb::spin_rw_mutex>( "spin_rw_mutex", p );
+        TestReaderWriter<tbb::queuing_rw_mutex>( "queuing_rw_mutex", p );
+        TestReaderWriter<tbb::spin_rw_mutex>( "spin_rw_mutex", p );
+    }
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_openmp.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_openmp.cpp
new file mode 100644 (file)
index 0000000..9d74a41
--- /dev/null
@@ -0,0 +1,230 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Test mixing OpenMP and TBB
+
+/* SCR #471 
+ Below is a workaround for compiling the test within the environment of the Intel
+ Compiler but with the Microsoft Compiler. In that case the wrong "omp.h" file is
+ included and the manifest section is missing from the .exe file - restoring it here.
+
+ As of Visual Studio 2010, crtassem.h is no longer shipped.
+ */
+#if !defined(__INTEL_COMPILER) && _MSC_VER >= 1400 && _MSC_VER < 1600
+    #include <crtassem.h>
+    #if !defined(_OPENMP)
+        #define _OPENMP
+        #if defined(_DEBUG)
+            #pragma comment(lib, "vcompd")
+        #else   // _DEBUG
+            #pragma comment(lib, "vcomp")
+        #endif  // _DEBUG
+    #endif // _OPENMP
+
+    #if defined(_DEBUG)
+        #if defined(_M_IX86)
+            #pragma comment(linker,"/manifestdependency:\"type='win32' "            \
+                "name='" __LIBRARIES_ASSEMBLY_NAME_PREFIX ".DebugOpenMP' "         \
+                "version='" _CRT_ASSEMBLY_VERSION "' "                          \
+                "processorArchitecture='x86' "                                  \
+                "publicKeyToken='" _VC_ASSEMBLY_PUBLICKEYTOKEN "'\"")
+        #elif defined(_M_AMD64)
+            #pragma comment(linker,"/manifestdependency:\"type='win32' "            \
+                "name='" __LIBRARIES_ASSEMBLY_NAME_PREFIX ".DebugOpenMP' "         \
+                "version='" _CRT_ASSEMBLY_VERSION "' "                          \
+                "processorArchitecture='amd64' "                                  \
+                "publicKeyToken='" _VC_ASSEMBLY_PUBLICKEYTOKEN "'\"")
+        #elif defined(_M_IA64)
+            #pragma comment(linker,"/manifestdependency:\"type='win32' "            \
+                "name='" __LIBRARIES_ASSEMBLY_NAME_PREFIX ".DebugOpenMP' "         \
+                "version='" _CRT_ASSEMBLY_VERSION "' "                          \
+                "processorArchitecture='ia64' "                                  \
+                "publicKeyToken='" _VC_ASSEMBLY_PUBLICKEYTOKEN "'\"")
+        #endif
+    #else   // _DEBUG
+        #if defined(_M_IX86)
+            #pragma comment(linker,"/manifestdependency:\"type='win32' "            \
+                "name='" __LIBRARIES_ASSEMBLY_NAME_PREFIX ".OpenMP' "              \
+                "version='" _CRT_ASSEMBLY_VERSION "' "                          \
+                "processorArchitecture='x86' "                                  \
+                "publicKeyToken='" _VC_ASSEMBLY_PUBLICKEYTOKEN "'\"")
+        #elif defined(_M_AMD64)
+            #pragma comment(linker,"/manifestdependency:\"type='win32' "            \
+                "name='" __LIBRARIES_ASSEMBLY_NAME_PREFIX ".OpenMP' "              \
+                "version='" _CRT_ASSEMBLY_VERSION "' "                          \
+                "processorArchitecture='amd64' "                                  \
+                "publicKeyToken='" _VC_ASSEMBLY_PUBLICKEYTOKEN "'\"")
+        #elif defined(_M_IA64)
+            #pragma comment(linker,"/manifestdependency:\"type='win32' "            \
+                "name='" __LIBRARIES_ASSEMBLY_NAME_PREFIX ".OpenMP' "              \
+                "version='" _CRT_ASSEMBLY_VERSION "' "                          \
+                "processorArchitecture='ia64' "                                  \
+                "publicKeyToken='" _VC_ASSEMBLY_PUBLICKEYTOKEN "'\"")
+        #endif
+    #endif  // _DEBUG
+    #define _OPENMP_NOFORCE_MANIFEST
+#endif
+
+#include <omp.h>
+
+
+typedef short T;
+
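+//! Reference implementation of discrete convolution: c[i] = sum over j of a[j]*b[i-j].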
+void SerialConvolve( T c[], const T a[], int m, const T b[], int n ) {
+    for( int i=0; i<m+n-1; ++i ) {
+        int start = i<n ? 0 : i-n+1;
+        int finish = i<m ? i+1 : m; 
+        T sum = 0;
+        for( int j=start; j<finish; ++j ) 
+            sum += a[j]*b[i-j];
+        c[i] = sum;
+    }
+}
+
+#include "tbb/blocked_range.h"
+#include "tbb/parallel_for.h"
+#include "tbb/parallel_reduce.h"
+#include "tbb/task_scheduler_init.h"
+#include "harness.h"
+
+using namespace tbb;
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // Suppress overzealous warning about short+=short
+    #pragma warning( push )
+    #pragma warning( disable: 4244 )
+#endif
+
+class InnerBody: NoAssign {
+    const T* my_a;
+    const T* my_b;
+    const int i;
+public:
+    T sum;
+    InnerBody( T /*c*/[], const T a[], const T b[], int i ) :
+        my_a(a), my_b(b), sum(0), i(i)
+    {}
+    InnerBody( InnerBody& x, split ) :
+        my_a(x.my_a), my_b(x.my_b), sum(0), i(x.i)
+    { 
+    }
+    void join( InnerBody& x ) {sum += x.sum;}
+    void operator()( const blocked_range<int>& range ) {
+        for( int j=range.begin(); j!=range.end(); ++j ) 
+            sum += my_a[j]*my_b[i-j];
+    }
+};
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    #pragma warning( pop )
+#endif
+
+//! Test OpenMP loop around TBB loop
+void OpenMP_TBB_Convolve( T c[], const T a[], int m, const T b[], int n ) {
+    REMARK("testing OpenMP loop around TBB loop\n");
+#pragma omp parallel 
+    {
+        task_scheduler_init init;
+#pragma omp for
+        for( int i=0; i<m+n-1; ++i ) {
+            int start = i<n ? 0 : i-n+1;
+            int finish = i<m ? i+1 : m; 
+            InnerBody body(c,a,b,i);
+            parallel_reduce( blocked_range<int>(start,finish,10), body );
+            c[i] = body.sum;
+        }
+    }
+}
+
+class OuterBody: NoAssign {
+    const T* my_a;
+    const T* my_b;
+    T* my_c;
+    const int m;
+    const int n;
+public:
+    T sum;
+    OuterBody( T c[], const T a[], int m_, const T b[], int n_ ) :
+        my_c(c), my_a(a), my_b(b), m(m_), n(n_)
+    {}
+    void operator()( const blocked_range<int>& range ) const {
+        for( int i=range.begin(); i!=range.end(); ++i ) {
+            int start = i<n ? 0 : i-n+1;
+            int finish = i<m ? i+1 : m; 
+            T sum = 0;
+#pragma omp parallel for reduction(+:sum)
+            for( int j=start; j<finish; ++j ) 
+                sum += my_a[j]*my_b[i-j];
+            my_c[i] = sum;
+        }
+    }
+};
+
+//! Test TBB loop around OpenMP loop
+void TBB_OpenMP_Convolve( T c[], const T a[], int m, const T b[], int n ) {
+    REMARK("testing TBB loop around OpenMP loop\n");
+    parallel_for( blocked_range<int>(0,m+n-1,10), OuterBody( c, a, m, b, n ) );
+}
+
+#include <stdio.h>
+
+const int M = 17*17;
+const int N = 13*13;
+
+int TestMain () {
+    MinThread = 1;
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        T a[M];
+        T b[N];
+        for( int m=1; m<=M; m*=17 ) {
+            for( int n=1; n<=N; n*=13 ) {
+                for( int i=0; i<m; ++i ) a[i] = T(1+i/5);
+                for( int i=0; i<n; ++i ) b[i] = T(1+i/7);
+                T expected[M+N];
+                SerialConvolve( expected, a, m, b, n );
+                task_scheduler_init init(p);
+                T actual[M+N];
+                for( int k = 0; k<2; ++k ) {
+                    memset( actual, -1, sizeof(actual) );
+                    switch(k) {
+                        case 0: 
+                            TBB_OpenMP_Convolve( actual, a, m, b, n ); 
+                            break;
+                        case 1: 
+                            OpenMP_TBB_Convolve( actual, a, m, b, n ); 
+                            break;
+                    }
+                    for( int i=0; i<m+n-1; ++i ) {
+                        ASSERT( actual[i]==expected[i], NULL );
+                    }
+                }
+            }
+        } 
+    }
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_do.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_do.cpp
new file mode 100644 (file)
index 0000000..42ffd53
--- /dev/null
@@ -0,0 +1,235 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/parallel_do.h"
+#include "tbb/task_scheduler_init.h"
+#include "tbb/atomic.h"
+#include "harness.h"
+#include "harness_cpu.h"
+
+#if defined(_MSC_VER) && defined(_Wp64)
+    // Workaround for overzealous compiler warnings in /Wp64 mode
+    #pragma warning (disable: 4267)
+#endif /* _MSC_VER && _Wp64 */
+
+#define N_DEPTHS     20
+
+static tbb::atomic<int> g_values_counter;
+
+class value_t {
+    size_t x;
+    value_t& operator= ( const value_t& );
+public:
+    value_t ( size_t xx ) : x(xx) { ++g_values_counter; }
+    value_t ( const value_t& v ) : x(v.value()) { ++g_values_counter; }
+    ~value_t () { --g_values_counter; }
+    size_t value() const volatile { return x; }
+};
+
+#include "harness_iterator.h"
+
+static size_t g_tasks_expected = 0;
+static tbb::atomic<size_t> g_tasks_observed;
+
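+// Total number of work items produced by a single item of the given depth:
+// an item of depth d feeds d new items of depth d-1 and counts itself,
+// so T(d) = d*T(d-1) + 1, with T(0) = 1.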
+size_t FindNumOfTasks ( size_t max_depth ) {
+    if( max_depth == 0 )
+        return 1;
+    return  max_depth * FindNumOfTasks( max_depth - 1 ) + 1;
+}
+
+//! Simplest form of the parallel_do functor object.
+class FakeTaskGeneratorBody {
+public:
+    //! The simplest form of the function call operator
+    /** It does not allow adding new tasks during its execution. **/
+    void operator() ( value_t depth ) const {
+        g_tasks_observed += FindNumOfTasks(depth.value());
+    }
+};
+
+/** Work item is passed by reference here. **/
+class FakeTaskGeneratorBody_RefVersion {
+public:
+    void operator() ( value_t& depth ) const {
+        g_tasks_observed += FindNumOfTasks(depth.value());
+    }
+};
+
+/** Work item is passed by reference to const here. **/
+class FakeTaskGeneratorBody_ConstRefVersion {
+public:
+    void operator() ( const value_t& depth ) const {
+        g_tasks_observed += FindNumOfTasks(depth.value());
+    }
+};
+
+/** Work item is passed by reference to volatile here. **/
+class FakeTaskGeneratorBody_VolatileRefVersion {
+public:
+    void operator() ( volatile value_t& depth, tbb::parallel_do_feeder<value_t>& ) const {
+        g_tasks_observed += FindNumOfTasks(depth.value());
+    }
+};
+
+void do_work ( const value_t& depth, tbb::parallel_do_feeder<value_t>& feeder ) {
+    ++g_tasks_observed;
+    size_t  d=depth.value();
+    --d;
+    for( size_t i = 0; i < depth.value(); ++i)
+        feeder.add(value_t(d));
+}
+
+//! Standard form of the parallel_do functor object.
+/** Allows adding new work items on the fly. **/
+class TaskGeneratorBody
+{
+public:
+    //! This form of the function call operator can be used when the body needs to add more work during the processing
+    void operator() ( value_t depth, tbb::parallel_do_feeder<value_t>& feeder ) const {
+        do_work(depth, feeder);
+    }
+private:
+    // Assert that parallel_do does not ever access body constructors
+    TaskGeneratorBody () {}
+    TaskGeneratorBody ( const TaskGeneratorBody& );
+    // TestBody() needs access to the default constructor
+    template<class Body, class Iterator> friend void TestBody( size_t );
+}; // class TaskGeneratorBody
+
+/** Work item is passed by reference here. **/
+class TaskGeneratorBody_RefVersion
+{
+public:
+    void operator() ( value_t& depth, tbb::parallel_do_feeder<value_t>& feeder ) const {
+        do_work(depth, feeder);
+    }
+}; // class TaskGeneratorBody
+
+/** Work item is passed as const here. Compilers must ignore the const qualifier. **/
+class TaskGeneratorBody_ConstVersion
+{
+public:
+    void operator() ( const value_t depth, tbb::parallel_do_feeder<value_t>& feeder ) const {
+        do_work(depth, feeder);
+    }
+}; // class TaskGeneratorBody
+
+/** Work item is passed by reference to const here. **/
+class TaskGeneratorBody_ConstRefVersion
+{
+public:
+    void operator() ( const value_t& depth, tbb::parallel_do_feeder<value_t>& feeder ) const {
+        do_work(depth, feeder);
+    }
+}; // class TaskGeneratorBody
+
+/** Work item is passed by reference to volatile here. **/
+class TaskGeneratorBody_VolatileRefVersion
+{
+public:
+    void operator() ( volatile value_t& depth, tbb::parallel_do_feeder<value_t>& feeder ) const {
+        do_work(const_cast<value_t&>(depth), feeder);
+    }
+}; // class TaskGeneratorBody
+
+/** Work item is passed by reference to volatile here. **/
+class TaskGeneratorBody_ConstVolatileRefVersion
+{
+public:
+    void operator() ( const volatile value_t& depth, tbb::parallel_do_feeder<value_t>& feeder ) const {
+        do_work(const_cast<value_t&>(depth), feeder);
+    }
+}; // class TaskGeneratorBody
+
+
+static value_t g_depths[N_DEPTHS] = {0, 1, 2, 3, 4, 0, 1, 0, 1, 2, 0, 1, 2, 3, 0, 1, 2, 0, 1, 2};
+
+template<class Body, class Iterator>
+void TestBody ( size_t depth ) {
+    typedef typename std::iterator_traits<Iterator>::value_type value_type;
+    value_type a_depths[N_DEPTHS] = {0, 1, 2, 3, 4, 0, 1, 0, 1, 2, 0, 1, 2, 3, 0, 1, 2, 0, 1, 2};
+    Body body;
+    Iterator begin(a_depths);
+    Iterator end(a_depths + depth);
+    g_tasks_observed = 0;
+    tbb::parallel_do(begin, end, body);
+    ASSERT (g_tasks_observed == g_tasks_expected, NULL);
+}
+
+template<class Iterator>
+void TestIterator_RvalueOnly ( int /*nthread*/, size_t depth ) {
+    g_values_counter = 0;
+    TestBody<FakeTaskGeneratorBody, Iterator> (depth);
+    TestBody<FakeTaskGeneratorBody_ConstRefVersion, Iterator> (depth);
+    TestBody<TaskGeneratorBody, Iterator> (depth);
+    TestBody<TaskGeneratorBody_ConstVersion, Iterator> (depth);
+    TestBody<TaskGeneratorBody_ConstRefVersion, Iterator> (depth);
+}
+
+template<class Iterator>
+void TestIterator ( int nthread, size_t depth ) {
+    TestIterator_RvalueOnly<Iterator>(nthread, depth);
+    TestBody<FakeTaskGeneratorBody_RefVersion, Iterator> (depth);
+    TestBody<FakeTaskGeneratorBody_VolatileRefVersion, Iterator> (depth);
+    TestBody<TaskGeneratorBody_RefVersion, Iterator> (depth);
+    TestBody<TaskGeneratorBody_VolatileRefVersion, Iterator> (depth);
+    TestBody<TaskGeneratorBody_ConstVolatileRefVersion, Iterator> (depth);
+}
+
+void Run( int nthread ) {
+    for( size_t depth = 0; depth <= N_DEPTHS; ++depth ) {
+        g_tasks_expected = 0;
+        for ( size_t i=0; i < depth; ++i )
+            g_tasks_expected += FindNumOfTasks( g_depths[i].value() );
+        // Test for iterators over values convertible to work item type
+        TestIterator_RvalueOnly<size_t*>(nthread, depth);
+        // Test for random access iterators
+        TestIterator<value_t*>(nthread, depth);
+        // Test for input iterators
+        TestIterator<Harness::InputIterator<value_t> >(nthread, depth);
+        // Test for forward iterators
+        TestIterator<Harness::ForwardIterator<value_t> >(nthread, depth);
+    }
+}
+
+int TestMain () {
+    if( MinThread<1 ) {
+        REPORT("number of threads must be positive\n");
+        exit(1);
+    }
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        tbb::task_scheduler_init init( p );
+        Run(p);
+        // Test that all workers sleep when no work
+        TestCPUUserTime(p);
+    }
+    // This check must be performed after the scheduler terminated because only in this 
+    // case there is a guarantee that the workers already destroyed their last tasks. 
+    ASSERT( g_values_counter == 0, "Value objects were leaked" );
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_for.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_for.cpp
new file mode 100644 (file)
index 0000000..68fe08b
--- /dev/null
@@ -0,0 +1,362 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Test for function template parallel_for.h
+
+#if _MSC_VER
+#pragma warning (push)
+#if !defined(__INTEL_COMPILER)
+    // Suppress pointless "unreachable code" warning.
+    #pragma warning (disable: 4702)
+#endif
+#if defined(_Wp64)
+    // Workaround for overzealous compiler warnings in /Wp64 mode
+    #pragma warning (disable: 4267)
+#endif
+#endif //#if _MSC_VER 
+
+#include "tbb/parallel_for.h"
+#include "tbb/atomic.h"
+#include "harness_assert.h"
+#include "harness.h"
+
+static tbb::atomic<int> FooBodyCount;
+
+//! A range object whose only public members are those required by the Range concept.
+template<size_t Pad>
+class FooRange {
+    //! Start of range
+    int start;
+
+    //! Size of range
+    int size;
+    FooRange( int start_, int size_ ) : start(start_), size(size_) {
+        zero_fill<char>(pad, Pad);
+        pad[Pad-1] = 'x';
+    }
+    template<size_t Pad_> friend void Flog( int nthread );
+    template<size_t Pad_> friend class FooBody;
+    void operator&();
+
+    char pad[Pad];
+public:
+    bool empty() const {return size==0;}
+    bool is_divisible() const {return size>1;}
+    FooRange( FooRange& original, tbb::split ) : size(original.size/2) {
+        original.size -= size;
+        start = original.start+original.size;
+        ASSERT( original.pad[Pad-1]=='x', NULL );
+        pad[Pad-1] = 'x';
+    }
+};
+
+//! A body object whose only public members are those required by the parallel_for.h body concept.
+template<size_t Pad>
+class FooBody {
+    static const int LIVE = 0x1234;
+    tbb::atomic<int>* array;
+    int state;
+    friend class FooRange<Pad>;
+    template<size_t Pad_> friend void Flog( int nthread );
+    FooBody( tbb::atomic<int>* array_ ) : array(array_), state(LIVE) {}
+public:
+    ~FooBody() {
+        --FooBodyCount;
+        for( size_t i=0; i<sizeof(*this); ++i )
+            reinterpret_cast<char*>(this)[i] = -1;
+    }
+    //! Copy constructor 
+    FooBody( const FooBody& other ) : array(other.array), state(other.state) {
+        ++FooBodyCount;
+        ASSERT( state==LIVE, NULL );
+    }
+    void operator()( FooRange<Pad>& r ) const {
+        for( int k=0; k<r.size; ++k )
+            array[r.start+k]++;
+    }
+};
+
+#include "tbb/tick_count.h"
+
+static const int N = 1000;
+static tbb::atomic<int> Array[N];
+
+template<size_t Pad>
+void Flog( int nthread ) {
+    tbb::tick_count T0 = tbb::tick_count::now();
+    for( int i=0; i<N; ++i ) {
+        for ( int mode = 0; mode < 4; ++mode) 
+        {
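+            // mode selects the partitioner: 0 = none specified, 1 = simple_partitioner,
+            // 2 = auto_partitioner, 3 = affinity_partitioner.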
+            FooRange<Pad> r( 0, i );
+            const FooRange<Pad> rc = r;
+            FooBody<Pad> f( Array );
+            const FooBody<Pad> fc = f;
+            memset( Array, 0, sizeof(Array) );
+            FooBodyCount = 1;
+            switch (mode) {
+                case 0:
+                    tbb::parallel_for( rc, fc );
+                break;
+                case 1:
+                    tbb::parallel_for( rc, fc, tbb::simple_partitioner() );
+                break;
+                case 2:
+                    tbb::parallel_for( rc, fc, tbb::auto_partitioner() );
+                break;
+                case 3: {
+                    static tbb::affinity_partitioner affinity;
+                    tbb::parallel_for( rc, fc, affinity );
+                }
+                break;
+            }
+            for( int j=0; j<i; ++j ) 
+                ASSERT( Array[j]==1, NULL );
+            for( int j=i; j<N; ++j ) 
+                ASSERT( Array[j]==0, NULL );
+            // Destruction of bodies might take a while, but there should be at most one body per thread
+            // at this point.
+            while( FooBodyCount>1 && FooBodyCount<=nthread )
+                __TBB_Yield();
+            ASSERT( FooBodyCount==1, NULL );
+        }
+    }
+    tbb::tick_count T1 = tbb::tick_count::now();
+    REMARK("time=%g\tnthread=%d\tpad=%d\n",(T1-T0).seconds(),nthread,int(Pad));
+}
+
+// Testing parallel_for with step support
+const size_t PFOR_BUFFER_TEST_SIZE = 1024;
+// The test buffer has some extra items beyond its right bound
+const size_t PFOR_BUFFER_ACTUAL_SIZE = PFOR_BUFFER_TEST_SIZE + 1024; 
+size_t pfor_buffer[PFOR_BUFFER_ACTUAL_SIZE];
+
+template<typename T>
+class TestFunctor{
+public:
+    void operator ()(T index) const {
+        pfor_buffer[index]++;
+    }
+};
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <stdexcept> // std::invalid_argument
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+template <typename T>
+void TestParallelForWithStepSupport()
+{
+    const T pfor_buffer_test_size = static_cast<T>(PFOR_BUFFER_TEST_SIZE);
+    const T pfor_buffer_actual_size = static_cast<T>(PFOR_BUFFER_ACTUAL_SIZE);
+    // Testing parallel_for with different step values
+    for (T begin = 0; begin < pfor_buffer_test_size - 1; begin += pfor_buffer_test_size / 10 + 1) {
+        T step;
+        for (step = 1; step < pfor_buffer_test_size; step++) {
+            memset(pfor_buffer, 0, pfor_buffer_actual_size * sizeof(size_t));
+            if (step == 1){
+                tbb::parallel_for(begin, pfor_buffer_test_size, TestFunctor<T>());
+            } else {
+                tbb::parallel_for(begin, pfor_buffer_test_size, step, TestFunctor<T>());
+            }
+            // Verifying that parallel_for processed all items it should
+            for (T i = begin; i < pfor_buffer_test_size; i = i + step) {
+                ASSERT(pfor_buffer[i] == 1, "parallel_for didn't process all required elements");
+                pfor_buffer[i] = 0;
+            }
+            // Verifying that no extra items were processed and right bound of array wasn't crossed
+            for (T i = 0; i < pfor_buffer_actual_size; i++) {
+                ASSERT(pfor_buffer[i] == 0, "parallel_for processed an extra element");
+            }
+        }
+    }
+
+    // Testing some corner cases: begin > end (an empty iteration space), then step == 0 (expected to throw)
+    tbb::parallel_for(static_cast<T>(2), static_cast<T>(1), static_cast<T>(1), TestFunctor<T>());
+#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
+    try{
+        tbb::parallel_for(static_cast<T>(1), static_cast<T>(100), static_cast<T>(0), TestFunctor<T>());  // should cause std::invalid_argument
+    }catch(std::invalid_argument&){
+        return;
+    }
+    catch ( ... ) {
+        ASSERT ( __TBB_EXCEPTION_TYPE_INFO_BROKEN, "Unrecognized exception. std::invalid_argument is expected" );
+    }
+#endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN */
+}
+
+// Exception support test
+#define HARNESS_EH_SIMPLE_MODE 1
+#include "tbb/tbb_exception.h"
+#include "harness_eh.h"
+
+#if TBB_USE_EXCEPTIONS
+class test_functor_with_exception {
+public:
+    void operator ()(size_t) const { ThrowTestException(); }
+};
+
+void TestExceptionsSupport() {
+    REMARK (__FUNCTION__);
+    { // Tests version with a step provided
+        ResetEhGlobals();
+        TRY();
+            tbb::parallel_for((size_t)0, (size_t)PFOR_BUFFER_TEST_SIZE, (size_t)1, test_functor_with_exception());
+        CATCH_AND_ASSERT();
+    }
+    { // Tests version without a step
+        ResetEhGlobals();
+        TRY();
+            tbb::parallel_for((size_t)0, (size_t)PFOR_BUFFER_TEST_SIZE, test_functor_with_exception());
+        CATCH_AND_ASSERT();
+    }
+}
+#endif /* TBB_USE_EXCEPTIONS */
+
+// Cancellation support test
+class functor_to_cancel {
+public:
+    void operator()(size_t) const {
+        ++g_CurExecuted;
+        CancellatorTask::WaitUntilReady();
+    }
+};
+
+size_t g_worker_task_step = 0;
+
+class my_worker_pfor_step_task : public tbb::task
+{
+    tbb::task_group_context &my_ctx;
+
+    tbb::task* execute () {
+        if (g_worker_task_step == 0){
+            tbb::parallel_for((size_t)0, (size_t)PFOR_BUFFER_TEST_SIZE, functor_to_cancel(), my_ctx);
+        }else{
+            tbb::parallel_for((size_t)0, (size_t)PFOR_BUFFER_TEST_SIZE, g_worker_task_step, functor_to_cancel(), my_ctx);
+        }
+        return NULL;
+    }
+public:
+    my_worker_pfor_step_task ( tbb::task_group_context &context_) : my_ctx(context_) { }
+};
+
+void TestCancellation()
+{
+    // tests version without a step
+    g_worker_task_step = 0;
+    ResetEhGlobals();
+    RunCancellationTest<my_worker_pfor_step_task, CancellatorTask>();
+
+    // tests version with step
+    g_worker_task_step = 1;
+    ResetEhGlobals();
+    RunCancellationTest<my_worker_pfor_step_task, CancellatorTask>();
+}
+
+#include "harness_m128.h"
+
+#if HAVE_m128 && !__TBB_SSE_STACK_ALIGNMENT_BROKEN
+struct SSE_Functor {
+    ClassWithSSE* Src, * Dst;
+    SSE_Functor( ClassWithSSE* src, ClassWithSSE* dst ) : Src(src), Dst(dst) {}
+
+    void operator()( tbb::blocked_range<int>& r ) const {
+        for( int i=r.begin(); i!=r.end(); ++i )
+            Dst[i] = Src[i];
+    }     
+};
+
+//! Test that parallel_for works with stack-allocated __m128
+void TestSSE() {
+    ClassWithSSE Array1[N], Array2[N];
+    for( int i=0; i<N; ++i )
+        Array1[i] = ClassWithSSE(i);
+    tbb::parallel_for( tbb::blocked_range<int>(0,N), SSE_Functor(Array1, Array2) );
+    for( int i=0; i<N; ++i )
+        ASSERT( Array2[i]==ClassWithSSE(i), NULL ) ;
+}
+#endif /* HAVE_m128 */
+
+#include <cstdio>
+#include "tbb/task_scheduler_init.h"
+#include "harness_cpu.h"
+
+int TestMain () {
+    if( MinThread<1 ) {
+        REPORT("number of threads must be positive\n");
+        exit(1);
+    }
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        if( p>0 ) {
+            tbb::task_scheduler_init init( p );
+            Flog<1>(p);
+            Flog<10>(p);
+            Flog<100>(p);
+            Flog<1000>(p);
+            Flog<10000>(p);
+
+            // Testing with different integer types
+            TestParallelForWithStepSupport<short>();
+            TestParallelForWithStepSupport<unsigned short>();
+            TestParallelForWithStepSupport<int>();
+            TestParallelForWithStepSupport<unsigned int>();
+            TestParallelForWithStepSupport<long>();
+            TestParallelForWithStepSupport<unsigned long>();
+            TestParallelForWithStepSupport<long long>();
+            TestParallelForWithStepSupport<unsigned long long>();
+            TestParallelForWithStepSupport<size_t>();
+#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
+            TestExceptionsSupport();
+#endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN */
+            if (p>1) TestCancellation();
+#if HAVE_m128 && !__TBB_SSE_STACK_ALIGNMENT_BROKEN
+            TestSSE();
+#endif /* HAVE_m128 */
+
+            // Test that all workers sleep when no work
+            TestCPUUserTime(p);
+        }
+    }
+#if __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
+    REPORT("Known issue: exception handling tests are skipped.\n");
+#endif
+#if HAVE_m128 && __TBB_SSE_STACK_ALIGNMENT_BROKEN
+    REPORT("Known issue: stack alignment for SSE not tested.\n");
+#endif
+    return Harness::Done;
+}
+
+#if _MSC_VER
+#pragma warning (pop)
+#endif
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_for_each.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_for_each.cpp
new file mode 100644 (file)
index 0000000..320aadf
--- /dev/null
@@ -0,0 +1,191 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+#pragma warning(disable: 4180) // "qualifier applied to function type has no meaning; ignored"
+#endif
+
+#include "tbb/parallel_for_each.h"
+#include "tbb/task_scheduler_init.h"
+#include "tbb/atomic.h"
+#include "harness.h"
+#include "harness_iterator.h"
+
+// Some old compilers can't deduce the template parameter type for parallel_for_each
+// if the function name is passed without an explicit cast to a function pointer.
+typedef void (*TestFunctionType)(size_t);
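+// The explicit cast is what the calls in this file rely on, e.g.
+//     tbb::parallel_for_each(begin, end, (TestFunctionType)TestFunction);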
+
+tbb::atomic<size_t> sum;
+
+// This function is called via parallel_for_each
+void TestFunction (size_t value) {
+    sum += (unsigned int)value;
+}
+
+const size_t NUMBER_OF_ELEMENTS = 1000;
+
+// Tests tbb::parallel_for_each functionality
+template <typename Iterator>
+void RunPForEachTests()
+{
+    size_t test_vector[NUMBER_OF_ELEMENTS + 1];
+
+    sum = 0;
+    size_t test_sum = 0;
+
+    for (size_t i =0; i < NUMBER_OF_ELEMENTS; i++) { 
+        test_vector[i] = i;
+        test_sum += i;
+    }
+    test_vector[NUMBER_OF_ELEMENTS] = 1000000; // parallel_for_each shouldn't touch this element
+
+    Iterator begin(&test_vector[0]);
+    Iterator end(&test_vector[NUMBER_OF_ELEMENTS]);
+
+    tbb::parallel_for_each(begin, end, (TestFunctionType)TestFunction);
+    ASSERT(sum == test_sum, "Not all items of test vector were processed by parallel_for_each");
+    ASSERT(test_vector[NUMBER_OF_ELEMENTS] == 1000000, "parallel_for_each processed an extra element");
+}
+
+typedef void (*TestMutatorType)(size_t&);
+
+void TestMutator(size_t& value) {
+    ASSERT(value==0,NULL);
+    ++sum;
+    ++value;
+}
+
+//! Test that tbb::parallel_for_each works for mutable iterators.
+template <typename Iterator>
+void RunMutablePForEachTests() {
+    size_t test_vector[NUMBER_OF_ELEMENTS];
+    for( size_t i=0; i<NUMBER_OF_ELEMENTS; ++i )
+        test_vector[i] = 0;
+    sum = 0;
+    tbb::parallel_for_each( Iterator(test_vector), Iterator(test_vector+NUMBER_OF_ELEMENTS), (TestMutatorType)TestMutator );
+    ASSERT( sum==NUMBER_OF_ELEMENTS, "parallel_for_each called function wrong number of times" );
+    for( size_t i=0; i<NUMBER_OF_ELEMENTS; ++i )
+        ASSERT( test_vector[i]==1, "parallel_for_each did not process each element exactly once" );
+}
+
+#define HARNESS_EH_SIMPLE_MODE 1
+#include "tbb/tbb_exception.h"
+#include "harness_eh.h"
+
+#if TBB_USE_EXCEPTIONS
+void test_function_with_exception(size_t) {
+    ThrowTestException();
+}
+
+template <typename Iterator>
+void TestExceptionsSupport()
+{
+    REMARK (__FUNCTION__);
+    size_t test_vector[NUMBER_OF_ELEMENTS + 1];
+
+    for (size_t i = 0; i < NUMBER_OF_ELEMENTS; i++) { 
+        test_vector[i] = i;
+    }
+
+    Iterator begin(&test_vector[0]);
+    Iterator end(&test_vector[NUMBER_OF_ELEMENTS]);
+
+    TRY();
+        tbb::parallel_for_each(begin, end, (TestFunctionType)test_function_with_exception);
+    CATCH_AND_ASSERT();
+}
+#endif /* TBB_USE_EXCEPTIONS */
+
+// Cancellation support test
+void function_to_cancel(size_t ) {
+    ++g_CurExecuted;
+    CancellatorTask::WaitUntilReady();
+}
+
+template <typename Iterator>
+class my_worker_pforeach_task : public tbb::task
+{
+    tbb::task_group_context &my_ctx;
+
+    tbb::task* execute () {
+        size_t test_vector[NUMBER_OF_ELEMENTS + 1];
+        for (size_t i = 0; i < NUMBER_OF_ELEMENTS; i++) { 
+            test_vector[i] = i;
+        }
+        Iterator begin(&test_vector[0]);
+        Iterator end(&test_vector[NUMBER_OF_ELEMENTS]);
+
+        tbb::parallel_for_each(begin, end, (TestFunctionType)function_to_cancel);
+        
+        return NULL;
+    }
+public:
+    my_worker_pforeach_task ( tbb::task_group_context &ctx) : my_ctx(ctx) { }
+};
+
+template <typename Iterator>
+void TestCancellation()
+{
+    REMARK (__FUNCTION__);
+    ResetEhGlobals();
+    RunCancellationTest<my_worker_pforeach_task<Iterator>, CancellatorTask>();
+}
+
+#include "harness_cpu.h"
+
+int TestMain () {
+    if( MinThread<1 ) {
+        REPORT("number of threads must be positive\n");
+        exit(1);
+    }
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        tbb::task_scheduler_init init( p );
+        RunPForEachTests<Harness::RandomIterator<size_t> >();
+        RunPForEachTests<Harness::InputIterator<size_t> >();
+        RunPForEachTests<Harness::ForwardIterator<size_t> >();
+        RunMutablePForEachTests<Harness::RandomIterator<size_t> >();
+        RunMutablePForEachTests<Harness::ForwardIterator<size_t> >();
+
+#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
+        TestExceptionsSupport<Harness::RandomIterator<size_t> >();
+        TestExceptionsSupport<Harness::InputIterator<size_t> >();
+        TestExceptionsSupport<Harness::ForwardIterator<size_t> >();
+#endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN */
+        if (p > 1) {
+            TestCancellation<Harness::RandomIterator<size_t> >();
+            TestCancellation<Harness::InputIterator<size_t> >();
+            TestCancellation<Harness::ForwardIterator<size_t> >();
+        }
+        // Test that all workers sleep when no work
+        TestCPUUserTime(p);
+    }
+#if __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
+    REPORT("Known issue: exception handling tests are skipped.\n");
+#endif
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_invoke.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_invoke.cpp
new file mode 100644 (file)
index 0000000..e4ce649
--- /dev/null
@@ -0,0 +1,324 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+#pragma warning(disable: 4180) // "qualifier applied to function type has no meaning; ignored"
+#endif
+
+#include "tbb/parallel_invoke.h"
+#include "tbb/task_scheduler_init.h"
+#include "tbb/atomic.h"
+#include "tbb/tbb_exception.h"
+#include "harness.h"
+
+#if !defined(__INTEL_COMPILER)
+#if defined(_MSC_VER) && _MSC_VER <= 1400  ||  __GNUC__==3 && __GNUC_MINOR__<=3  ||  __SUNPRO_CC
+    #define __TBB_FUNCTION_BY_CONSTREF_IN_TEMPLATE_BROKEN 1
+#endif
+#endif
+
+static const size_t MAX_NUMBER_OF_PINVOKE_ARGS = 10;
+tbb::atomic<size_t> function_counter;
+
+// Some macros to make the test easier to read
+
+// Ten functions test0 ... test9 are defined below; a pointer to each function is also defined.
+
+#define TEST_FUNCTION(value) void test##value () \
+{   \
+    ASSERT(!(function_counter & (1 << value)), "Test function has already been called"); \
+    function_counter += 1 << value; \
+}   \
+void (*test_pointer##value)(void) = test##value;
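+// For reference, TEST_FUNCTION(0) expands, roughly, to
+//     void test0 () {
+//         ASSERT(!(function_counter & (1 << 0)), "Test function has already been called");
+//         function_counter += 1 << 0;
+//     }
+//     void (*test_pointer0)(void) = test0;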
+
+TEST_FUNCTION(0)
+TEST_FUNCTION(1)
+TEST_FUNCTION(2)
+TEST_FUNCTION(3)
+TEST_FUNCTION(4)
+TEST_FUNCTION(5)
+TEST_FUNCTION(6)
+TEST_FUNCTION(7)
+TEST_FUNCTION(8)
+TEST_FUNCTION(9)
+
+// The same with functors
+#define TEST_FUNCTOR(value) class test_functor##value  \
+{   \
+public: \
+    void operator() () const {  \
+        function_counter += 1 << value;   \
+    }   \
+} functor##value;
+
+TEST_FUNCTOR(0)
+TEST_FUNCTOR(1)
+TEST_FUNCTOR(2)
+TEST_FUNCTOR(3)
+TEST_FUNCTOR(4)
+TEST_FUNCTOR(5)
+TEST_FUNCTOR(6)
+TEST_FUNCTOR(7)
+TEST_FUNCTOR(8)
+TEST_FUNCTOR(9)
+
+#define INIT_TEST function_counter = 0;
+
+#define VALIDATE_INVOKE_RUN(number_of_args, test_type) \
+    ASSERT( (size_t)function_counter == (size_t)(1 << number_of_args) - 1, "parallel_invoke called with " #number_of_args " arguments didn't process all " #test_type);
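+// Every test function and functor adds its own bit (1 << value) to function_counter, so after
+// parallel_invoke has run the first number_of_args of them exactly once the counter equals
+// 2^number_of_args - 1, which is what VALIDATE_INVOKE_RUN checks.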
+
+// Calls parallel_invoke for different number of arguments
+// It can be called with and without user context
+template <typename F0, typename F1, typename F2, typename F3, typename F4, typename F5,
+    typename F6, typename F7, typename F8, typename F9>
+void call_parallel_invoke( size_t n, F0& f0, F1& f1, F2& f2, F3& f3, F4 &f4, F5 &f5,
+                          F6& f6, F7 &f7, F8 &f8, F9 &f9, tbb::task_group_context* context) {
+    switch(n) {
+    default:
+        ASSERT(false, "number of arguments must be between 2 and 10");
+    case 2:
+        if (context)
+            tbb::parallel_invoke (f0, f1, *context);
+        else
+            tbb::parallel_invoke (f0, f1);
+        break;
+    case 3:
+        if (context)
+            tbb::parallel_invoke (f0, f1, f2, *context);
+        else
+            tbb::parallel_invoke (f0, f1, f2);
+        break;
+    case 4:
+        if(context)
+            tbb::parallel_invoke (f0, f1, f2, f3, *context);
+        else
+            tbb::parallel_invoke (f0, f1, f2, f3);
+        break;
+    case 5:
+        if(context)
+            tbb::parallel_invoke (f0, f1, f2, f3, f4, *context);
+        else
+            tbb::parallel_invoke (f0, f1, f2, f3, f4);
+        break;
+    case 6:
+        if(context)
+            tbb::parallel_invoke (f0, f1, f2, f3, f4, f5, *context);
+        else
+            tbb::parallel_invoke (f0, f1, f2, f3, f4, f5);
+        break;
+    case 7:
+        if(context)
+            tbb::parallel_invoke (f0, f1, f2, f3, f4, f5, f6, *context);
+        else
+            tbb::parallel_invoke (f0, f1, f2, f3, f4, f5, f6);
+        break;
+    case 8:
+        if(context)
+            tbb::parallel_invoke (f0, f1, f2, f3, f4, f5, f6, f7, *context);
+        else
+            tbb::parallel_invoke (f0, f1, f2, f3, f4, f5, f6, f7);
+        break;
+    case 9:
+        if(context)
+            tbb::parallel_invoke (f0, f1, f2, f3, f4, f5, f6, f7, f8, *context);
+        else
+            tbb::parallel_invoke (f0, f1, f2, f3, f4, f5, f6, f7, f8);
+        break;
+    case 10:
+        if(context)
+            tbb::parallel_invoke (f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, *context);
+        else
+            tbb::parallel_invoke (f0, f1, f2, f3, f4, f5, f6, f7, f8, f9);
+        break;
+    }
+}
+
+#if !__TBB_FUNCTION_BY_CONSTREF_IN_TEMPLATE_BROKEN
+template<typename function> void aux_invoke(const function& f) {
+    f();
+}
+
+bool function_by_constref_in_template_codegen_broken() {
+    function_counter = 0;
+    aux_invoke(test1);
+    return function_counter==0;
+}
+#endif /* !__TBB_FUNCTION_BY_CONSTREF_IN_TEMPLATE_BROKEN */
+
+void test_parallel_invoke()
+{
+    REMARK (__FUNCTION__);
+    // Testing with pointers to functions
+    for (int n = 2; n <=10; n++)
+    {
+        INIT_TEST;
+        call_parallel_invoke(n, test_pointer0, test_pointer1, test_pointer2, test_pointer3, test_pointer4,
+            test_pointer5, test_pointer6, test_pointer7, test_pointer8, test_pointer9, NULL);
+        VALIDATE_INVOKE_RUN(n, "pointers to function");
+    }
+
+    // Testing parallel_invoke with functors
+    for (int n = 2; n <=10; n++)
+    {
+        INIT_TEST;
+        call_parallel_invoke(n, functor0, functor1, functor2, functor3, functor4,
+            functor5, functor6, functor7, functor8, functor9, NULL);
+        VALIDATE_INVOKE_RUN(n, "functors");
+    }
+
+#if __TBB_FUNCTION_BY_CONSTREF_IN_TEMPLATE_BROKEN
+    // some old compilers can't cope with passing a function name into parallel_invoke
+#else
+    // and some compile but generate broken code that does not call the function
+    if (function_by_constref_in_template_codegen_broken())
+        return;
+
+    // Testing parallel_invoke with functions
+    for (int n = 2; n <=10; n++)
+    {
+        INIT_TEST;
+        call_parallel_invoke(n, test0, test1, test2, test3, test4, test5, test6, test7, test8, test9, NULL);
+        VALIDATE_INVOKE_RUN(n, "functions");
+    }
+#endif
+}
+
+// Exception handling support test
+
+#define HARNESS_EH_SIMPLE_MODE 1
+#include "harness_eh.h"
+
+#if TBB_USE_EXCEPTIONS
+volatile size_t exception_mask; // each bit tells whether the corresponding functor should throw an exception or not
+
+// throws exception if corresponding exception_mask bit is set
+#define TEST_FUNCTOR_WITH_THROW(value) \
+struct throwing_functor##value { \
+    void operator() () const {  \
+        if (exception_mask & (1 << value))   \
+            ThrowTestException();    \
+    }   \
+} test_with_throw##value;
+
+TEST_FUNCTOR_WITH_THROW(0)
+TEST_FUNCTOR_WITH_THROW(1)
+TEST_FUNCTOR_WITH_THROW(2)
+TEST_FUNCTOR_WITH_THROW(3)
+TEST_FUNCTOR_WITH_THROW(4)
+TEST_FUNCTOR_WITH_THROW(5)
+TEST_FUNCTOR_WITH_THROW(6)
+TEST_FUNCTOR_WITH_THROW(7)
+TEST_FUNCTOR_WITH_THROW(8)
+TEST_FUNCTOR_WITH_THROW(9)
+
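+// TestExceptionHandling() below iterates exception_mask over every non-zero bit pattern for the
+// first n functors, so all combinations of throwing and non-throwing arguments are exercised.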
+void TestExceptionHandling()
+{
+    REMARK (__FUNCTION__);
+    for( size_t n = 2; n <= 10; ++n ) {
+        for( exception_mask = 1; exception_mask < (size_t) (1 << n); ++exception_mask ) {
+            ResetEhGlobals();
+            TRY();
+                REMARK("Calling parallel_invoke, number of functions = %d, exception_mask = %d\n", n, exception_mask);
+                call_parallel_invoke(n, test_with_throw0, test_with_throw1, test_with_throw2, test_with_throw3,
+                    test_with_throw4, test_with_throw5, test_with_throw6, test_with_throw7, test_with_throw8, test_with_throw9, NULL);
+            CATCH_AND_ASSERT();
+        }
+    }
+}
+#endif /* TBB_USE_EXCEPTIONS */
+
+// Cancellation support test
+void function_to_cancel() {
+    ++g_CurExecuted;
+    CancellatorTask::WaitUntilReady();
+}
+
+// The function is used to test cancellation
+void simple_test_nothrow (){
+    ++g_CurExecuted;
+}
+
+size_t g_numFunctions,
+       g_functionToCancel;
+
+class ParInvokeLauncherTask : public tbb::task
+{
+    tbb::task_group_context &my_ctx;
+    void(*func_array[10])(void);
+
+    tbb::task* execute () {
+        func_array[g_functionToCancel] = &function_to_cancel;
+        call_parallel_invoke(g_numFunctions, func_array[0], func_array[1], func_array[2], func_array[3],
+            func_array[4], func_array[5], func_array[6], func_array[7], func_array[8], func_array[9], &my_ctx);
+        return NULL;
+    }
+public:
+    ParInvokeLauncherTask ( tbb::task_group_context& ctx ) : my_ctx(ctx) {
+        for (int i = 0; i <=9; ++i)
+            func_array[i] = &simple_test_nothrow;
+    }
+};
+
+void TestCancellation ()
+{
+    REMARK (__FUNCTION__);
+    for ( int n = 2; n <= 10; ++n ) {
+        for ( int m = 0; m <= n - 1; ++m ) {
+            g_numFunctions = n;
+            g_functionToCancel = m;
+            ResetEhGlobals();
+            RunCancellationTest<ParInvokeLauncherTask, CancellatorTask>();
+        }
+    }
+}
+
+//------------------------------------------------------------------------
+// Entry point
+//------------------------------------------------------------------------
+
+#include "harness_cpu.h"
+
+int TestMain () {
+    MinThread = min(MinThread, MaxThread);
+    ASSERT (MinThread>=1, "Minimum number of threads must be 1 or more");
+    for ( int p = MinThread; p <= MaxThread; ++p ) {
+        tbb::task_scheduler_init init(p);
+        test_parallel_invoke();
+        if (p > 1) {
+#if __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
+            REPORT("Known issue: exception handling tests are skipped.\n");
+#elif TBB_USE_EXCEPTIONS
+            TestExceptionHandling();
+#endif /* TBB_USE_EXCEPTIONS */
+            TestCancellation();
+        }
+        TestCPUUserTime(p);
+    }
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_pipeline.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_pipeline.cpp
new file mode 100644 (file)
index 0000000..e5cd43c
--- /dev/null
@@ -0,0 +1,354 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Before including pipeline.h, set up the variable that counts heap-allocated
+// filter_node objects, and make it known to the header.
+int filter_node_count = 0;
+#define __TBB_TEST_FILTER_NODE_COUNT filter_node_count
+#include "tbb/pipeline.h"
+
+#include "tbb/atomic.h"
+#include "harness.h"
+
+const int n_tokens = 8;
+const int max_counter = 16;
+static tbb::atomic<int> output_counter;
+static tbb::atomic<int> input_counter;
+static tbb::atomic<int> check_type_counter;
+
+class check_type : Harness::NoAfterlife {
+    unsigned int id;
+    bool am_ready;
+public:
+    check_type( ) : id(0), am_ready(false) {
+        ++check_type_counter;
+    }
+
+    check_type(const check_type& other) : Harness::NoAfterlife(other) {
+        other.AssertLive();
+        AssertLive();
+        id = other.id;
+        am_ready = other.am_ready;
+        ++check_type_counter;
+    }
+
+    ~check_type() { 
+        AssertLive(); 
+        --check_type_counter;
+    }
+    unsigned int my_id() { AssertLive(); return id; }
+    bool is_ready() { AssertLive(); return am_ready; }
+    void function() {
+        AssertLive();
+        if( id == 0 ) {
+            id = 1;
+            am_ready = true;
+        }
+    }
+};
+
+// Filters must be copy-constructible and const-qualifiable.
+template<typename U>
+class input_filter : Harness::NoAfterlife {
+public:
+    U operator()( tbb::flow_control& control ) const {
+        AssertLive();
+        if( --input_counter < 0 ) {
+            control.stop();
+        }
+        return U();  // default constructed
+    }
+
+};
+
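+// The specializations below follow the same pattern as the primary template: decrement the
+// shared input_counter and call control.stop() once it goes negative, so each pipeline run is
+// expected to feed exactly max_counter items downstream (checkCounters() relies on this).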
+template<>
+class input_filter<void> : Harness::NoAfterlife {
+public:
+    void operator()( tbb::flow_control& control ) const {
+        AssertLive();
+        if( --input_counter < 0 ) {
+            control.stop();
+        }
+    }
+
+};
+
+
+template<>
+class input_filter<check_type> : Harness::NoAfterlife {
+public:
+    check_type operator()( tbb::flow_control& control ) const {
+        AssertLive();
+        if( --input_counter < 0 ) {
+            control.stop();
+        }
+        return check_type( );  // default constructed
+    }
+};
+
+template<typename T, typename U>
+class middle_filter : Harness::NoAfterlife {
+public:
+    U operator()(T /*my_storage*/) const {
+        AssertLive();
+        return U();
+    }
+};
+
+template<>
+class middle_filter<check_type, check_type> : Harness::NoAfterlife {
+public:
+    check_type& operator()( check_type &c) const {
+        AssertLive();
+        ASSERT(!c.my_id(), "bad id value");
+        ASSERT(!c.is_ready(), "Already ready" );
+        c.function();
+        return c;
+    }
+
+};
+
+template<typename T>
+class output_filter : Harness::NoAfterlife {
+public:
+    void operator()(T) const {
+        AssertLive();
+        output_counter++;
+    }
+};
+
+template<>
+class output_filter<check_type> : Harness::NoAfterlife {
+public:
+    void operator()(check_type &c) const {
+        AssertLive();
+        ASSERT(c.my_id(), "unset id value");
+        ASSERT(c.is_ready(), "not yet ready");
+        output_counter++;
+    }
+};
+
+void resetCounters() {
+    output_counter = 0;
+    input_counter = max_counter;
+}
+
+void checkCounters() {
+    ASSERT(output_counter == max_counter, "not all tokens were passed through pipeline");
+}
+
+static const tbb::filter::mode filter_table[] = { tbb::filter::parallel, tbb::filter::serial_in_order, tbb::filter::serial_out_of_order}; 
+const unsigned number_of_filter_types = sizeof(filter_table)/sizeof(filter_table[0]);
+
+typedef tbb::filter_t<void, void> filter_chain;
+typedef tbb::filter::mode mode_array;
+
+// The filters are passed by value, which forces a temporary copy to be created.  This
+// reproduces the bug where a filter_chain kept references to the filters, which after
+// the call would be references to destroyed temporaries.
+template<typename type1, typename type2>
+void fill_chain( filter_chain &my_chain, mode_array *filter_type, input_filter<type1> i_filter,
+         middle_filter<type1, type2> m_filter, output_filter<type2> o_filter ) {
+    my_chain = tbb::make_filter<void, type1>(filter_type[0], i_filter) &
+        tbb::make_filter<type1, type2>(filter_type[1], m_filter) &
+        tbb::make_filter<type2, void>(filter_type[2], o_filter);
+}
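+// Note that i_filter, m_filter and o_filter are taken by value above; those copies are
+// temporaries that die when fill_chain() returns, so a chain that secretly kept references
+// to them would be dangling by the time it is run.  The caller below runs the chain only
+// after fill_chain() has returned, which is exactly the situation the bug involved.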
+
+void run_function_spec() {
+    ASSERT(!filter_node_count, NULL);
+    REMARK("Testing < void, void > (single filter in pipeline)");
+#if __TBB_LAMBDAS_PRESENT
+    REMARK( " ( + lambdas)");
+#endif
+    REMARK("\n");
+    input_filter<void> i_filter;
+    // Test pipeline that contains only one filter
+    for( unsigned i = 0; i<number_of_filter_types; i++) {
+        tbb::filter_t<void, void> one_filter( filter_table[i], i_filter );
+        ASSERT(filter_node_count==1, "some filter nodes left after previous iteration?");
+        resetCounters();
+        tbb::parallel_pipeline( n_tokens, one_filter );
+        // no need to check counters
+#if __TBB_LAMBDAS_PRESENT
+        tbb::atomic<int> counter;
+        counter = max_counter;
+        // Construct filter using lambda-syntax when parallel_pipeline() is being run;
+        tbb::parallel_pipeline( n_tokens, 
+            tbb::make_filter<void, void>(filter_table[i], [&counter]( tbb::flow_control& control ) {
+                    if( counter-- == 0 )
+                        control.stop();
+                    }
+            )
+        );
+#endif
+    }
+    ASSERT(!filter_node_count, "filter_node objects leaked");
+}
+
+template<typename type1, typename type2>
+void run_function(const char *l1, const char *l2) {
+    ASSERT(!filter_node_count, NULL);
+    REMARK("Testing < %s, %s >", l1, l2 );
+#if __TBB_LAMBDAS_PRESENT
+    REMARK( " ( + lambdas)");
+#endif
+    REMARK("\n");
+
+    const size_t number_of_filters = 3;
+
+    input_filter<type1> i_filter;
+    middle_filter<type1, type2> m_filter;
+    output_filter<type2> o_filter;
+
+    unsigned limit = 1;
+    // Test pipeline that contains number_of_filters filters
+    for( unsigned i=0; i<number_of_filters; ++i)
+        limit *= number_of_filter_types;
+    // Iterate over possible filter sequences
+    for( unsigned numeral=0; numeral<limit; ++numeral ) {
+        unsigned temp = numeral;
+        tbb::filter::mode filter_type[number_of_filters];
+        for( unsigned i=0; i<number_of_filters; ++i, temp/=number_of_filter_types ) 
+            filter_type[i] = filter_table[temp%number_of_filter_types];
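+        // 'numeral' is decoded as a three-digit number in base number_of_filter_types (3),
+        // each digit selecting the mode (parallel, serial_in_order or serial_out_of_order)
+        // of one filter, so all 27 mode combinations are covered.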
+
+        tbb::filter_t<void, type1> filter1( filter_type[0], i_filter );
+        tbb::filter_t<type1, type2> filter2( filter_type[1], m_filter );
+        tbb::filter_t<type2, void> filter3( filter_type[2], o_filter );
+        ASSERT(filter_node_count==3, "some filter nodes left after previous iteration?");
+        resetCounters();
+        // Create filters sequence when parallel_pipeline() is being run
+        tbb::parallel_pipeline( n_tokens, filter1 & filter2 & filter3 );
+        checkCounters();
+
+        // Create filters sequence partially outside parallel_pipeline() and also when parallel_pipeline() is being run
+        tbb::filter_t<void, type2> filter12;
+        filter12 = filter1 & filter2;
+        resetCounters();
+        tbb::parallel_pipeline( n_tokens, filter12 & filter3 );
+        checkCounters();
+
+        tbb::filter_t<void, void> filter123 = filter12 & filter3;
+        // Run pipeline twice with the same filter sequence
+        for( unsigned i = 0; i<2; i++ ) {
+            resetCounters();
+            tbb::parallel_pipeline( n_tokens, filter123 );
+            checkCounters();
+        }
+
+        // Now copy-construct another filter_t instance, and use it to run pipeline
+        {
+            tbb::filter_t<void, void> copy123( filter123 );
+            resetCounters();
+            tbb::parallel_pipeline( n_tokens, copy123 );
+            checkCounters();
+        }
+
+        // Construct filters and create the sequence when parallel_pipeline() is being run
+        resetCounters();
+        tbb::parallel_pipeline( n_tokens, 
+                   tbb::make_filter<void, type1>(filter_type[0], i_filter) &
+                   tbb::make_filter<type1, type2>(filter_type[1], m_filter) &
+                   tbb::make_filter<type2, void>(filter_type[2], o_filter) );
+        checkCounters();
+
+        // Construct filters, make a copy, destroy the original filters, and run with the copy
+        int cnt = filter_node_count;
+        {
+            tbb::filter_t<void, void>* p123 = new tbb::filter_t<void,void> (
+                   tbb::make_filter<void, type1>(filter_type[0], i_filter) &
+                   tbb::make_filter<type1, type2>(filter_type[1], m_filter) &
+                   tbb::make_filter<type2, void>(filter_type[2], o_filter) );
+            ASSERT(filter_node_count==cnt+5, "filter node accounting error?");
+            tbb::filter_t<void, void> copy123( *p123 );
+            delete p123;
+            ASSERT(filter_node_count==cnt+5, "filter nodes deleted prematurely?");
+            resetCounters();
+            tbb::parallel_pipeline( n_tokens, copy123 );
+            checkCounters();
+        }
+
+        // construct a filter with temporaries
+        {
+            tbb::filter_t<void, void> my_filter;
+            fill_chain<type1,type2>( my_filter, filter_type, i_filter, m_filter, o_filter );
+            resetCounters();
+            tbb::parallel_pipeline( n_tokens, my_filter );
+            checkCounters();
+        }
+        ASSERT(filter_node_count==cnt, "scope ended but filter nodes not deleted?");
+
+#if __TBB_LAMBDAS_PRESENT
+        tbb::atomic<int> counter;
+        counter = max_counter;
+        // Construct filters using lambda-syntax and create the sequence when parallel_pipeline() is being run;
+        resetCounters();  // only need the output_counter reset.
+        tbb::parallel_pipeline( n_tokens, 
+            tbb::make_filter<void, type1>(filter_type[0], [&counter]( tbb::flow_control& control ) -> type1 {
+                    if( --counter < 0 )
+                        control.stop();
+                    return type1(); }
+            ) &
+            tbb::make_filter<type1, type2>(filter_type[1], []( type1 /*my_storage*/ ) -> type2 {
+                    return type2(); }
+            ) &
+            tbb::make_filter<type2, void>(filter_type[2], [] ( type2 ) -> void { 
+                    output_counter++; }
+            ) 
+        );
+        checkCounters();
+#endif
+    }
+    ASSERT(!filter_node_count, "filter_node objects leaked");
+}
+
+#include "tbb/task_scheduler_init.h"
+
+int TestMain() {
+    // Test with varying number of threads.
+    for( int nthread=MinThread; nthread<=MaxThread; ++nthread ) {
+        // Initialize TBB task scheduler
+        REMARK("\nTesting with nthread=%d\n", nthread);
+        tbb::task_scheduler_init init(nthread);
+        
+        // Run test several times with different types
+        run_function_spec();
+        run_function<size_t,int>("size_t", "int");
+        run_function<int,double>("int", "double");
+        check_type_counter = 0;
+        run_function<check_type,size_t>("check_type", "size_t");
+        ASSERT(!check_type_counter, "Error in check_type creation/destruction");
+        // check_type as the second type in the pipeline only works if check_type
+        // is also the first type.  The middle_filter specialization for <check_type, check_type>
+        // changes the state of the check_type items, and this is checked in the output_filter
+        // specialization.
+        run_function<check_type, check_type>("check_type", "check_type");
+        ASSERT(!check_type_counter, "Error in check_type creation/destruction");
+    }
+    return Harness::Done;
+}
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_reduce.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_reduce.cpp
new file mode 100644 (file)
index 0000000..c29eb89
--- /dev/null
@@ -0,0 +1,271 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/parallel_reduce.h"
+#include "tbb/atomic.h"
+#include "harness_assert.h"
+
+using namespace std;
+
+static tbb::atomic<long> ForkCount;
+static tbb::atomic<long> FooBodyCount;
+
+//! Class whose public interface is exactly the minimal requirements of the Range concept
+class MinimalRange {
+    size_t begin, end;
+    friend class FooBody;
+    explicit MinimalRange( size_t i ) : begin(0), end(i) {}
+    friend void Flog( int nthread, bool interference );
+public:
+    MinimalRange( MinimalRange& r, tbb::split ) : end(r.end) {
+        begin = r.end = (r.begin+r.end)/2;
+    }
+    bool is_divisible() const {return end-begin>=2;}
+    bool empty() const {return begin==end;}
+};
+
+//! Class whose public interface is exactly the minimal requirements of the Body concept for parallel_reduce
+class FooBody {
+private:
+    FooBody( const FooBody& );          // Deny access
+    void operator=( const FooBody& );   // Deny access
+    friend void Flog( int nthread, bool interference );
+    //! Parent that created this body via split operation.  NULL if original body.
+    FooBody* parent;
+    //! Total number of index values processed by body and its children.
+    size_t sum;
+    //! Number of join operations done so far on this body and its children.
+    long join_count;
+    //! Range that has been processed so far by this body and its children.
+    size_t begin, end;
+    //! True if body has not yet been processed by operator().
+    bool is_new;
+    //! 1 if body was created by split; 0 if original body.
+    int forked;
+    FooBody() {++FooBodyCount;}
+public:
+    ~FooBody() {
+        forked = 0xDEADBEEF; 
+        sum=0xDEADBEEF;
+        join_count=0xDEADBEEF;
+        --FooBodyCount;
+    } 
+    FooBody( FooBody& other, tbb::split ) {
+        ++FooBodyCount;
+        ++ForkCount;
+        sum = 0;
+        parent = &other;
+        join_count = 0;
+        is_new = true;
+        forked = 1;
+    }
+    void join( FooBody& s ) {
+        ASSERT( s.forked==1, NULL );
+        ASSERT( this!=&s, NULL );
+        ASSERT( this==s.parent, NULL ); 
+        ASSERT( end==s.begin, NULL );
+        end = s.end;
+        sum += s.sum;
+        join_count += s.join_count + 1;
+        s.forked = 2;
+    }
+    void operator()( const MinimalRange& r ) {
+        for( size_t k=r.begin; k<r.end; ++k )
+            ++sum;
+        if( is_new ) {
+            is_new = false;
+            begin = r.begin;
+        } else
+            ASSERT( end==r.begin, NULL );
+        end = r.end;
+    }
+};
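+// Flog() below runs parallel_reduce over MinimalRange(i) for i = 0..1000 under four
+// partitioner variants, and after each run checks that the joins reassembled exactly the
+// range [0, i) (f.begin, f.end, f.sum) and that every split body was destroyed again
+// (FooBodyCount back to 1).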
+
+#include <cstdio>
+#include "harness.h"
+#include "tbb/tick_count.h"
+
+void Flog( int nthread, bool interference=false ) {
+    for (int mode = 0;  mode < 4; mode++) {
+        tbb::tick_count T0 = tbb::tick_count::now();
+        long join_count = 0;        
+        tbb::affinity_partitioner ap;
+        for( size_t i=0; i<=1000; ++i ) {
+            FooBody f;
+            f.sum = 0;
+            f.parent = NULL;
+            f.join_count = 0;
+            f.is_new = true;
+            f.forked = 0;
+            f.begin = ~size_t(0);
+            f.end = ~size_t(0);
+            ASSERT( FooBodyCount==1, NULL );
+            switch (mode) {
+                case 0:
+                    tbb::parallel_reduce( MinimalRange(i), f );
+                    break;
+                case 1:
+                    tbb::parallel_reduce( MinimalRange(i), f, tbb::simple_partitioner() );
+                    break;
+                case 2:
+                    tbb::parallel_reduce( MinimalRange(i), f, tbb::auto_partitioner() );
+                    break;
+                case 3: 
+                    tbb::parallel_reduce( MinimalRange(i), f, ap );
+                    break;
+            }
+            join_count += f.join_count;
+            ASSERT( FooBodyCount==1, NULL );
+            ASSERT( f.sum==i, NULL );
+            ASSERT( f.begin==(i==0 ? ~size_t(0) : 0), NULL );
+            ASSERT( f.end==(i==0 ? ~size_t(0) : i), NULL );
+        }
+        tbb::tick_count T1 = tbb::tick_count::now();
+        REMARK("time=%g join_count=%ld ForkCount=%ld nthread=%d%s\n",
+                   (T1-T0).seconds(),join_count,long(ForkCount), nthread, interference ? " with interference":"");
+    }
+}
+
+class DeepThief: public tbb::task {
+    /*override*/tbb::task* execute() {
+        if( !is_stolen_task() )
+            spawn(*child);
+        wait_for_all();
+        return NULL;
+    }
+    task* child;
+    friend void FlogWithInterference(int);
+public:
+    DeepThief() : child() {}
+};
+
+//! Test for a problem in TBB 2.1 parallel_reduce where the middle of a range is stolen.
+/** Warning: this test is a somewhat abusive use of TBB because
+    it requires two or more threads to avoid deadlock. */
+void FlogWithInterference( int nthread ) {
+    ASSERT( nthread>=2, "requires two or more threads" );
+
+    // Build linear chain of tasks. 
+    // The purpose is to drive up "task depth" in TBB 2.1.
+    // An alternative would be to use add_to_depth, but that method is deprecated in TBB 2.2,
+    // and this way we generalize to catching problems with implicit depth calculations.
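+    // The chain built here is, roughly: root -> empty_task x3 -> DeepThief -> leaf.
+    // The DeepThief is spawned before Flog() runs; if a worker steals it, the chain stays
+    // pending until the main thread releases the leaf after Flog(), which is the
+    // mid-range-stealing situation this test is meant to provoke.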
+    tbb::task* root = new( tbb::task::allocate_root() ) tbb::empty_task;
+    root->set_ref_count(2);
+    tbb::task* t = root;
+    for( int i=0; i<3; ++i ) {
+        t = new( t->allocate_child() ) tbb::empty_task;
+        t->set_ref_count(1);
+    }
+
+    // Append a DeepThief to the chain.
+    DeepThief* deep_thief = new( t->allocate_child() ) DeepThief;
+    deep_thief->set_ref_count(2);
+
+    // Append a leaf to the chain. 
+    tbb::task* leaf = new( deep_thief->allocate_child() ) tbb::empty_task;
+    deep_thief->child = leaf;
+
+    root->spawn(*deep_thief);
+
+    Flog(nthread,true);
+
+    if( root->ref_count()==2 ) {
+        // Spawn leaf, which, when it finishes, causes the DeepThief and the rest of the chain to finish.
+        root->spawn( *leaf );
+    }
+    // Wait for all tasks in the chain from root to leaf to finish.
+    root->wait_for_all();
+    root->destroy( *root );
+}
+
+#include "tbb/blocked_range.h"
+
+#if _MSC_VER
+    typedef tbb::internal::uint64_t ValueType;
+#else
+    typedef uint64_t ValueType;
+#endif
+
+struct Sum {
+    template<typename T>
+    T operator() ( const T& v1, const T& v2 ) const {
+        return v1 + v2;
+    }
+};
+
+struct Accumulator {
+    ValueType operator() ( const tbb::blocked_range<ValueType*>& r, ValueType value ) const {
+        for ( ValueType* pv = r.begin(); pv != r.end(); ++pv )
+            value += *pv;
+        return value;
+    }
+};
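+// ParallelSum() reduces the array 1..N with Accumulator as the range body and Sum as the
+// join operation, and checks the result against the closed form N*(N+1)/2; when lambdas
+// are available the same reduction is repeated with a lambda body.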
+
+void ParallelSum () {
+    const ValueType I = 0,
+                    N = 1000000,
+                    R = N * (N + 1) / 2;
+    ValueType *array = new ValueType[N + 1];
+    for ( ValueType i = 0; i < N; ++i )
+        array[i] = i + 1;
+    tbb::blocked_range<ValueType*> range(array, array + N);
+    ValueType r1 = tbb::parallel_reduce( range, I, Accumulator(), Sum() );
+    ASSERT( r1 == R, NULL );
+#if __TBB_LAMBDAS_PRESENT
+    ValueType r2 = tbb::parallel_reduce( range, I, 
+        [](const tbb::blocked_range<ValueType*>& r, ValueType value) -> ValueType { 
+            for ( ValueType* pv = r.begin(); pv != r.end(); ++pv )
+                value += *pv;
+            return value;
+        },
+        Sum()
+    );
+    ASSERT( r2 == R, NULL );
+#endif /* __TBB_LAMBDAS_PRESENT */
+    delete[] array;
+}
+
+#include "tbb/task_scheduler_init.h"
+#include "harness_cpu.h"
+
+int TestMain () {
+    if( MinThread<0 ) {
+        REPORT("Usage: nthread must be positive\n");
+        exit(1);
+    }
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        tbb::task_scheduler_init init( p );
+        Flog(p);
+        if( p>=2 )
+            FlogWithInterference(p);
+        ParallelSum();
+        // Test that all workers sleep when no work
+        TestCPUUserTime(p);
+    }
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_scan.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_scan.cpp
new file mode 100644 (file)
index 0000000..ac9f243
--- /dev/null
@@ -0,0 +1,270 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/parallel_scan.h"
+#include "tbb/blocked_range.h"
+#include "harness_assert.h"
+
+typedef tbb::blocked_range<long> Range;
+
+static volatile bool ScanIsRunning = false;
+
+//! Sum of 0..i with wrap around on overflow.
+inline int TriangularSum( int i ) {
+    return i&1 ? ((i>>1)+1)*i : (i>>1)*(i+1); 
+}
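+// Equivalent to i*(i+1)/2: whichever of i and i+1 is even is halved before the
+// multiplication, so the division is exact; like the scan being checked, the product
+// simply wraps around on int overflow.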
+
+//! Verify that sum is the sum of the integers in the closed interval [start_index..finish_index].
+/** 'line' should be the source line of the caller. */
+static void VerifySum( long start_index, long finish_index, int sum, int line );
+
+const int MAXN = 2000;
+
+enum AddendFlag {
+    UNUSED=0,
+    USED_NONFINAL=1,
+    USED_FINAL=2
+};
+
+//! Array recording how each addend was used. 
+/** 'unsigned char' instead of AddendFlag for sake of compactness. */
+static unsigned char AddendHistory[MAXN];
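+// Each addend is expected to go UNUSED -> (possibly USED_NONFINAL during a pre-scan pass)
+// -> USED_FINAL during the final scan; the assertions in Accumulator's operator() use this
+// history to verify that no addend is consumed twice in the same role.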
+
+//! Set to 1 for debugging output 
+#define PRINT_DEBUG 0
+
+#include "tbb/atomic.h"
+#if PRINT_DEBUG
+#include <stdio.h>
+tbb::atomic<long> NextBodyId;
+#endif /* PRINT_DEBUG */
+
+struct BodyId {
+#if PRINT_DEBUG
+    const int id;
+    BodyId() : id(NextBodyId++) {}
+#endif /* PRINT_DEBUG */
+};
+
+tbb::atomic<long> NumberOfLiveAccumulator;
+
+static void Snooze( bool scan_should_be_running ) {
+    ASSERT( ScanIsRunning==scan_should_be_running, NULL );
+}
+
+template<typename T>
+class Accumulator: BodyId {
+    T my_total;
+    const T* my_array;
+    T* my_sum;
+    Range my_range;
+    //! Equals 'this' while the object is fully constructed, NULL otherwise.
+    /** Used to detect premature destruction and accidental bitwise copy. */
+    Accumulator* self;
+    Accumulator( const T array[], T sum[] ) :
+        my_total(), my_array(array), my_sum(sum), my_range(-1,-1,1)
+    {
+        ++NumberOfLiveAccumulator;
+        // Set self as last action of constructor, to indicate that object is fully constructed.
+        self = this;
+    }
+    friend void TestAccumulator( int mode, int nthread );
+public:
+#if PRINT_DEBUG
+    void print() const {
+        REPORT("%d [%ld..%ld)\n", id,my_range.begin(),my_range.end() );
+    }
+#endif /* PRINT_DEBUG */
+    ~Accumulator() {
+#if PRINT_DEBUG
+        REPORT("%d [%ld..%ld) destroyed\n",id,my_range.begin(),my_range.end() ); 
+#endif /* PRINT_DEBUG */
+        // Clear self as first action of destructor, to indicate that object is not fully constructed.
+        self = 0;
+        --NumberOfLiveAccumulator;
+    }
+    Accumulator( Accumulator& a, tbb::split ) : 
+        my_total(0), my_array(a.my_array), my_sum(a.my_sum), my_range(-1,-1,1)  
+    {
+        ++NumberOfLiveAccumulator;
+#if PRINT_DEBUG
+        REPORT("%d forked from %d\n",id,a.id);
+#endif /* PRINT_DEBUG */
+        Snooze(true);
+        // Set self as last action of constructor, to indicate that object is fully constructed.
+        self = this;
+    }
+    template<typename Tag> 
+    void operator()( const Range& r, Tag /*tag*/ ) {
+        Snooze(true);
+#if PRINT_DEBUG
+        if( my_range.empty() )
+            REPORT("%d computing %s [%ld..%ld)\n",id,Tag::is_final_scan()?"final":"lookahead",r.begin(),r.end() );
+        else
+            REPORT("%d computing %s [%ld..%ld) [%ld..%ld)\n",id,Tag::is_final_scan()?"final":"lookahead",my_range.begin(),my_range.end(),r.begin(),r.end());
+#endif /* PRINT_DEBUG */
+        ASSERT( !Tag::is_final_scan() || (my_range.begin()==0 && my_range.end()==r.begin()) || (my_range.empty() && r.begin()==0), NULL );
+        for( long i=r.begin(); i<r.end(); ++i ) {
+            my_total += my_array[i];
+            if( Tag::is_final_scan() ) {
+                ASSERT( AddendHistory[i]<USED_FINAL, "addend used 'finally' twice?" );
+                AddendHistory[i] |= USED_FINAL;
+                my_sum[i] = my_total;
+                VerifySum( 0L, i, int(my_sum[i]), __LINE__ );
+            } else {
+                ASSERT( AddendHistory[i]==UNUSED, "addend used too many times" );
+                AddendHistory[i] |= USED_NONFINAL;
+            }   
+        }
+        if( my_range.empty() )
+            my_range = r;
+        else
+            my_range = Range(my_range.begin(), r.end(), 1 );
+        Snooze(true);
+        ASSERT( self==this, "this Accumulator corrupted or prematurely destroyed" );
+    }
+    void reverse_join( const Accumulator& left ) {
+#if PRINT_DEBUG
+        REPORT("reverse join %d [%ld..%ld) %d [%ld..%ld)\n",
+               left.id,left.my_range.begin(),left.my_range.end(),
+               id,my_range.begin(),my_range.end());
+#endif /* PRINT_DEBUG */
+        Snooze(true);
+        ASSERT( ScanIsRunning, NULL );     
+        ASSERT( left.my_range.end()==my_range.begin(), NULL );
+        my_total += left.my_total;
+        my_range = Range( left.my_range.begin(), my_range.end(), 1 );
+        ASSERT( ScanIsRunning, NULL );     
+        Snooze(true);
+        ASSERT( ScanIsRunning, NULL );     
+        ASSERT( self==this, NULL );
+        ASSERT( left.self==&left, NULL );
+    }
+    void assign( const Accumulator& other ) {
+        my_total = other.my_total;
+        my_range = other.my_range;
+        ASSERT( self==this, NULL );
+        ASSERT( other.self==&other, "other Accumulator corrupted or prematurely destroyed" );
+    }
+};
+
+#include "tbb/tick_count.h"
+#include "harness.h"
+
+static void VerifySum( long start_index, long finish_index, int sum, int line ) {
+    int expected = TriangularSum( finish_index ) - TriangularSum( start_index );
+    if( expected!=sum ) {
+        REPORT( "line %d: sum[%ld..%ld] should be = %d, but was computed as %d\n",
+                line, start_index, finish_index, expected, sum );
+        abort();
+    }
+}
+
+void TestAccumulator( int mode, int nthread ) {
+    typedef int T;
+    T* addend = new T[MAXN];
+    T* sum = new T[MAXN];
+    for( long n=0; n<=MAXN; ++n ) {
+        for( long i=0; i<MAXN; ++i ) {
+            addend[i] = -1;
+            sum[i] = -2;
+            AddendHistory[i] = UNUSED;
+        }
+        for( long i=0; i<n; ++i )
+            addend[i] = i;
+        Accumulator<T> acc( addend, sum );
+        tbb::tick_count t0 = tbb::tick_count::now();
+#if PRINT_DEBUG
+        REPORT("--------- mode=%d range=[0..%ld)\n",mode,n);
+#endif /* PRINT_DEBUG */
+        ScanIsRunning = true;
+
+        switch (mode) {
+            case 0:
+                tbb::parallel_scan( Range( 0, n, 1 ), acc );
+            break;
+            case 1:
+                tbb::parallel_scan( Range( 0, n, 1 ), acc, tbb::simple_partitioner() );
+            break;
+            case 2:
+                tbb::parallel_scan( Range( 0, n, 1 ), acc, tbb::auto_partitioner() );
+            break;
+        }
+
+        ScanIsRunning = false;
+#if PRINT_DEBUG
+        REPORT("=========\n");
+#endif /* PRINT_DEBUG */
+        Snooze(false);
+        tbb::tick_count t1 = tbb::tick_count::now();
+        long used_once_count = 0;
+        for( long i=0; i<n; ++i ) 
+            if( !(AddendHistory[i]&USED_FINAL) ) {
+                REPORT("failed to use addend[%ld] %s\n",i,AddendHistory[i]&USED_NONFINAL?"(but used nonfinal)":"");
+            }
+        for( long i=0; i<n; ++i ) {
+            VerifySum( 0, i, sum[i], __LINE__ );
+            used_once_count += AddendHistory[i]==USED_FINAL;
+        }
+        if( n )
+            ASSERT( acc.my_total==sum[n-1], NULL );
+        else
+            ASSERT( acc.my_total==0, NULL );
+        REMARK("time [n=%ld] = %g\tused_once%% = %g\tnthread=%d\n",n,(t1-t0).seconds(), n==0 ? 0 : 100.0*used_once_count/n,nthread);
+    }
+    delete[] addend;
+    delete[] sum;
+}
+
+static void TestScanTags() {
+    ASSERT( tbb::pre_scan_tag::is_final_scan()==false, NULL );
+    ASSERT( tbb::final_scan_tag::is_final_scan()==true, NULL );
+}
+
+#include "tbb/task_scheduler_init.h"
+#include "harness_cpu.h"
+
+int TestMain () {
+    TestScanTags();
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        for (int mode = 0; mode < 3; mode++) {
+            tbb::task_scheduler_init init(p);
+            NumberOfLiveAccumulator = 0;
+            TestAccumulator(mode, p);
+
+            // Test that all workers sleep when no work
+            TestCPUUserTime(p);
+
+            // Checking has to be done late, because when parallel_scan makes copies of
+            // the user's "Body", the copies might be destroyed slightly after parallel_scan 
+            // returns.
+            ASSERT( NumberOfLiveAccumulator==0, NULL );
+        }
+    }
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_sort.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_sort.cpp
new file mode 100644 (file)
index 0000000..edf4a16
--- /dev/null
@@ -0,0 +1,529 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/parallel_sort.h"
+#include "tbb/task_scheduler_init.h"
+#include "tbb/concurrent_vector.h"
+#include "harness.h"
+#include <math.h>
+#include <exception>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <algorithm>
+#include <iterator>
+#include <functional>
+#include <string>
+#include <cstring>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+/** Minimal has a tightly controlled interface so that we can verify
+    that parallel_sort uses only the required interface. */
+class Minimal {
+    int val;
+public:
+    Minimal() {}
+    void set_val(int i) { val = i; }
+    static bool CompareWith (const Minimal &a, const Minimal &b) { 
+        return (a.val < b.val);
+    }
+    static bool AreEqual( Minimal &a,  Minimal &b) { 
+       return a.val == b.val; 
+    }
+};
+
+//! Defines a comparison function object for Minimal 
+class MinimalCompare {
+public:
+    bool operator() (const Minimal &a, const Minimal &b) const { 
+        return Minimal::CompareWith(a,b);
+    }
+};
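+// Minimal intentionally provides no operator<, so the sorts below always go through this
+// comparator (see the Minimal specializations of parallel_sortTest).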
+
+//! The default Validate; note that it uses operator==, which parallel_sort itself does not require.
+template<typename RandomAccessIterator>
+bool Validate(RandomAccessIterator a, RandomAccessIterator b, size_t n) {
+    for (size_t i = 0; i < n; i++) {
+        ASSERT( a[i] == b[i], NULL );
+    }
+    return true;
+} 
+
+//! A Validate specialized to std::string; the extra output is for debugging only.
+template<>
+bool Validate<std::string *>(std::string * a, std::string * b, size_t n) {
+    for (size_t i = 0; i < n; i++) {
+        if ( Verbose && a[i] != b[i]) {
+          for (size_t j = 0; j < n; j++) {
+              REPORT("a[%llu] == %s and b[%llu] == %s\n", static_cast<unsigned long long>(j), a[j].c_str(), static_cast<unsigned long long>(j), b[j].c_str());
+          }
+        }
+        ASSERT( a[i] == b[i], NULL );
+    }
+    return true;
+}
+
+//! A Validate specialized to Minimal since it does not define an operator== 
+template<>
+bool Validate<Minimal *>(Minimal *a, Minimal *b, size_t n) {
+    for (size_t i = 0; i < n; i++) {
+        ASSERT( Minimal::AreEqual(a[i],b[i]), NULL );
+    }
+    return true;
+} 
+
+//! A Validate specialized to concurrent_vector<Minimal> since it does not define an operator== 
+template<>
+bool Validate<tbb::concurrent_vector<Minimal>::iterator>(tbb::concurrent_vector<Minimal>::iterator a, 
+                                                         tbb::concurrent_vector<Minimal>::iterator b, size_t n) {
+    for (size_t i = 0; i < n; i++) {
+        ASSERT( Minimal::AreEqual(a[i],b[i]), NULL );
+    }
+    return true;
+}
+
+//! Used in Verbose mode to identify which data set is being used.
+static std::string test_type; 
+
+//! The default initialization routine.
+/*! This routine assumes that you can assign to the elements from a float.
+    It assumes that iter and sorted_list have already been allocated. It fills
+    them according to the current data set (tracked by a local static variable).
+    Returns true if a valid test has been set up, or false if there is no test to
+    perform.
+*/ 
+   
+template < typename RandomAccessIterator, typename Compare >
+bool init_iter(RandomAccessIterator iter, RandomAccessIterator sorted_list, size_t n, const Compare &compare, bool reset) {
+    static char test_case = 0; 
+    const char num_cases = 3; 
+
+    if (reset) test_case = 0;
+
+    if (test_case < num_cases) {
+        // switch on the current test case, filling the iter and sorted_list appropriately
+        switch(test_case) {
+            case 0:
+                /* use sin to generate the values */
+                test_type = "sin";
+                for (size_t i = 0; i < n; i++) 
+                    iter[i] = sorted_list[i] = static_cast<typename std::iterator_traits< RandomAccessIterator >::value_type>(sin(float(i)));
+                break;
+            case 1:
+                /* presorted list */ 
+                test_type = "pre-sorted";
+                for (size_t i = 0; i < n; i++) 
+                    iter[i] = sorted_list[i] = static_cast<typename std::iterator_traits< RandomAccessIterator >::value_type>(i);
+                break;
+            case 2:
+                /* reverse-sorted list */ 
+                test_type = "reverse-sorted";
+                for (size_t i = 0; i < n; i++) 
+                    iter[i] = sorted_list[i] = static_cast<typename std::iterator_traits< RandomAccessIterator >::value_type>(n - i);
+                break;
+        } 
+
+        // pre-sort sorted_list for later validity testing 
+        std::sort(sorted_list, sorted_list + n, compare);
+        test_case++;
+        return true;
+    }
+    return false;
+}
+
+template < typename T, typename Compare >
+bool init_iter(T * iter, T * sorted_list, size_t n, const Compare &compare, bool reset) {
+    static char test_case = 0;
+    const char num_cases = 3;
+
+    if (reset) test_case = 0;
+
+    if (test_case < num_cases) {
+        // switch on the current test case, filling the iter and sorted_list appropriately
+        switch(test_case) {
+            case 0:
+                /* use sin to generate the values */
+                test_type = "sin";
+                for (size_t i = 0; i < n; i++)
+                    iter[i] = sorted_list[i] = T(sin(float(i)));
+                break;
+            case 1:
+                /* presorted list */
+                test_type = "pre-sorted";
+                for (size_t i = 0; i < n; i++)
+                    iter[i] = sorted_list[i] = T(i);
+                break;
+            case 2:
+                /* reverse-sorted list */
+                test_type = "reverse-sorted";
+                for (size_t i = 0; i < n; i++)
+                    iter[i] = sorted_list[i] = T(n - i);
+                break;
+        }
+
+        // pre-sort sorted_list for later validity testing 
+        std::sort(sorted_list, sorted_list + n, compare);
+        test_case++;
+        return true;
+    }
+    return false;
+}
+
+
+//! The initialization routine specialized to the class Minimal 
+/*! Minimal cannot have floats assigned to it.  This function uses the set_val method
+*/
+
+template < >
+bool init_iter(Minimal* iter, Minimal * sorted_list, size_t n, const MinimalCompare &compare, bool reset) {
+    static char test_case = 0;
+    const char num_cases = 3; 
+
+    if (reset) test_case = 0;
+
+    if (test_case < num_cases) {
+        switch(test_case) {
+            case 0:
+                /* use sin to generate the values */
+                test_type = "sin";
+                for (size_t i = 0; i < n; i++) {
+                    iter[i].set_val( int( sin( float(i) ) * 1000.f) );
+                    sorted_list[i].set_val( int ( sin( float(i) ) * 1000.f) );
+                }
+                break;
+            case 1:
+                /* presorted list */ 
+                test_type = "pre-sorted";
+                for (size_t i = 0; i < n; i++) {
+                    iter[i].set_val( int(i) ); 
+                    sorted_list[i].set_val( int(i) );
+                }
+                break;
+            case 2:
+                /* reverse-sorted list */ 
+                test_type = "reverse-sorted";
+                for (size_t i = 0; i < n; i++) {
+                    iter[i].set_val( int(n-i) ); 
+                    sorted_list[i].set_val( int(n-i) );
+                }
+                break;
+        } 
+        std::sort(sorted_list, sorted_list + n, compare);
+        test_case++;
+        return true;
+    }
+    return false;
+}
+
+//! The initialization routine specialized to the class concurrent_vector<Minimal> 
+/*! Minimal cannot have floats assigned to it.  This function uses the set_val method
+*/
+
+template < >
+bool init_iter(tbb::concurrent_vector<Minimal>::iterator iter, tbb::concurrent_vector<Minimal>::iterator sorted_list, 
+               size_t n, const MinimalCompare &compare, bool reset) {
+    static char test_case = 0;
+    const char num_cases = 3;
+
+    if (reset) test_case = 0;
+
+    if (test_case < num_cases) {
+        switch(test_case) {
+            case 0:
+                /* use sin to generate the values */
+                test_type = "sin";
+                for (size_t i = 0; i < n; i++) {
+                    iter[i].set_val( int( sin( float(i) ) * 1000.f) );
+                    sorted_list[i].set_val( int ( sin( float(i) ) * 1000.f) );
+                }
+                break;
+            case 1:
+                /* presorted list */
+                test_type = "pre-sorted";
+                for (size_t i = 0; i < n; i++) {
+                    iter[i].set_val( int(i) );
+                    sorted_list[i].set_val( int(i) );
+                }
+                break;
+            case 2:
+                /* reverse-sorted list */
+                test_type = "reverse-sorted";
+                for (size_t i = 0; i < n; i++) {
+                    iter[i].set_val( int(n-i) );
+                    sorted_list[i].set_val( int(n-i) );
+                }
+                break;
+        }
+        std::sort(sorted_list, sorted_list + n, compare);
+        test_case++;
+        return true;
+    }
+    return false;
+}
+
+//! The initialization routine specialized to the class string 
+/*! strings are created from floats. 
+*/
+
+template<>
+bool init_iter(std::string *iter, std::string *sorted_list, size_t n, const std::less<std::string> &compare, bool reset) {
+    static char test_case = 0;
+    const char num_cases = 1; 
+    if (reset) test_case = 0;
+
+    if (test_case < num_cases) {
+        switch(test_case) {
+            case 0:
+                /* use sin to generate the values */
+                test_type = "sin";
+                for (size_t i = 0; i < n; i++) {
+                    char buffer[20];
+#if __STDC_SECURE_LIB__>=200411 && !__MINGW64__
+                    sprintf_s(buffer, sizeof(buffer), "%f", float(sin(float(i))));
+#else
+                    sprintf(buffer, "%f", float(sin(float(i))));
+#endif /* __STDC_SECURE_LIB__>=200411 && !__MINGW64__ */
+                    sorted_list[i] = iter[i] = std::string(buffer);
+                }
+                break;
+        } 
+        std::sort(sorted_list, sorted_list + n, compare);
+        test_case++;
+        return true;
+    }
+    return false;
+}
+
+//! The current number of threads in use (for Verbose only) 
+static size_t current_p;
+
+//! The current data type being sorted (for Verbose only) 
+static std::string current_type; 
+
+//! The default test routine.
+/*! Sorts a data set of size n, cycling through all of the data-set initializations
+    provided by init_iter, and selects the interface to parallel_sort depending on
+    whether a compare has been provided.
+*/
+template<typename RandomAccessIterator, typename Compare>
+bool parallel_sortTest(size_t n, RandomAccessIterator iter, RandomAccessIterator sorted_list, const Compare *comp) {
+    bool passed = true;
+
+    Compare local_comp; 
+
+    init_iter(iter, sorted_list, n, local_comp, true);
+    do {
+        REMARK("%s %s p=%llu n=%llu :",current_type.c_str(), test_type.c_str(), 
+                   static_cast<unsigned long long>(current_p), static_cast<unsigned long long>(n));
+        if (comp != NULL) {
+            tbb::parallel_sort(iter, iter + n, local_comp );
+         } else {
+            tbb::parallel_sort(iter, iter + n );
+         }
+        if (!Validate(iter, sorted_list, n)) 
+            passed = false;
+        REMARK("passed\n");
+    } while (init_iter(iter, sorted_list, n, local_comp, false));
+    return passed;
+}
+
+//! The test routine specialized to Minimal, since it has no operator< defined for it
+template<>
+bool parallel_sortTest(size_t n, Minimal * iter, Minimal * sorted_list, const MinimalCompare *compare) {
+    bool passed = true;
+
+    if (compare == NULL) return passed;
+
+    init_iter(iter, sorted_list, n, *compare, true);
+    do {
+        REMARK("%s %s p=%llu n=%llu :",current_type.c_str(), test_type.c_str(),
+                    static_cast<unsigned long long>(current_p), static_cast<unsigned long long>(n));
+
+        tbb::parallel_sort(iter, iter + n, *compare );
+
+        if (!Validate(iter, sorted_list, n))
+            passed = false;
+        REMARK("passed\n");
+    } while (init_iter(iter, sorted_list, n, *compare, false));
+    return passed;
+}
+
+//! The test routine specialized to concurrent_vector<Minimal>, since Minimal has no operator< defined for it
+template<>
+bool parallel_sortTest(size_t n, tbb::concurrent_vector<Minimal>::iterator iter, 
+                       tbb::concurrent_vector<Minimal>::iterator sorted_list, const MinimalCompare *compare) {
+    bool passed = true;
+
+    if (compare == NULL) return passed;
+    
+    init_iter(iter, sorted_list, n, *compare, true);
+    do {
+        REMARK("%s %s p=%llu n=%llu :",current_type.c_str(), test_type.c_str(),
+                    static_cast<unsigned long long>(current_p), static_cast<unsigned long long>(n));
+    
+        tbb::parallel_sort(iter, iter + n, *compare );
+
+        if (!Validate(iter, sorted_list, n))
+            passed = false;
+        REMARK("passed\n");
+    } while (init_iter(iter, sorted_list, n, *compare, false));
+    return passed;
+}
+
+//! The main driver for the tests.
+/*! Minimal, float and string types are used.  All interfaces to parallel_sort that are usable
+    by each type are tested.
+*/
+void Flog() {
+    // For each type create: 
+    // the list to be sorted by parallel_sort (array) 
+    // the list to be sorted by STL sort (array_2)
+    // and a less function object
+
+    const size_t N = 50000;
+    
+    Minimal *minimal_array = new Minimal[N];
+    Minimal *minimal_array_2 = new Minimal[N];
+    MinimalCompare minimal_less;
+
+    float *float_array = new float[N];
+    float *float_array_2 = new float[N];
+    std::less<float> float_less;
+
+    tbb::concurrent_vector<float> float_cv1;
+    tbb::concurrent_vector<float> float_cv2;
+    float_cv1.grow_to_at_least(N);
+    float_cv2.grow_to_at_least(N);
+
+    std::string *string_array = new std::string[N];
+    std::string *string_array_2 = new std::string[N];
+    std::less<std::string> string_less;
+
+    tbb::concurrent_vector<Minimal> minimal_cv1;
+    tbb::concurrent_vector<Minimal> minimal_cv2;
+    minimal_cv1.grow_to_at_least(N);
+    minimal_cv2.grow_to_at_least(N);
+    // run the appropriate tests for each type
+
+    current_type = "Minimal(less)";
+    parallel_sortTest(0, minimal_array, minimal_array_2, &minimal_less);
+    parallel_sortTest(1, minimal_array, minimal_array_2, &minimal_less);
+    parallel_sortTest(10, minimal_array, minimal_array_2, &minimal_less);
+    parallel_sortTest(9999, minimal_array, minimal_array_2, &minimal_less);
+    parallel_sortTest(50000, minimal_array, minimal_array_2, &minimal_less);
+
+    current_type = "float (no less)";
+    parallel_sortTest(0, float_array, float_array_2, static_cast<std::less<float> *>(NULL)); 
+    parallel_sortTest(1, float_array, float_array_2, static_cast<std::less<float> *>(NULL)); 
+    parallel_sortTest(10, float_array, float_array_2, static_cast<std::less<float> *>(NULL)); 
+    parallel_sortTest(9999, float_array, float_array_2, static_cast<std::less<float> *>(NULL)); 
+    parallel_sortTest(50000, float_array, float_array_2, static_cast<std::less<float> *>(NULL)); 
+
+    current_type = "float (less)";
+    parallel_sortTest(0, float_array, float_array_2, &float_less); 
+    parallel_sortTest(1, float_array, float_array_2, &float_less); 
+    parallel_sortTest(10, float_array, float_array_2, &float_less); 
+    parallel_sortTest(9999, float_array, float_array_2, &float_less); 
+    parallel_sortTest(50000, float_array, float_array_2, &float_less); 
+
+    current_type = "concurrent_vector<float> (no less)";
+    parallel_sortTest(0, float_cv1.begin(), float_cv2.begin(), static_cast<std::less<float> *>(NULL));
+    parallel_sortTest(1, float_cv1.begin(), float_cv2.begin(), static_cast<std::less<float> *>(NULL));
+    parallel_sortTest(10, float_cv1.begin(), float_cv2.begin(), static_cast<std::less<float> *>(NULL));
+    parallel_sortTest(9999, float_cv1.begin(), float_cv2.begin(), static_cast<std::less<float> *>(NULL));
+    parallel_sortTest(50000, float_cv1.begin(), float_cv2.begin(), static_cast<std::less<float> *>(NULL));
+
+    current_type = "concurrent_vector<float> (less)";
+    parallel_sortTest(0, float_cv1.begin(), float_cv2.begin(), &float_less);
+    parallel_sortTest(1, float_cv1.begin(), float_cv2.begin(), &float_less);
+    parallel_sortTest(10, float_cv1.begin(), float_cv2.begin(), &float_less);
+    parallel_sortTest(9999, float_cv1.begin(), float_cv2.begin(), &float_less);
+    parallel_sortTest(50000, float_cv1.begin(), float_cv2.begin(), &float_less);
+
+    current_type = "string (no less)";
+    parallel_sortTest(0, string_array, string_array_2, static_cast<std::less<std::string> *>(NULL));
+    parallel_sortTest(1, string_array, string_array_2, static_cast<std::less<std::string> *>(NULL));
+    parallel_sortTest(10, string_array, string_array_2, static_cast<std::less<std::string> *>(NULL));
+    parallel_sortTest(9999, string_array, string_array_2, static_cast<std::less<std::string> *>(NULL));
+    parallel_sortTest(50000, string_array, string_array_2, static_cast<std::less<std::string> *>(NULL));
+
+    current_type = "string (less)";
+    parallel_sortTest(0, string_array, string_array_2, &string_less);
+    parallel_sortTest(1, string_array, string_array_2, &string_less);
+    parallel_sortTest(10, string_array, string_array_2, &string_less);
+    parallel_sortTest(9999, string_array, string_array_2, &string_less);
+    parallel_sortTest(50000, string_array, string_array_2, &string_less);
+
+    current_type = "concurrent_vector<Minimal> (less)";
+    parallel_sortTest(0, minimal_cv1.begin(), minimal_cv2.begin(), &minimal_less);
+    parallel_sortTest(1, minimal_cv1.begin(), minimal_cv2.begin(), &minimal_less);
+    parallel_sortTest(10, minimal_cv1.begin(), minimal_cv2.begin(), &minimal_less);
+    parallel_sortTest(9999, minimal_cv1.begin(), minimal_cv2.begin(), &minimal_less);
+    parallel_sortTest(50000, minimal_cv1.begin(), minimal_cv2.begin(), &minimal_less);
+
+    delete [] minimal_array;
+    delete [] minimal_array_2;
+
+    delete [] float_array;
+    delete [] float_array_2;
+
+    delete [] string_array;
+    delete [] string_array_2;
+}
+
+#include <cstdio>
+#include "harness_cpu.h"
+
+int TestMain () {
+    if( MinThread<1 ) {
+        REPORT("Usage: number of threads must be positive\n");
+        exit(1);
+    }
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        if( p>0 ) {
+            tbb::task_scheduler_init init( p );
+            current_p = p;
+            Flog();
+
+            // Test that all workers sleep when no work
+            TestCPUUserTime(p);
+        }
+    } 
+    return Harness::Done;
+}
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_while.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_parallel_while.cpp
new file mode 100644 (file)
index 0000000..88f7997
--- /dev/null
@@ -0,0 +1,179 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/parallel_while.h"
+#include "harness.h"
+
+const int N = 200;
+
+typedef int Element;
+
+//! Representation of an array index with only those signatures required by parallel_while.
+class MinimalArgumentType {
+    void operator=( const MinimalArgumentType& );
+    long my_value;
+    enum {
+        DEAD=0xDEAD,
+        LIVE=0x2718,
+        INITIALIZED=0x3141 
+    } my_state;
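+    // Distinctive state values make the ASSERTs below likely to catch use of a
+    // destroyed or uninitialized MinimalArgumentType.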
+public:
+    ~MinimalArgumentType() {
+        ASSERT( my_state==LIVE||my_state==INITIALIZED, NULL );
+        my_state = DEAD;
+    }
+    MinimalArgumentType() {
+        my_state = LIVE;
+    }
+    void set_value( long i ) {
+        ASSERT( my_state==LIVE||my_state==INITIALIZED, NULL );
+        my_value = i;
+        my_state = INITIALIZED;
+    }
+    long get_value() const {
+        ASSERT( my_state==INITIALIZED, NULL );
+        return my_value;
+    } 
+};
+
+class IntegerStream {
+    long my_limit;
+    long my_index;
+public:
+    IntegerStream( long n ) : my_limit(n), my_index(0) {}
+    bool pop_if_present( MinimalArgumentType& v ) {
+        if( my_index>=my_limit ) 
+            return false;
+        v.set_value( my_index );
+        my_index+=2;
+        return true;
+    }
+};
+
+class MatrixMultiplyBody: NoAssign {
+    Element (*a)[N];
+    Element (*b)[N];
+    Element (*c)[N];
+    const int n;
+    tbb::parallel_while<MatrixMultiplyBody>& my_while;
+public:
+    typedef MinimalArgumentType argument_type;
+    void operator()( argument_type i_arg ) const {
+        long i = i_arg.get_value();
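+        // IntegerStream supplies only even indices; each even row adds its odd successor
+        // through parallel_while::add, so both ways of feeding work are exercised.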
+        if( (i&1)==0 && i+1<N ) {
+            MinimalArgumentType value;
+            value.set_value(i+1);
+            my_while.add( value );
+        }
+        for( int j=0; j<n; ++j )    
+            c[i][j] = 0;
+        for( int k=0; k<n; ++k ) {
+            Element aik = a[i][k];
+            for( int j=0; j<n; ++j )    
+                c[i][j] += aik*b[k][j];
+        }
+    }
+    MatrixMultiplyBody( tbb::parallel_while<MatrixMultiplyBody>& w, Element c_[N][N], Element a_[N][N], Element b_[N][N], int n_ ) :
+        a(a_), b(b_), c(c_), n(n_),  my_while(w)
+    {}
+};
+
+void WhileMatrixMultiply( Element c[N][N], Element a[N][N], Element b[N][N], int n ) {
+    IntegerStream stream( N );
+    tbb::parallel_while<MatrixMultiplyBody> w;
+    MatrixMultiplyBody body(w,c,a,b,n);
+    w.run( stream, body );
+}
+
+#include "tbb/tick_count.h"
+#include <cstdlib>
+#include <cstdio>
+using namespace std;
+
+static long Iterations = 5;
+
+static void SerialMatrixMultiply( Element c[N][N], Element a[N][N], Element b[N][N], int n ) {
+    for( int i=0; i<n; ++i ) {   
+        for( int j=0; j<n; ++j )    
+            c[i][j] = 0;
+        for( int k=0; k<n; ++k ) {
+            Element aik = a[i][k];
+            for( int j=0; j<n; ++j )    
+                c[i][j] += aik*b[k][j];
+        }
+    }
+}
+
+static void InitializeMatrix( Element x[N][N], int n, int salt ) {
+    for( int i=0; i<n; ++i )
+        for( int j=0; j<n; ++j )
+            x[i][j] = (i*n+j)^salt;
+}
+
+static Element A[N][N], B[N][N], C[N][N], D[N][N];
+
+static void Run( int nthread, int n ) {
+    /* Initialize matrices */
+    InitializeMatrix(A,n,5);
+    InitializeMatrix(B,n,10);
+    InitializeMatrix(C,n,0);
+    InitializeMatrix(D,n,15);
+
+    tbb::tick_count t0 = tbb::tick_count::now();
+    for( long i=0; i<Iterations; ++i ) {
+        WhileMatrixMultiply( C, A, B, n );
+    }
+    tbb::tick_count t1 = tbb::tick_count::now();
+    SerialMatrixMultiply( D, A, B, n );
+
+    // Check result
+    for( int i=0; i<n; ++i )   
+        for( int j=0; j<n; ++j )    
+            ASSERT( C[i][j]==D[i][j], NULL );
+    REMARK("time=%g\tnthread=%d\tn=%d\n",(t1-t0).seconds(),nthread,n);
+}
+
+#include "tbb/task_scheduler_init.h"
+#include "harness_cpu.h"
+
+int TestMain () {
+    if( MinThread<1 ) {
+        REPORT("number of threads must be positive\n");
+        exit(1);
+    }
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        tbb::task_scheduler_init init( p );
+        for( int n=N/4; n<=N; n+=N/4 )
+            Run(p,n);
+
+        // Test that all workers sleep when no work
+        TestCPUUserTime(p);
+    }
+    return Harness::Done;
+}
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_pipeline.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_pipeline.cpp
new file mode 100644 (file)
index 0000000..642d87c
--- /dev/null
@@ -0,0 +1,321 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/tbb_stddef.h"
+#include "tbb/pipeline.h"
+#include "tbb/spin_mutex.h"
+#include "tbb/atomic.h"
+#include <cstdlib>
+#include <cstdio>
+#include "harness.h"
+
+// In the test, variables related to token counting are declared
+// as unsigned long to match definition of tbb::internal::Token.
+
+struct Buffer {
+    //! Indicates that the buffer is not used.
+    static const unsigned long unused = ~0ul;
+    unsigned long id;
+    //! True if Buffer is in use.
+    bool is_busy;
+    unsigned long sequence_number;
+    Buffer() : id(unused), is_busy(false), sequence_number(unused) {}
+};
+
+class waiting_probe {
+    size_t check_counter;
+public:
+    waiting_probe() : check_counter(0) {}
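+    // required() returns true about once every 0x8000 calls; the caller then invokes probe(),
+    // which uses TestCPUUserTime to verify that idle worker threads sleep instead of spinning.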
+    bool required( ) {
+        ++check_counter;
+        return !((check_counter+1)&size_t(0x7FFF));
+    }
+    void probe( ); // defined below
+};
+
+static const unsigned MaxStreamSize = 8000;
+static const unsigned MaxStreamItemsPerThread = 1000;
+//! Maximum number of filters allowed
+static const unsigned MaxFilters = 5;
+static unsigned StreamSize;
+static const unsigned MaxBuffer = 8;
+static bool Done[MaxFilters][MaxStreamSize];
+static waiting_probe WaitTest;
+static unsigned out_of_order_count;
+
+#include "harness_concurrency_tracker.h"
+
+class BaseFilter: public tbb::filter {
+    bool* const my_done;
+    const bool my_is_last;  
+    bool my_is_running;
+public:
+    tbb::atomic<tbb::internal::Token> current_token;
+    BaseFilter( tbb::filter::mode type, bool done[], bool is_last ) : 
+        filter(type),
+        my_done(done),
+        my_is_last(is_last),
+        my_is_running(false),
+        current_token()
+    {}
+    virtual Buffer* get_buffer( void* item ) {
+        current_token++;
+        return static_cast<Buffer*>(item);
+    } 
+    /*override*/void* operator()( void* item ) {
+        Harness::ConcurrencyTracker ct;
+        if( is_serial() )
+            ASSERT( !my_is_running, "premature entry to serial stage" );
+        my_is_running = true;
+        Buffer* b = get_buffer(item);
+        if( b ) {
+            if( is_ordered() ) {
+                if( b->sequence_number == Buffer::unused ) 
+                    b->sequence_number = current_token-1;
+                else
+                    ASSERT( b->sequence_number==current_token-1, "item arrived out of order" );
+            } else if( is_serial() ) {
+                if( b->sequence_number != current_token-1 && b->sequence_number != Buffer::unused )
+                    out_of_order_count++;
+            }
+            ASSERT( b->id < StreamSize, NULL ); 
+            ASSERT( !my_done[b->id], "duplicate processing of token?" ); 
+            ASSERT( b->is_busy, NULL );
+            my_done[b->id] = true;
+            if( my_is_last ) {
+                b->id = Buffer::unused;
+                b->sequence_number = Buffer::unused;
+                __TBB_store_with_release(b->is_busy, false);
+            }
+        }
+        my_is_running = false;
+        return b;  
+    }
+};
+
+class InputFilter: public BaseFilter {
+    tbb::spin_mutex input_lock;
+    Buffer buffer[MaxBuffer];
+    const tbb::internal::Token my_number_of_tokens;
+public:
+    InputFilter( tbb::filter::mode type, tbb::internal::Token ntokens, bool done[], bool is_last ) :
+        BaseFilter(type, done, is_last),
+        my_number_of_tokens(ntokens)
+    {}
+    /*override*/Buffer* get_buffer( void* ) {
+        unsigned long next_input;
+        unsigned free_buffer = 0; 
+        { // lock protected scope
+            tbb::spin_mutex::scoped_lock lock(input_lock);
+            if( current_token>=StreamSize )
+                return NULL;
+            next_input = current_token++; 
+            // once in a while, emulate waiting for input; this only makes sense for serial input
+            if( is_serial() && WaitTest.required() )
+                WaitTest.probe( );
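+            // Scan for a buffer that is not busy; the acquire load pairs with the release
+            // store in BaseFilter::operator() that marks a buffer free again.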
+            while( free_buffer<MaxBuffer )
+                if( __TBB_load_with_acquire(buffer[free_buffer].is_busy) )
+                    ++free_buffer;
+                else {
+                    buffer[free_buffer].is_busy = true;
+                    break;
+                }
+        }
+        ASSERT( free_buffer<my_number_of_tokens, "premature reuse of buffer" );
+        Buffer* b = &buffer[free_buffer]; 
+        ASSERT( &buffer[0] <= b, NULL ); 
+        ASSERT( b <= &buffer[MaxBuffer-1], NULL ); 
+        ASSERT( b->id == Buffer::unused, NULL);
+        b->id = next_input;
+        ASSERT( b->sequence_number == Buffer::unused, NULL);
+        return b;
+    }
+};
+
+//! The struct below repeats layout of tbb::pipeline.
+struct hacked_pipeline {
+    tbb::filter* filter_list;
+    tbb::filter* filter_end;
+    tbb::empty_task* end_counter;
+    tbb::atomic<tbb::internal::Token> input_tokens;
+    tbb::atomic<tbb::internal::Token> token_counter;
+    bool end_of_input;
+    bool has_thread_bound_filters;
+
+    virtual ~hacked_pipeline();
+};
+
+//! The struct below repeats layout of tbb::internal::input_buffer.
+struct hacked_input_buffer {
+    void* array; // This should be changed to task_info* if ever used
+    void* my_sem; // This should be changed to semaphore* if ever used
+    tbb::internal::Token array_size;
+    tbb::internal::Token low_token;
+    tbb::spin_mutex array_mutex;
+    tbb::internal::Token high_token;
+    bool is_ordered;
+    bool is_bound;
+};
+
+//! The struct below repeats layout of tbb::filter.
+struct hacked_filter {
+    tbb::filter* next_filter_in_pipeline;
+    hacked_input_buffer* my_input_buffer;
+    unsigned char my_filter_mode;
+    tbb::filter* prev_filter_in_pipeline;
+    tbb::pipeline* my_pipeline;
+    tbb::filter* next_segment;
+
+    virtual ~hacked_filter();
+};
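+// These hacked_* structs mirror the layout of the corresponding TBB classes so that the test
+// can reach private members through casts; TestTrivialPipeline verifies with sizeof ASSERTs
+// that the real layouts still match.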
+
+bool do_hacking_tests = true;
+const tbb::internal::Token tokens_before_wraparound = 0xF;
+
+void TestTrivialPipeline( unsigned nthread, unsigned number_of_filters ) {
+    // There are 3 filter types: parallel, serial_in_order and serial_out_of_order 
+    static const tbb::filter::mode filter_table[] = { tbb::filter::parallel, tbb::filter::serial_in_order, tbb::filter::serial_out_of_order}; 
+    const unsigned number_of_filter_types = sizeof(filter_table)/sizeof(filter_table[0]);
+    REMARK( "testing with %lu threads and %lu filters\n", nthread, number_of_filters );
+    ASSERT( number_of_filters<=MaxFilters, "too many filters" );
+    ASSERT( sizeof(hacked_pipeline) == sizeof(tbb::pipeline), "layout changed for tbb::pipeline?" );
+    ASSERT( sizeof(hacked_filter) == sizeof(tbb::filter), "layout changed for tbb::filter?" );
+    tbb::internal::Token ntokens = nthread<MaxBuffer ? nthread : MaxBuffer;
+    // Count maximum iterations number
+    unsigned limit = 1;
+    for( unsigned i=0; i<number_of_filters; ++i)
+        limit *= number_of_filter_types;
+    // Iterate over possible filter sequences
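+    // Each numeral is read as a base-3 number whose i-th digit (least significant first)
+    // selects the mode of filter i from filter_table.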
+    for( unsigned numeral=0; numeral<limit; ++numeral ) {
+        // Build pipeline
+        tbb::pipeline pipeline;
+        if( do_hacking_tests ) {
+            // A private member of pipeline is hacked here for the sake of testing wrap-around immunity.
+            ((hacked_pipeline*)(void*)&pipeline)->token_counter = ~tokens_before_wraparound;
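+            // ~tokens_before_wraparound (i.e. ~0xF) puts the counter 16 increments away from
+            // wrapping to zero, so wrap-around occurs early in the run.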
+        }
+        tbb::filter* filter[MaxFilters];
+        unsigned temp = numeral;
+        // parallelism_limit is the upper bound on the possible parallelism
+        unsigned parallelism_limit = 0;
+        for( unsigned i=0; i<number_of_filters; ++i, temp/=number_of_filter_types ) {
+            tbb::filter::mode filter_type = filter_table[temp%number_of_filter_types];
+            const bool is_last = i==number_of_filters-1;
+            if( i==0 )
+                filter[i] = new InputFilter(filter_type,ntokens,Done[i],is_last);
+            else
+                filter[i] = new BaseFilter(filter_type,Done[i],is_last);
+            pipeline.add_filter(*filter[i]);
+            // The ordered buffer of serial filters is hacked as well.
+            if ( filter[i]->is_serial() ) {
+                if( do_hacking_tests ) {
+                    ((hacked_filter*)(void*)filter[i])->my_input_buffer->low_token = ~tokens_before_wraparound;
+                    ((hacked_filter*)(void*)filter[i])->my_input_buffer->high_token = ~tokens_before_wraparound;
+                }
+                parallelism_limit += 1;
+            } else {
+                parallelism_limit = nthread;
+            }
+        }
+        // Account for clipping of parallelism.
+        if( parallelism_limit>nthread ) 
+            parallelism_limit = nthread;
+        if( parallelism_limit>ntokens )
+            parallelism_limit = (unsigned)ntokens;
+        Harness::ConcurrencyTracker::Reset();
+        unsigned streamSizeLimit = min( MaxStreamSize, nthread * MaxStreamItemsPerThread );
+        for( StreamSize=0; StreamSize<=streamSizeLimit; ) {
+            memset( Done, 0, sizeof(Done) );
+            for( unsigned i=0; i<number_of_filters; ++i ) {
+                static_cast<BaseFilter*>(filter[i])->current_token=0;
+            }
+            pipeline.run( ntokens );
+            ASSERT( !Harness::ConcurrencyTracker::InstantParallelism(), "filter still running?" );
+            for( unsigned i=0; i<number_of_filters; ++i )
+                ASSERT( static_cast<BaseFilter*>(filter[i])->current_token==StreamSize, NULL );
+            for( unsigned i=0; i<MaxFilters; ++i )
+                for( unsigned j=0; j<StreamSize; ++j ) {
+                    ASSERT( Done[i][j]==(i<number_of_filters), NULL );
+                }
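+            // Grow the stream size: step through the small sizes one by one, then grow
+            // geometrically (*8/3) up to streamSizeLimit.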
+            if( StreamSize < min(nthread*8, 32u) ) {
+                ++StreamSize;
+            } else {
+                StreamSize = StreamSize*8/3;
+            }
+        }
+        if( Harness::ConcurrencyTracker::PeakParallelism() < parallelism_limit ) 
+            REMARK( "nthread=%lu ntokens=%lu MaxParallelism=%lu parallelism_limit=%lu\n",
+                nthread, ntokens, Harness::ConcurrencyTracker::PeakParallelism(), parallelism_limit );
+        for( unsigned i=0; i < number_of_filters; ++i ) {
+            delete filter[i];
+            filter[i] = NULL;
+        }
+        pipeline.clear();
+    }
+}
+
+#include "harness_cpu.h"
+
+static int nthread; // knowing number of threads is necessary to call TestCPUUserTime
+
+void waiting_probe::probe( ) {
+    if( nthread==1 ) return;
+    REMARK("emulating wait for input\n");
+    // Test that threads sleep while no work.
+    // The master doesn't sleep so there could be 2 active threads if a worker is waiting for input
+    TestCPUUserTime(nthread, 2);
+}
+
+#include "tbb/task_scheduler_init.h"
+
+int TestMain () {
+    out_of_order_count = 0;
+    if( MinThread<1 ) {
+        REPORT("must have at least one thread\n");
+        exit(1);
+    }
+    if( tbb::TBB_runtime_interface_version()>TBB_INTERFACE_VERSION) {
+        REMARK("Warning: implementation dependent tests disabled\n");
+        do_hacking_tests = false;
+    }
+
+    // Test with varying number of threads.
+    for( nthread=MinThread; nthread<=MaxThread; ++nthread ) {
+        // Initialize TBB task scheduler
+        tbb::task_scheduler_init init(nthread);
+
+        // Test pipelines with n filters
+        for( unsigned n=0; n<=MaxFilters; ++n )
+            TestTrivialPipeline(nthread,n);
+
+        // Test that all workers sleep when no work
+        TestCPUUserTime(nthread);
+    }
+    if( !out_of_order_count )
+        REPORT("Warning: out of order serial filter received tokens in order\n");
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_pipeline_with_tbf.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_pipeline_with_tbf.cpp
new file mode 100644 (file)
index 0000000..9cac1e5
--- /dev/null
@@ -0,0 +1,493 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/pipeline.h"
+#include "tbb/spin_mutex.h"
+#include "tbb/atomic.h"
+#include "tbb/tbb_thread.h"
+#include <cstdlib>
+#include <cstdio>
+#include "harness.h"
+
+// In the test, variables related to token counting are declared
+// as unsigned long to match definition of tbb::internal::Token.
+
+//! Id of thread that first executes work on non-thread-bound stages
+tbb::tbb_thread::id thread_id;
+//! Zero thread id
+tbb::tbb_thread::id id0;
+//! True if non-thread-bound stages must be executed on one thread
+bool is_serial_execution;
+double sleeptime; // how long (in msec) a non-thread-bound stage should sleep
+
+struct Buffer {
+    //! Indicates that the buffer is not used.
+    static const unsigned long unused = ~0ul;
+    unsigned long id;
+    //! True if Buffer is in use.
+    bool is_busy;
+    unsigned long sequence_number;
+    Buffer() : id(unused), is_busy(false), sequence_number(unused) {}
+};
+
+class waiting_probe {
+    size_t check_counter;
+public:
+    waiting_probe() : check_counter(0) {}
+    bool required( ) {
+        ++check_counter;
+        return !((check_counter+1)&size_t(0x7FFF));
+    }
+    void probe( ); // defined below
+};
+
+static const unsigned MaxStreamSize = 8000;
+static const unsigned MaxStreamItemsPerThread = 1000;
+//! Maximum number of filters allowed
+static const unsigned MaxFilters = 4;
+static unsigned StreamSize;
+static const unsigned MaxBuffer = 8;
+static bool Done[MaxFilters][MaxStreamSize];
+static waiting_probe WaitTest;
+static unsigned out_of_order_count;
+
+#include "harness_concurrency_tracker.h"
+
+template<typename T>
+class BaseFilter: public T {
+    bool* const my_done;
+    const bool my_is_last;  
+    bool my_is_running;
+public:
+    tbb::atomic<tbb::internal::Token> current_token;
+    BaseFilter( tbb::filter::mode type, bool done[], bool is_last ) : 
+        T(type),
+        my_done(done),
+        my_is_last(is_last),
+        my_is_running(false),
+        current_token()
+    {}
+    virtual Buffer* get_buffer( void* item ) {
+        current_token++;
+        return static_cast<Buffer*>(item);
+    } 
+    /*override*/void* operator()( void* item ) {
+        // Check if work is done only on one thread when ntokens==1 or 
+        // when pipeline has only one filter that is serial and non-thread-bound
+        if( is_serial_execution && !this->is_bound() ) {
+            // Get id of current thread
+            tbb::tbb_thread::id id = tbb::this_tbb_thread::get_id();
+            // At first execution, set thread_id to current thread id.
+            // Serialized execution is expected, so there should be no race.
+            if( thread_id == id0 )
+                thread_id = id;
+            // Check if work is done on one thread 
+            ASSERT( thread_id == id, "non-thread-bound stages executed on different threads when they must be executed on a single one");
+        }
+        Harness::ConcurrencyTracker ct;
+        if( this->is_serial() )
+            ASSERT( !my_is_running, "premature entry to serial stage" );
+        my_is_running = true;
+        Buffer* b = get_buffer(item);
+        if( b ) {
+            if(!this->is_bound() && sleeptime > 0) {
+                Harness::Sleep((int)sleeptime);
+            }
+            if( this->is_ordered() ) {
+                if( b->sequence_number == Buffer::unused ) 
+                    b->sequence_number = current_token-1;
+                else
+                    ASSERT( b->sequence_number==current_token-1, "item arrived out of order" );
+            } else if( this->is_serial() ) {
+                if( b->sequence_number != current_token-1 && b->sequence_number != Buffer::unused )
+                    out_of_order_count++;
+            }
+            ASSERT( b->id < StreamSize, NULL ); 
+            ASSERT( !my_done[b->id], "duplicate processing of token?" ); 
+            ASSERT( b->is_busy, NULL );
+            my_done[b->id] = true;
+            if( my_is_last ) {
+                b->id = Buffer::unused;
+                b->sequence_number = Buffer::unused;
+                __TBB_store_with_release(b->is_busy, false);
+            }
+        }
+        my_is_running = false;
+        return b;  
+    }
+};
+
+template<typename T>
+class InputFilter: public BaseFilter<T> {
+    tbb::spin_mutex input_lock;
+    Buffer buffer[MaxBuffer];
+    const tbb::internal::Token my_number_of_tokens;
+public:
+    InputFilter( tbb::filter::mode type, tbb::internal::Token ntokens, bool done[], bool is_last ) :
+        BaseFilter<T>(type, done, is_last),
+        my_number_of_tokens(ntokens)
+    {}
+    /*override*/Buffer* get_buffer( void* ) {
+        unsigned long next_input;
+        unsigned free_buffer = 0; 
+        { // lock protected scope
+            tbb::spin_mutex::scoped_lock lock(input_lock);
+            if( this->current_token>=StreamSize )
+                return NULL;
+            next_input = this->current_token++; 
+            // once in a while, emulate waiting for input; this only makes sense for serial input
+            if( this->is_serial() && WaitTest.required() )
+                WaitTest.probe( );
+            while( free_buffer<MaxBuffer )
+                if( __TBB_load_with_acquire(buffer[free_buffer].is_busy) )
+                    ++free_buffer;
+                else {
+                    buffer[free_buffer].is_busy = true;
+                    break;
+                }
+        }
+        ASSERT( free_buffer<my_number_of_tokens, "premature reuse of buffer" );
+        Buffer* b = &buffer[free_buffer]; 
+        ASSERT( &buffer[0] <= b, NULL ); 
+        ASSERT( b <= &buffer[MaxBuffer-1], NULL ); 
+        ASSERT( b->id == Buffer::unused, NULL);
+        b->id = next_input;
+        ASSERT( b->sequence_number == Buffer::unused, NULL);
+        return b;
+    }
+};
+
+class process_loop {
+public:
+    void operator()( tbb::thread_bound_filter* tbf ) {
+        tbb::thread_bound_filter::result_type flag;
+        do
+            flag = tbf->process_item();
+        while( flag != tbb::thread_bound_filter::end_of_stream );
+    }
+};
+
+//! The struct below repeats layout of tbb::pipeline.
+struct hacked_pipeline {
+    tbb::filter* filter_list;
+    tbb::filter* filter_end;
+    tbb::empty_task* end_counter;
+    tbb::atomic<tbb::internal::Token> input_tokens;
+    tbb::atomic<tbb::internal::Token> global_token_counter;
+    bool end_of_input;
+    bool has_thread_bound_filters;
+
+    virtual ~hacked_pipeline();
+};
+
+//! The struct below repeats layout of tbb::internal::ordered_buffer.
+struct hacked_ordered_buffer {
+    void* array; // This should be changed to task_info* if ever used
+    tbb::internal::Token array_size;
+    tbb::internal::Token low_token;
+    tbb::spin_mutex array_mutex;
+    tbb::internal::Token high_token;
+    bool is_ordered;
+    bool is_bound;
+};
+
+//! The struct below repeats layout of tbb::filter.
+struct hacked_filter {
+    tbb::filter* next_filter_in_pipeline;
+    hacked_ordered_buffer* input_buffer;
+    unsigned char my_filter_mode;
+    tbb::filter* prev_filter_in_pipeline;
+    tbb::pipeline* my_pipeline;
+    tbb::filter* next_segment;
+
+    virtual ~hacked_filter();
+};
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // Workaround for overzealous compiler warnings
+    // Suppress compiler warning about constant conditional expression
+    #pragma warning (disable: 4127)
+#endif
+
+void clear_global_state() {
+    Harness::ConcurrencyTracker::Reset();
+    memset( Done, 0, sizeof(Done) );
+    thread_id = id0;
+    is_serial_execution = false;
+}
+
+
+class PipelineTest {
+    // There are 3 non-thread-bound filter types: serial_in_order, serial_out_of_order, and parallel
+    static const tbb::filter::mode non_tb_filters_table[3]; // = { tbb::filter::serial_in_order, tbb::filter::serial_out_of_order, tbb::filter::parallel}; 
+    // There are 2 thread-bound filter types: serial_in_order and serial_out_of_order 
+    static const tbb::filter::mode tb_filters_table[2]; // = { tbb::filter::serial_in_order, tbb::filter::serial_out_of_order }; 
+    
+    static const unsigned number_of_non_tb_filter_types = sizeof(non_tb_filters_table)/sizeof(non_tb_filters_table[0]);
+    static const unsigned number_of_tb_filter_types = sizeof(tb_filters_table)/sizeof(tb_filters_table[0]);
+    static const unsigned number_of_filter_types = number_of_non_tb_filter_types + number_of_tb_filter_types;
+    // static unsigned my_nthread;
+    public:
+    static double TestOneConfiguration( unsigned numeral, unsigned nthread, unsigned number_of_filters, tbb::internal::Token ntokens);
+    static void TestTrivialPipeline( unsigned nthread, unsigned number_of_filters );
+    static void TestIdleSpinning(unsigned nthread);
+};
+
+const tbb::filter::mode PipelineTest::non_tb_filters_table[3] = { tbb::filter::serial_in_order, tbb::filter::serial_out_of_order, tbb::filter::parallel}; 
+const tbb::filter::mode PipelineTest::tb_filters_table[2] = { tbb::filter::serial_in_order, tbb::filter::serial_out_of_order }; 
+
+#include "harness_cpu.h"
+
+double PipelineTest::TestOneConfiguration(unsigned numeral, unsigned nthread, unsigned number_of_filters, tbb::internal::Token ntokens)
+{
+    // Build pipeline
+    tbb::pipeline pipeline;
+    tbb::filter* filter[MaxFilters];
+    unsigned temp = numeral;
+    // parallelism_limit is the upper bound on the possible parallelism
+    unsigned parallelism_limit = 0;
+    // number of thread-bound-filters in the current sequence
+    unsigned number_of_tb_filters = 0;
+    // ordinal numbers of thread-bound-filters in the current sequence
+    unsigned array_of_tb_filter_numbers[MaxFilters];
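+    // numeral is read as a base-5 number, one digit per filter (least significant digit first):
+    // odd digits (1,3) select thread-bound serial filters, even digits (0,2,4) select the
+    // ordinary serial_in_order, serial_out_of_order and parallel filters.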
+    for( unsigned i=0; i<number_of_filters; ++i, temp/=number_of_filter_types ) {
+        bool is_bound = temp%number_of_filter_types&0x1;
+        tbb::filter::mode filter_type;
+        if( is_bound ) {
+            filter_type = tb_filters_table[temp%number_of_filter_types/number_of_non_tb_filter_types];
+        } else
+            filter_type = non_tb_filters_table[temp%number_of_filter_types/number_of_tb_filter_types];
+        const bool is_last = i==number_of_filters-1;
+        if( is_bound ) {
+            if( i == 0 )
+                filter[i] = new InputFilter<tbb::thread_bound_filter>(filter_type,ntokens,Done[i],is_last);
+            else
+                filter[i] = new BaseFilter<tbb::thread_bound_filter>(filter_type,Done[i],is_last);
+            array_of_tb_filter_numbers[number_of_tb_filters] = i;
+            number_of_tb_filters++;
+        } else {
+            if( i == 0 )
+                filter[i] = new InputFilter<tbb::filter>(filter_type,ntokens,Done[i],is_last);
+            else
+                filter[i] = new BaseFilter<tbb::filter>(filter_type,Done[i],is_last);
+        }
+        pipeline.add_filter(*filter[i]);
+        if ( filter[i]->is_serial() ) {
+            parallelism_limit += 1;
+        } else {
+            parallelism_limit = nthread;
+        }
+    }
+    clear_global_state();
+    // Account for clipping of parallelism.
+    if( parallelism_limit>nthread ) 
+        parallelism_limit = nthread;
+    if( parallelism_limit>ntokens )
+        parallelism_limit = (unsigned)ntokens;
+    StreamSize = nthread; // min( MaxStreamSize, nthread * MaxStreamItemsPerThread );
+
+    for( unsigned i=0; i<number_of_filters; ++i ) {
+        static_cast<BaseFilter<tbb::filter>*>(filter[i])->current_token=0;
+    }
+    tbb::tbb_thread* t[MaxFilters];
+    for( unsigned j = 0; j<number_of_tb_filters; j++)
+        t[j] = new tbb::tbb_thread(process_loop(), static_cast<tbb::thread_bound_filter*>(filter[array_of_tb_filter_numbers[j]]));
+    if( ntokens == 1 || ( number_of_filters == 1 && number_of_tb_filters == 0 && filter[0]->is_serial() ))
+        is_serial_execution = true;
+    double strttime = GetCPUUserTime();
+    pipeline.run( ntokens );
+    double endtime = GetCPUUserTime();
+    for( unsigned j = 0; j<number_of_tb_filters; j++)
+        t[j]->join();
+    ASSERT( !Harness::ConcurrencyTracker::InstantParallelism(), "filter still running?" );
+    for( unsigned i=0; i<number_of_filters; ++i )
+        ASSERT( static_cast<BaseFilter<tbb::filter>*>(filter[i])->current_token==StreamSize, NULL );
+    for( unsigned i=0; i<MaxFilters; ++i )
+        for( unsigned j=0; j<StreamSize; ++j ) {
+            ASSERT( Done[i][j]==(i<number_of_filters), NULL );
+        }
+    if( Harness::ConcurrencyTracker::PeakParallelism() < parallelism_limit ) 
+        REMARK( "nthread=%lu ntokens=%lu MaxParallelism=%lu parallelism_limit=%lu\n",
+            nthread, ntokens, Harness::ConcurrencyTracker::PeakParallelism(), parallelism_limit );
+    for( unsigned i=0; i < number_of_filters; ++i ) {
+        delete filter[i];
+        filter[i] = NULL;
+    }
+    for( unsigned j = 0; j<number_of_tb_filters; j++)
+        delete t[j];
+    pipeline.clear();
+    return endtime - strttime;
+} // TestOneConfiguration
+
+void PipelineTest::TestTrivialPipeline( unsigned nthread, unsigned number_of_filters ) {
+
+    REMARK( "testing with %lu threads and %lu filters\n", nthread, number_of_filters );
+    ASSERT( number_of_filters<=MaxFilters, "too many filters" );
+    tbb::internal::Token max_tokens = nthread < MaxBuffer ? nthread : MaxBuffer;
+    // The loop has 1 iteration if max_tokens=1 and 2 iterations if max_tokens>1:
+    // one iteration for ntokens=1 and a second for ntokens=max_tokens.
+    // The ntokens=1 iteration is required in each test case to check that the pipeline runs on only one thread.
+    unsigned max_iteration = max_tokens > 1 ? 2 : 1; 
+    tbb::internal::Token ntokens = 1;
+    for( unsigned iteration = 0; iteration < max_iteration; iteration++) {
+        if( iteration > 0 ) 
+            ntokens = max_tokens;
+        // Count maximum iterations number
+        unsigned limit = 1;
+        for( unsigned i=0; i<number_of_filters; ++i)
+            limit *= number_of_filter_types;
+        // Iterate over possible filter sequences
+        for( unsigned numeral=0; numeral<limit; ++numeral ) {
+            REMARK( "testing configuration %lu of %lu\n", numeral, limit );
+            (void)TestOneConfiguration(numeral, nthread, number_of_filters, ntokens);
+        }
+    }
+}
+
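+// A minimal illustrative sketch (not exercised by the harness) of how a configuration
+// "numeral" encodes one filter type per pipeline stage: TestOneConfiguration reads the
+// numeral as a base-number_of_filter_types number, and digit i selects the type of
+// filter i.  The function and parameter names below are illustrative only.
+void DecodeConfigurationSketch( unsigned numeral, unsigned number_of_filters, unsigned n_filter_types ) {
+    // n_filter_types corresponds to number_of_filter_types used by TestOneConfiguration
+    for( unsigned i = 0; i < number_of_filters; ++i ) {
+        unsigned type_code = numeral % n_filter_types;   // type digit of filter i
+        numeral /= n_filter_types;
+        REMARK( "filter %u uses type code %u\n", i, type_code );
+    }
+}
+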
+// Varying sleep times result in different user times for all pipelines, so we compare
+// the running time of an all non-TBF pipeline with several (with luck representative)
+// TBF configurations.
+//
+// We run the tests multiple times and compare the average runtimes for those cases
+// that don't return 0 user time.  Configurations that exceed the allowable extra
+// time are reported.
+void PipelineTest::TestIdleSpinning( unsigned nthread)  {
+    unsigned sample_setups[] = {
+        // in the comments below, s == serial, B == thread bound serial, p == parallel
+        1,   // B s s s
+        5,   // s B s s
+        25,  // s s B s
+        125, // s s s B
+        6,   // B B s s
+        26,  // B s B s
+        126, // B s s B
+        30,  // s B B s
+        130, // s B s B
+        150, // s s B B
+        31,  // B B B s
+        131, // B B s B
+        155, // s B B B
+        21,  // B p s s
+        105, // s B p s
+        45,  // s p B s
+        225, // s s p B
+    };
+    const int nsetups = sizeof(sample_setups) / sizeof(unsigned);
+    const int ntests = 4;
+    const double bignum = 1000000000.0;
+    const double allowable_slowdown = 3.5;
+    unsigned zero_count = 0;
+
+    REMARK( "testing idle spinning with %lu threads\n", nthread );
+    tbb::internal::Token max_tokens = nthread < MaxBuffer ? nthread : MaxBuffer;
+    for( int i=0; i<nsetups; ++i ) {
+        unsigned numeral = sample_setups[i];
+        unsigned temp = numeral;
+        unsigned nbound = 0;
+        while(temp) {
+            if((temp%number_of_filter_types)&0x01) nbound++;
+            temp /= number_of_filter_types;
+        }
+        sleeptime = 20.0;
+        double s0 = bignum;
+        double s1 = bignum;
+        int v0cnt = 0;
+        int v1cnt = 0;
+        double s0sum = 0.0;
+        double s1sum = 0.0;
+        for(int j = 0; j < ntests; ++j) {
+            double s1a = TestOneConfiguration(numeral, nthread, MaxFilters, max_tokens);
+            double s0a = TestOneConfiguration((unsigned)0, nthread, MaxFilters, max_tokens);
+            s1sum += s1a;
+            s0sum += s0a;
+            if(s0a > 0.0) {
+                ++v0cnt;
+                s0 = (s0a < s0) ? s0a : s0;
+            }
+            else {
+                ++zero_count;
+            }
+            if(s1a > 0.0) {
+                ++v1cnt;
+                s1 = (s1a < s1) ? s1a : s1;
+            }
+            else {
+                ++zero_count;
+            }
+        }
+        if(s0 == bignum || s1 == bignum) continue;
+        s0sum /= (double)v0cnt;
+        s1sum /= (double)v1cnt;
+        double slowdown = (s1sum-s0sum)/s0sum;
+        if(slowdown > allowable_slowdown)
+            REMARK( "with %lu threads configuration %lu has slowdown > %g (%g)\n", nthread, numeral, allowable_slowdown, slowdown );
+    }
+    REMARK("Total of %lu zero times\n", zero_count);
+}
+
+static int nthread; // knowing number of threads is necessary to call TestCPUUserTime
+
+void waiting_probe::probe( ) {
+    if( nthread==1 ) return;
+    REMARK("emulating wait for input\n");
+    // Test that threads sleep while there is no work.
+    // The master doesn't sleep, so there could be 2 active threads if a worker is waiting for input.
+    TestCPUUserTime(nthread, 2);
+}
+
+#include "tbb/task_scheduler_init.h"
+
+int TestMain () {
+    out_of_order_count = 0;
+    if( MinThread<1 ) {
+        REPORT("must have at least one thread");
+        exit(1);
+    }
+
+    sleeptime = 0.0;  // msec: 0 means no timing; if > 0, each filter stage sleeps for sleeptime
+    // Test with varying number of threads.
+    for( nthread=MinThread; nthread<=MaxThread; ++nthread ) {
+        // Initialize TBB task scheduler
+        tbb::task_scheduler_init init(nthread);
+
+        // Test pipelines with 1 and maximal number of filters
+        for( unsigned n=1; n<=MaxFilters; n*=MaxFilters ) {
+            // Thread-bound stages are serviced by user-created threads that
+            // don't run the pipeline and don't service non-thread-bound stages.
+            PipelineTest::TestTrivialPipeline(nthread,n);
+        }
+
+        // Test that all workers sleep when there is no work
+        TestCPUUserTime(nthread);
+        if((unsigned)nthread >= MaxFilters)  // test works when number of threads >= number of stages
+            PipelineTest::TestIdleSpinning(nthread);
+    }
+    if( !out_of_order_count )
+        REPORT("Warning: out of order serial filter received tokens in order\n");
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_reader_writer_lock.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_reader_writer_lock.cpp
new file mode 100644 (file)
index 0000000..86a4dd0
--- /dev/null
@@ -0,0 +1,226 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// test reader_writer_lock
+#include "tbb/reader_writer_lock.h"
+#include "tbb/atomic.h"
+#include "tbb/tbb_exception.h"
+#include "harness_assert.h"
+#include "harness.h"
+
+tbb::reader_writer_lock the_mutex;
+const int MAX_WORK = 10000;
+
+tbb::atomic<size_t> active_readers, active_writers;
+tbb::atomic<bool> sim_readers;
+
+
+int BusyWork(int percentOfMaxWork) {
+  int iters = 0;
+  for (int i=0; i<MAX_WORK*((double)percentOfMaxWork/100.0); ++i) {
+      iters++;
+  }
+  return iters;
+}
+
+struct StressRWLBody : NoAssign {
+    const int nThread;
+    const int percentMax;
+
+    StressRWLBody(int nThread_, int percentMax_) : nThread(nThread_), percentMax(percentMax_) {}
+
+    void operator()(const int /* threadID */ ) const {
+        int nIters = 100;
+        int r_result=0, w_result=0;
+        for(int i=0; i<nIters; ++i) {
+            // test unscoped blocking write lock
+            the_mutex.lock();
+            w_result += BusyWork(percentMax);
+#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
+            // test exception for recursive write lock
+            bool was_caught = false;
+            try {
+                the_mutex.lock();
+            }
+            catch(tbb::improper_lock& ex) {
+                REMARK("improper_lock: %s\n", ex.what());
+                was_caught = true;
+            }
+            catch(...) {
+                REPORT("Wrong exception caught during recursive lock attempt.");
+            }
+            ASSERT(was_caught, "Recursive lock attempt exception not caught properly.");
+            // test exception for recursive read lock
+            was_caught = false;
+            try {
+                the_mutex.lock_read();
+            }
+            catch(tbb::improper_lock& ex) {
+                REMARK("improper_lock: %s\n", ex.what());
+                was_caught = true;
+            }
+            catch(...) {
+                REPORT("Wrong exception caught during recursive lock attempt.");
+            }
+            ASSERT(was_caught, "Recursive lock attempt exception not caught properly.");
+#endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN */
+            the_mutex.unlock();
+            // test unscoped non-blocking write lock
+            if (the_mutex.try_lock()) { 
+                w_result += BusyWork(percentMax);
+                the_mutex.unlock();
+            }
+            // test unscoped blocking read lock
+            the_mutex.lock_read();
+            r_result += BusyWork(percentMax);
+            the_mutex.unlock();
+            // test unscoped non-blocking read lock
+            if(the_mutex.try_lock_read()) { 
+                r_result += BusyWork(percentMax);
+                the_mutex.unlock();
+            }
+            { // test scoped blocking write lock
+                tbb::reader_writer_lock::scoped_lock my_lock(the_mutex);
+                w_result += BusyWork(percentMax);
+            }
+            { // test scoped blocking read lock
+                tbb::reader_writer_lock::scoped_lock_read my_lock(the_mutex);
+                r_result += BusyWork(percentMax);
+            }
+        }
+        REMARK("%d reader %d writer iterations of busy work were completed.", r_result, w_result);
+    }
+};
+
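+// A condensed usage sketch of the reader_writer_lock interfaces exercised by StressRWLBody
+// above; illustrative only (the function and parameter names are not part of the test).
+void ReaderWriterLockUsageSketch( tbb::reader_writer_lock& rwl, int& shared_value ) {
+    {   // exclusive (writer) access via a scoped lock
+        tbb::reader_writer_lock::scoped_lock writer(rwl);
+        ++shared_value;
+    }
+    {   // shared (reader) access via a scoped read lock
+        tbb::reader_writer_lock::scoped_lock_read reader(rwl);
+        int observed = shared_value;
+        (void)observed;
+    }
+    if( rwl.try_lock() ) {          // non-blocking writer acquisition
+        ++shared_value;
+        rwl.unlock();
+    }
+    if( rwl.try_lock_read() ) {     // non-blocking reader acquisition
+        rwl.unlock();
+    }
+}
+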
+struct CorrectRWLScopedBody : NoAssign {
+    const int nThread;
+
+    CorrectRWLScopedBody(int nThread_) : nThread(nThread_) {}
+
+    void operator()(const int /* threadID */ ) const {
+        bool is_reader;
+        
+        for (int i=0; i<50; i++) {
+            if (i%5==0) is_reader = false; // 1 writer for every 5 readers
+            else is_reader = true;
+
+            if (is_reader) {
+                tbb::reader_writer_lock::scoped_lock_read my_lock(the_mutex);
+                active_readers++;
+                if (active_readers > 1) sim_readers = true;
+                ASSERT(active_writers==0, "Active writers in read-locked region.");
+                Harness::Sleep(10);
+                active_readers--; 
+            }
+            else { // is writer
+                tbb::reader_writer_lock::scoped_lock my_lock(the_mutex);
+                active_writers++;
+                ASSERT(active_readers==0, "Active readers in write-locked region.");
+                ASSERT(active_writers<=1, "More than one active writer in write-locked region.");
+                Harness::Sleep(10);
+                active_writers--;
+            }
+        }
+    }
+};
+
+struct CorrectRWLBody : NoAssign {
+    const int nThread;
+
+    CorrectRWLBody(int nThread_) : nThread(nThread_) {}
+
+    void operator()(const int /* threadID */ ) const {
+        bool is_reader;
+        
+        for (int i=0; i<50; i++) {
+            if (i%5==0) is_reader = false; // 1 writer for every 5 readers
+            else is_reader = true;
+
+            if (is_reader) {
+                the_mutex.lock_read();
+                active_readers++;
+                if (active_readers > 1) sim_readers = true;
+                ASSERT(active_writers==0, "Active writers in read-locked region.");
+            }
+            else { // is writer
+                the_mutex.lock();
+                active_writers++;
+                ASSERT(active_readers==0, "Active readers in write-locked region.");
+                ASSERT(active_writers<=1, "More than one active writer in write-locked region.");
+            }
+            Harness::Sleep(10);
+            if (is_reader) {
+                active_readers--; 
+            }
+            else { // is writer
+                active_writers--;
+            }
+            the_mutex.unlock();
+        }
+    }
+};
+
+void TestReaderWriterLockOnNThreads(int nThreads) {
+    // Stress-test all interfaces
+    for (int pc=0; pc<101; pc+=20) {
+        REMARK("\nTesting reader_writer_lock with %d threads, percent of MAX_WORK=%d", nThreads, pc);
+        StressRWLBody myStressBody(nThreads, pc);
+        NativeParallelFor(nThreads, myStressBody);
+    }
+    
+    // Test mutual exclusion in direct locking mode
+    CorrectRWLBody myCorrectBody(nThreads);
+    active_writers = active_readers = 0;
+    sim_readers = false;
+    NativeParallelFor(nThreads, myCorrectBody);
+    ASSERT(sim_readers || nThreads<2, "There were no simultaneous readers.");
+    REMARK("Unscoped lock testing succeeded on %d threads.", nThreads);
+
+    // Test mutual exclusion in scoped locking mode
+    CorrectRWLScopedBody myCorrectScopedBody(nThreads);
+    active_writers = active_readers = 0;
+    sim_readers = false;
+    NativeParallelFor(nThreads, myCorrectScopedBody);
+    ASSERT(sim_readers || nThreads<2, "There were no simultaneous readers.");
+    REMARK("Scoped lock testing succeeded on %d threads.", nThreads);
+}
+
+void TestReaderWriterLock() {
+    for(int p = MinThread; p <= MaxThread; p++) {
+        TestReaderWriterLockOnNThreads(p);
+    }
+}
+
+
+int TestMain() {
+    if(MinThread <= 0) MinThread = 1;
+    if(MaxThread > 0) {
+        TestReaderWriterLock();
+    }
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_rwm_upgrade_downgrade.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_rwm_upgrade_downgrade.cpp
new file mode 100644 (file)
index 0000000..1cb26e2
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/queuing_rw_mutex.h"
+#include "tbb/spin_rw_mutex.h"
+#include "harness.h"
+
+using namespace tbb;
+
+volatile int Count;
+
+template<typename RWMutex>
+struct Hammer: NoAssign {
+    RWMutex &MutexProtectingCount;
+    mutable volatile int dummy;
+
+    Hammer(RWMutex &m): MutexProtectingCount(m) {}
+    void operator()( int /*thread_id*/ ) const {
+        for( int j=0; j<100000; ++j ) {
+            typename RWMutex::scoped_lock lock(MutexProtectingCount,false);
+            int c = Count;
+            for( int k=0; k<10; ++k ) {
+                ++dummy;
+            }
+            if( lock.upgrade_to_writer() ) {
+                // The upgrade succeeded without any intervening writers
+                ASSERT( c==Count, "another thread modified Count while I held a read lock" );
+            } else {
+                c = Count;
+            }
+            for( int k=0; k<10; ++k ) {
+                ++Count;
+            }
+            lock.downgrade_to_reader();
+            for( int k=0; k<10; ++k ) {
+                ++dummy;
+            }
+        }
+    }
+};
+
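+// The upgrade/downgrade pattern exercised by Hammer, distilled into a stand-alone sketch
+// (illustrative only).  When upgrade_to_writer() returns false, the lock was temporarily
+// released and another writer may have intervened, so state read under the read lock must
+// be refreshed before it is used.
+template<typename RWMutex>
+void UpgradeDowngradeSketch( RWMutex& m, volatile int& value ) {
+    typename RWMutex::scoped_lock lock( m, /*write=*/false );   // acquire as reader
+    int snapshot = value;
+    if( !lock.upgrade_to_writer() )
+        snapshot = value;           // the lock was released during the upgrade; re-read
+    value = snapshot + 1;           // we now hold the write lock
+    lock.downgrade_to_reader();     // keep reading without excluding other readers
+}
+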
+queuing_rw_mutex QRW_mutex;
+spin_rw_mutex SRW_mutex;
+
+int TestMain () {
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        REMARK("Testing on %d threads", p);
+        Count = 0;
+        NativeParallelFor( p, Hammer<queuing_rw_mutex>(QRW_mutex) ); 
+        Count = 0;
+        NativeParallelFor( p, Hammer<spin_rw_mutex>(SRW_mutex) );
+    }
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_semaphore.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_semaphore.cpp
new file mode 100644 (file)
index 0000000..75b5507
--- /dev/null
@@ -0,0 +1,258 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+//
+// Test for counting semaphore.
+//
+// set semaphore to N
+// create N + M threads
+// have each thread
+//   A. P()
+//   B. increment atomic count
+//   C. spin for a while checking the value of the count; make sure it doesn't exceed N
+//   D. decrement atomic count
+//   E. V()
+//
+
+#include "tbb/semaphore.h"
+#include "tbb/atomic.h"
+
+#include <vector>
+using std::vector;
+
+#include "harness_assert.h"
+#include "harness.h"
+
+using tbb::internal::semaphore;
+
+#include "harness_barrier.h"
+
+tbb::atomic<int> pCount;
+
+Harness::SpinBarrier sBarrier;
+
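+// A minimal sketch of the protocol described in the header comment, illustrative only:
+// a semaphore initialized to N bounds how many threads are inside the section at once,
+// and an atomic counter verifies the bound (steps A-E above).
+void BoundedSectionSketch( semaphore& sem, tbb::atomic<int>& inside, int N ) {
+    sem.P();                        // A: acquire one of the N slots
+    int now = ++inside;             // B: count threads currently inside
+    ASSERT( now <= N, "semaphore admitted more than N threads" );   // C
+    --inside;                       // D
+    sem.V();                        // E: release the slot
+}
+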
+#include "tbb/tick_count.h"
+// semaphore basic function:
+//   set semaphore to initial value
+//   see that semaphore only allows that number of threads to be active
+class Body: NoAssign {
+    const int nThreads;
+    const int nIters;
+    tbb::internal::semaphore &mySem;
+    vector<int> &ourCounts;
+    vector<double> &tottime;
+    static const int tickCounts = 1;  // millisecond
+    static const int innerWait = 5; // millisecond
+public:
+    Body(int nThread_, int nIter_, semaphore &mySem_,
+            vector<int>& ourCounts_,
+            vector<double>& tottime_
+            ) : nThreads(nThread_), nIters(nIter_), mySem(mySem_), ourCounts(ourCounts_), tottime(tottime_) { sBarrier.initialize(nThread_); pCount = 0; }
+void operator()(const int tid) const {
+    sBarrier.wait();
+    for(int i=0; i < nIters; ++i) {
+        Harness::Sleep( tid * tickCounts );
+        tbb::tick_count t0 = tbb::tick_count::now();
+        mySem.P();
+        tbb::tick_count t1 = tbb::tick_count::now();
+        tottime[tid] += (t1-t0).seconds();
+        int curval = ++pCount;
+        if(curval > ourCounts[tid]) ourCounts[tid] = curval;
+        Harness::Sleep( innerWait );
+        --pCount;
+        ASSERT((int)pCount >= 0, NULL);
+        mySem.V();
+    }
+}
+};
+
+
+void testSemaphore( int semInitCnt, int extraThreads ) {
+    semaphore my_sem(semInitCnt);
+    // tbb::task_scheduler_init init(tbb::task_scheduler_init::deferred);
+    int nThreads = semInitCnt + extraThreads;
+    vector<int> maxVals(nThreads);
+    vector<double> totTimes(nThreads);
+    int nIters = 10;
+    Body myBody(nThreads, nIters, my_sem, maxVals, totTimes);
+
+    REMARK( " sem(%d) with %d extra threads\n", semInitCnt, extraThreads);
+    pCount = 0;
+    NativeParallelFor(nThreads, myBody);
+    if(extraThreads == 0) { 
+        double allPWaits = 0;
+        for(vector<double>::const_iterator j = totTimes.begin(); j != totTimes.end(); ++j) {
+            allPWaits += *j;
+        }
+        allPWaits /= static_cast<double>(nThreads * nIters);
+        REMARK("Average wait for P() in uncontested case for nThreads = %d is %g\n", nThreads, allPWaits);
+    }
+    ASSERT(!pCount, "not all threads decremented pCount");
+    int maxCount = -1;
+    for(vector<int>::const_iterator i=maxVals.begin(); i!= maxVals.end();++i) {
+        maxCount = max(maxCount,*i);
+    }
+    ASSERT(maxCount <= semInitCnt,"too many threads in semaphore-protected increment");
+    if(maxCount < semInitCnt) {
+        REMARK("Not enough threads in semaphore-protected region (%d < %d)\n", static_cast<int>(maxCount), semInitCnt);
+    }
+}
+
+// Power of 2: the maximum number of tokens that can be in flight.
+#define MAX_TOKENS 32
+enum FilterType { imaProducer, imaConsumer };
+class FilterBase : NoAssign {
+protected:
+    FilterType ima;
+    unsigned totTokens;  // total number of tokens to be emitted, only used by producer
+    tbb::atomic<unsigned>& myTokens;
+    tbb::atomic<unsigned>& otherTokens;
+    unsigned myWait;
+    semaphore &mySem;
+    semaphore &nextSem;
+    unsigned* myBuffer;
+    unsigned* nextBuffer;
+    unsigned curToken;
+public:
+    FilterBase( FilterType ima_
+            ,unsigned totTokens_ 
+            ,tbb::atomic<unsigned>& myTokens_ 
+            ,tbb::atomic<unsigned>& otherTokens_
+            ,unsigned myWait_
+            ,semaphore &mySem_ 
+            ,semaphore &nextSem_
+            ,unsigned* myBuffer_
+            ,unsigned* nextBuffer_
+            ) 
+        : ima(ima_),totTokens(totTokens_),myTokens(myTokens_),otherTokens(otherTokens_),myWait(myWait_),mySem(mySem_),
+          nextSem(nextSem_),myBuffer(myBuffer_),nextBuffer(nextBuffer_)
+    {
+        curToken = 0;
+    }
+    void Produce(const int tid);
+    void Consume(const int tid);
+    void operator()(const int tid) { if(ima == imaConsumer) Consume(tid); else Produce(tid); }
+};
+
+class ProduceConsumeBody {
+    FilterBase** myFilters;
+    public:
+    ProduceConsumeBody(FilterBase** myFilters_) : myFilters(myFilters_) {}
+    void operator()(const int tid) const {
+        myFilters[tid]->operator()(tid);
+    }
+};
+
+// Send a sequence of non-NULL "tokens" to the consumer, then a NULL to terminate.
+void FilterBase::Produce(const int /*tid*/) {
+    nextBuffer[0] = 0;  // just in case we provide no tokens
+    sBarrier.wait();
+    while(totTokens) {
+        while(!myTokens) 
+            mySem.P();
+        // we have a slot available.
+        --myTokens;  // moving this down reduces spurious wakeups
+        --totTokens;
+        if(totTokens) 
+            nextBuffer[curToken&(MAX_TOKENS-1)] = curToken*3+1;
+        else
+            nextBuffer[curToken&(MAX_TOKENS-1)] = (unsigned)NULL;
+        ++curToken;
+        Harness::Sleep(myWait);
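+        // Signal the consumer only on the 0 -> 1 transition of its token count, i.e. when
+        // it may be blocked in P(); the while loop around P() on the other side tolerates
+        // any remaining spurious wakeups.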
+        unsigned temp = ++otherTokens;
+        if(temp == 1) 
+            nextSem.V();
+    }
+    nextSem.V();  // final wakeup
+}
+
+void FilterBase::Consume(const int /*tid*/) {
+    unsigned myToken;
+    sBarrier.wait();
+    do {
+        while(!myTokens) 
+            mySem.P();
+        // we have a slot available.
+        --myTokens;  // moving this down reduces spurious wakeups
+        myToken = myBuffer[curToken&(MAX_TOKENS-1)];
+        if(myToken) {
+            ASSERT(myToken == curToken*3+1, "Error in received token");
+            ++curToken;
+            Harness::Sleep(myWait);
+            unsigned temp = ++otherTokens;
+            if(temp == 1) 
+                nextSem.V();
+        }
+    } while(myToken);
+    // end of processing
+    ASSERT(curToken + 1 == totTokens, "Didn't receive enough tokens");
+}
+
+// -- Test of producer/consumer with an atomic buffer count and semaphores.
+// totTokens is the total number of tokens sent through the pipe
+// nTokens is the number of tokens that can be in the buffer at a time
+// pWait is the wait time for the producer
+// cWait is the wait time for the consumer
+void testProducerConsumer( unsigned totTokens, unsigned nTokens, unsigned pWait, unsigned cWait) {
+    semaphore pSem;
+    semaphore cSem;
+    tbb::atomic<unsigned> pTokens;
+    tbb::atomic<unsigned> cTokens;
+    cTokens = 0;
+    unsigned cBuffer[MAX_TOKENS];
+    FilterBase* myFilters[2];  // one producer, one consumer
+    REMARK("Testing producer/consumer with %lu total tokens, %lu tokens at a time, producer wait(%lu), consumer wait (%lu)\n", totTokens, nTokens, pWait, cWait);
+    ASSERT(nTokens <= MAX_TOKENS, "Not enough slots for tokens");
+    myFilters[0] = new FilterBase(imaProducer, totTokens, pTokens, cTokens, pWait, cSem, pSem, (unsigned *)NULL, &(cBuffer[0]));
+    myFilters[1] = new FilterBase(imaConsumer, totTokens, cTokens, pTokens, cWait, pSem, cSem, cBuffer, (unsigned *)NULL);
+    pTokens = nTokens;
+    ProduceConsumeBody myBody(myFilters);
+    sBarrier.initialize(2);
+    NativeParallelFor(2, myBody);
+    delete myFilters[0];
+    delete myFilters[1];
+}
+
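+// An illustrative helper (not called by the test) making explicit why MAX_TOKENS must be a
+// power of two: masking with MAX_TOKENS-1 is then equivalent to taking the index modulo
+// MAX_TOKENS, which is how the filters above index their circular buffers.
+unsigned RingIndexSketch( unsigned token ) {
+    unsigned idx = token & (MAX_TOKENS - 1);
+    ASSERT( idx == token % MAX_TOKENS, "the mask trick requires MAX_TOKENS to be a power of 2" );
+    return idx;
+}
+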
+int TestMain() {
+    REMARK("Started\n");
+    if(MaxThread > 0) {
+        for(int semSize = 1; semSize <= MaxThread; ++semSize) {
+            for(int exThreads = 0; exThreads <= MaxThread - semSize; ++exThreads) {
+                testSemaphore( semSize, exThreads );
+            }
+        }
+    }
+    // Test producer/consumer with varying execution times and buffer sizes
+    // ( total tokens, tokens in buffer, sleep for producer, sleep for consumer )
+    testProducerConsumer( 10, 2, 5, 5 );
+    testProducerConsumer( 10, 2, 20, 5 );
+    testProducerConsumer( 10, 2, 5, 20 );
+    testProducerConsumer( 10, 1, 5, 5 );
+    testProducerConsumer( 20, 10, 5, 20 );
+    testProducerConsumer( 64, 32, 1, 20 );
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_std_thread.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_std_thread.cpp
new file mode 100644 (file)
index 0000000..5032606
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#define TBB_IMPLEMENT_CPP0X 1
+#include "tbb/compat/thread"
+#define THREAD std::thread
+#define THIS_THREAD std::this_thread
+#define THIS_THREAD_SLEEP THIS_THREAD::sleep_for
+#include "test_thread.h"
+#include "harness.h"
+
+int TestMain () {
+    CheckSignatures();
+    RunTests();
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_task.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_task.cpp
new file mode 100644 (file)
index 0000000..197f168
--- /dev/null
@@ -0,0 +1,989 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/task.h"
+#include "tbb/atomic.h"
+#include "tbb/tbb_thread.h"
+#include "harness_assert.h"
+#include <cstdlib>
+
+//------------------------------------------------------------------------
+// Helper for verifying that old use cases of spawn syntax still work.
+//------------------------------------------------------------------------
+tbb::task* GetTaskPtr( int& counter ) {
+    ++counter;
+    return NULL;
+}
+//------------------------------------------------------------------------
+// Test for task::spawn_children and task_list
+//------------------------------------------------------------------------
+
+class UnboundedlyRecursiveOnUnboundedStealingTask : public tbb::task {
+    typedef UnboundedlyRecursiveOnUnboundedStealingTask this_type;
+
+    this_type *m_Parent;
+    const int m_Depth; 
+    volatile bool m_GoAhead;
+
+    // Well, virtually unboundedly, for any practical purpose
+    static const int max_depth = 1000000; 
+
+public:
+    UnboundedlyRecursiveOnUnboundedStealingTask( this_type *parent_ = NULL, int depth_ = max_depth )
+        : m_Parent(parent_)
+        , m_Depth(depth_)
+        , m_GoAhead(true)
+    {}
+
+    /*override*/
+    tbb::task* execute() {
+        // Using a large padding array speeds up reaching the stealing limit
+        const int paddingSize = 16 * 1024;
+        volatile char padding[paddingSize];
+        if( !m_Parent || (m_Depth > 0 &&  m_Parent->m_GoAhead) ) {
+            if ( m_Parent ) {
+                // We have been stolen; let our parent start waiting for us
+                m_Parent->m_GoAhead = false;
+            }
+            tbb::task &t = *new( tbb::task::allocate_child() ) this_type(this, m_Depth - 1);
+            set_ref_count( 2 );
+            spawn( t );
+            // Give a willing thief a chance to steal
+            for( int i = 0; i < 1000000 && m_GoAhead; ++i ) {
+                ++padding[i % paddingSize];
+                __TBB_Yield();
+            }
+            // If our child has not been stolen yet, prohibit it from siring children
+            // of its own (when this thread executes it inside the next wait_for_all)
+            m_GoAhead = false;
+            wait_for_all();
+        }
+        return NULL;
+    }
+}; // UnboundedlyRecursiveOnUnboundedStealingTask
+
+tbb::atomic<int> Count;
+
+class RecursiveTask: public tbb::task {
+    const int m_ChildCount;
+    const int m_Depth; 
+    //! Spawn tasks in list.  Exact method depends upon m_Depth&bit_mask.
+    void SpawnList( tbb::task_list& list, int bit_mask ) {
+        if( m_Depth&bit_mask ) {
+            // Take address to check that signature of spawn(task_list&) is static.
+            void (*s)(tbb::task_list&) = &tbb::task::spawn;
+            (*s)(list);
+            ASSERT( list.empty(), NULL );
+            wait_for_all();
+        } else {
+            spawn_and_wait_for_all(list);
+            ASSERT( list.empty(), NULL );
+        }
+    }
+public:
+    RecursiveTask( int child_count, int depth_ ) : m_ChildCount(child_count), m_Depth(depth_) {}
+    /*override*/ tbb::task* execute() {
+        ++Count;
+        if( m_Depth>0 ) {
+            tbb::task_list list;
+            ASSERT( list.empty(), NULL );
+            for( int k=0; k<m_ChildCount; ++k ) {
+                list.push_back( *new( tbb::task::allocate_child() ) RecursiveTask(m_ChildCount/2,m_Depth-1 ) );
+                ASSERT( !list.empty(), NULL );
+            }
+            set_ref_count( m_ChildCount+1 );
+            SpawnList( list, 1 );
+            // Now try reusing this as the parent.
+            set_ref_count(2);
+            list.push_back( *new (tbb::task::allocate_child() ) tbb::empty_task() );
+            SpawnList( list, 2 );
+        }
+        return NULL;
+    }
+};
+
+//! Compute what Count should be after RecursiveTask(child_count,depth) runs.
+static int Expected( int child_count, int depth ) {
+    return depth<=0 ? 1 : 1+child_count*Expected(child_count/2,depth-1);
+}
+
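+// A small worked example of the recurrence above, illustrative only:
+// Expected(4,2) = 1 + 4*Expected(2,1) = 1 + 4*(1 + 2*Expected(1,0)) = 1 + 4*3 = 13,
+// i.e. the root task plus, at each level, child_count children that each halve their fanout.
+void ExpectedSketch() {
+    ASSERT( Expected(1,0) == 1, NULL );
+    ASSERT( Expected(2,1) == 3, NULL );
+    ASSERT( Expected(4,2) == 13, NULL );
+}
+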
+#include "tbb/task_scheduler_init.h"
+#include "harness.h"
+
+void TestStealLimit( int nthread ) {
+    REMARK( "testing steal limiting heuristics for %d threads\n", nthread );
+    tbb::task_scheduler_init init(nthread);
+    tbb::task &t = *new( tbb::task::allocate_root() ) UnboundedlyRecursiveOnUnboundedStealingTask();
+    tbb::task::spawn_root_and_wait(t);
+}
+
+//! Test task::spawn( task_list& )
+void TestSpawnChildren( int nthread ) {
+    REMARK("testing task::spawn(task_list&) for %d threads\n",nthread);
+    tbb::task_scheduler_init init(nthread);
+    for( int j=0; j<50; ++j ) {
+        Count = 0;
+        RecursiveTask& p = *new( tbb::task::allocate_root() ) RecursiveTask(j,4);
+        tbb::task::spawn_root_and_wait(p);
+        int expected = Expected(j,4);
+        ASSERT( Count==expected, NULL );
+    }
+}
+
+//! Test task::spawn_root_and_wait( task_list& )
+void TestSpawnRootList( int nthread ) {
+    REMARK("testing task::spawn_root_and_wait(task_list&) for %d threads\n",nthread);
+    tbb::task_scheduler_init init(nthread);
+    for( int j=0; j<5; ++j )
+        for( int k=0; k<10; ++k ) {
+            Count = 0;
+            tbb::task_list list; 
+            for( int i=0; i<k; ++i )
+                list.push_back( *new( tbb::task::allocate_root() ) RecursiveTask(j,4) );
+            tbb::task::spawn_root_and_wait(list);
+            int expected = k*Expected(j,4);
+            ASSERT( Count==expected, NULL );
+        }    
+}
+
+//------------------------------------------------------------------------
+// Test for task::recycle_as_safe_continuation
+//------------------------------------------------------------------------
+
+class TaskGenerator: public tbb::task {
+    int m_ChildCount;
+    int m_Depth;
+    
+public:
+    TaskGenerator( int child_count, int _depth ) : m_ChildCount(child_count), m_Depth(_depth) {}
+    ~TaskGenerator( ) { m_ChildCount = m_Depth = -125; }
+
+    /*override*/ tbb::task* execute() {
+        ASSERT( m_ChildCount>=0 && m_Depth>=0, NULL );
+        if( m_Depth>0 ) {
+            recycle_as_safe_continuation();
+            set_ref_count( m_ChildCount+1 );
+            int k=0; 
+            for( int j=0; j<m_ChildCount; ++j ) {
+                tbb::task& t = *new( allocate_child() ) TaskGenerator(m_ChildCount/2,m_Depth-1);
+                GetTaskPtr(k)->spawn(t);
+            }
+            ASSERT(k==m_ChildCount,NULL);
+            --m_Depth;
+            __TBB_Yield();
+            ASSERT( state()==recycle && ref_count()>0, NULL);
+        }
+        return NULL;
+    }
+};
+
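+// The recycling idiom used by TaskGenerator, distilled into a stand-alone sketch
+// (illustrative only, not spawned by the test).  The "+1" in set_ref_count accounts for
+// the still-running recycled task itself, so a child that finishes early cannot cause it
+// to be re-dispatched while execute() is still in progress.
+class SafeContinuationSketch: public tbb::task {
+    int m_Children;
+    /*override*/ tbb::task* execute() {
+        if( m_Children>0 ) {
+            recycle_as_safe_continuation();
+            set_ref_count( m_Children+1 );      // children + this still-running task
+            for( int j=0; j<m_Children; ++j )
+                spawn( *new( allocate_child() ) tbb::empty_task );
+            m_Children = 0;                     // the recycled re-execution just completes
+        }
+        return NULL;
+    }
+public:
+    SafeContinuationSketch( int n ) : m_Children(n) {}
+};
+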
+void TestSafeContinuation( int nthread ) {
+    REMARK("testing task::recycle_as_safe_continuation for %d threads\n",nthread);
+    tbb::task_scheduler_init init(nthread);
+    for( int j=8; j<33; ++j ) {
+        TaskGenerator& p = *new( tbb::task::allocate_root() ) TaskGenerator(j,5);
+        tbb::task::spawn_root_and_wait(p);
+    }
+}
+
+//------------------------------------------------------------------------
+// Test affinity interface
+//------------------------------------------------------------------------
+tbb::atomic<int> TotalCount;
+
+struct AffinityTask: public tbb::task {
+    const tbb::task::affinity_id expected_affinity_id; 
+    bool noted;
+    /** Computing affinities is NOT supported by TBB, and may disappear in the future.
+        It is done here for sake of unit testing. */
+    AffinityTask( int expected_affinity_id_ ) : 
+        expected_affinity_id(tbb::task::affinity_id(expected_affinity_id_)), 
+        noted(false) 
+    {
+        set_affinity(expected_affinity_id);
+        ASSERT( 0u-expected_affinity_id>0u, "affinity_id not an unsigned integral type?" );  
+        ASSERT( affinity()==expected_affinity_id, NULL );
+    } 
+    /*override*/ tbb::task* execute() {
+        ++TotalCount;
+        return NULL;
+    }
+    /*override*/ void note_affinity( affinity_id id ) {
+        // There is no guarantee in TBB that a task runs on its affinity thread.
+        // However, the current implementation does accidentally guarantee it
+        // under certain conditions, such as the conditions here.
+        // We exploit those conditions for sake of unit testing.
+        ASSERT( id!=expected_affinity_id, NULL );
+        ASSERT( !noted, "note_affinity_id called twice!" );
+        ASSERT ( &tbb::task::self() == (tbb::task*)this, "Wrong innermost running task" );
+        noted = true;
+    }
+};
+
+/** Note: This test assumes a lot about the internal implementation of affinity.
+    Do NOT use this as an example of good programming practice with TBB */
+void TestAffinity( int nthread ) {
+    TotalCount = 0;
+    int n = tbb::task_scheduler_init::default_num_threads();
+    if( n>nthread ) 
+        n = nthread;
+    tbb::task_scheduler_init init(n);
+    tbb::empty_task* t = new( tbb::task::allocate_root() ) tbb::empty_task;
+    tbb::task::affinity_id affinity_id = t->affinity();
+    ASSERT( affinity_id==0, NULL );
+    // Set ref_count for n-1 children, plus 1 for the wait.
+    t->set_ref_count(n);
+    // Spawn n-1 affinitized children.
+    for( int i=1; i<n; ++i ) 
+        tbb::task::spawn( *new(t->allocate_child()) AffinityTask(i) );
+    if( n>1 ) {
+        // Keep master from stealing
+        while( TotalCount!=n-1 ) 
+            __TBB_Yield();
+    }
+    // Wait for the children
+    t->wait_for_all();
+    int k = 0;
+    GetTaskPtr(k)->destroy(*t);
+    ASSERT(k==1,NULL);
+}
+
+struct NoteAffinityTask: public tbb::task {
+    bool noted;
+    NoteAffinityTask( int id ) : noted(false)
+    {
+        set_affinity(tbb::task::affinity_id(id));
+    }
+    ~NoteAffinityTask () {
+        ASSERT (noted, "note_affinity has not been called");
+    }
+    /*override*/ tbb::task* execute() {
+        return NULL;
+    }
+    /*override*/ void note_affinity( affinity_id /*id*/ ) {
+        noted = true;
+        ASSERT ( &tbb::task::self() == (tbb::task*)this, "Wrong innermost running task" );
+    }
+};
+
+// This test checks one of the paths inside the scheduler by affinitizing the child task
+// to a non-existent thread so that it is proxied in the local task pool but not retrieved
+// by another thread.
+// If no workers are requested, the extra slot #2 is allocated for a worker thread to serve
+// "enqueued" tasks. In this test, it is used only for affinity purposes.
+void TestNoteAffinityContext() {
+    tbb::task_scheduler_init init(1);
+    tbb::empty_task* t = new( tbb::task::allocate_root() ) tbb::empty_task;
+    t->set_ref_count(2);
+    // This master in the absence of workers will have an affinity id of 1. 
+    // So use another number to make the task get proxied.
+    tbb::task::spawn( *new(t->allocate_child()) NoteAffinityTask(2) );
+    t->wait_for_all();
+    tbb::task::destroy(*t);
+}
+
+//------------------------------------------------------------------------
+// Test that recovery actions work correctly for task::allocate_* methods
+// when a task's constructor throws an exception.
+//------------------------------------------------------------------------
+
+#if TBB_USE_EXCEPTIONS
+static int TestUnconstructibleTaskCount;
+
+struct ConstructionFailure {
+};
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // Suppress pointless "unreachable code" warning.
+    #pragma warning (push)
+    #pragma warning (disable: 4702)
+#endif
+
+//! Task that cannot be constructed.  
+template<size_t N>
+struct UnconstructibleTask: public tbb::empty_task {
+    char space[N];
+    UnconstructibleTask() {
+        throw ConstructionFailure();
+    }
+};
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    #pragma warning (pop)
+#endif
+
+#define TRY_BAD_CONSTRUCTION(x)                  \
+    {                                            \
+        try {                                    \
+            new(x) UnconstructibleTask<N>;       \
+        } catch( const ConstructionFailure& ) {                                                    \
+            ASSERT( parent()==original_parent, NULL ); \
+            ASSERT( ref_count()==original_ref_count, "incorrectly changed ref_count" );\
+            ++TestUnconstructibleTaskCount;      \
+        }                                        \
+    }
+
+template<size_t N>
+struct RootTaskForTestUnconstructibleTask: public tbb::task {
+    tbb::task* execute() {
+        tbb::task* original_parent = parent();
+        ASSERT( original_parent!=NULL, NULL );
+        int original_ref_count = ref_count();
+        TRY_BAD_CONSTRUCTION( allocate_root() );
+        TRY_BAD_CONSTRUCTION( allocate_child() );
+        TRY_BAD_CONSTRUCTION( allocate_continuation() );
+        TRY_BAD_CONSTRUCTION( allocate_additional_child_of(*this) );
+        return NULL;
+    }
+};
+
+template<size_t N>
+void TestUnconstructibleTask() {
+    TestUnconstructibleTaskCount = 0;
+    tbb::task_scheduler_init init;
+    tbb::task* t = new( tbb::task::allocate_root() ) RootTaskForTestUnconstructibleTask<N>;
+    tbb::task::spawn_root_and_wait(*t);
+    ASSERT( TestUnconstructibleTaskCount==4, NULL );
+}
+#endif /* TBB_USE_EXCEPTIONS */
+
+//------------------------------------------------------------------------
+// Test for alignment problems with task objects.
+//------------------------------------------------------------------------
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    // Workaround for pointless warning "structure was padded due to __declspec(align())"
+    #pragma warning (push)
+    #pragma warning (disable: 4324)
+#endif
+
+//! Task with members of type T.
+/** The task recursively creates tasks. */
+template<typename T> 
+class TaskWithMember: public tbb::task {
+    T x;
+    T y;
+    unsigned char count;
+    /*override*/ tbb::task* execute() {
+        x = y;
+        if( count>0 ) { 
+            set_ref_count(2);
+            tbb::task* t = new( tbb::task::allocate_child() ) TaskWithMember<T>(count-1);
+            spawn_and_wait_for_all(*t);
+        }
+        return NULL;
+    }
+public:
+    TaskWithMember( unsigned char n ) : count(n) {}
+};
+
+#if _MSC_VER && !defined(__INTEL_COMPILER)
+    #pragma warning (pop)
+#endif
+
+template<typename T> 
+void TestAlignmentOfOneClass() {
+    typedef TaskWithMember<T> task_type;
+    tbb::task* t = new( tbb::task::allocate_root() ) task_type(10);
+    tbb::task::spawn_root_and_wait(*t);
+}
+
+#include "harness_m128.h"
+
+void TestAlignment() {
+    REMARK("testing alignment\n");
+    tbb::task_scheduler_init init;
+    // Try types that have variety of alignments
+    TestAlignmentOfOneClass<char>();
+    TestAlignmentOfOneClass<short>();
+    TestAlignmentOfOneClass<int>();
+    TestAlignmentOfOneClass<long>();
+    TestAlignmentOfOneClass<void*>();
+    TestAlignmentOfOneClass<float>();
+    TestAlignmentOfOneClass<double>();
+#if HAVE_m128
+    TestAlignmentOfOneClass<__m128>();
+#endif /* HAVE_m128 */
+}
+
+//------------------------------------------------------------------------
+// Test for recursing on left while spawning on right
+//------------------------------------------------------------------------
+
+int Fib( int n );
+
+struct RightFibTask: public tbb::task {
+    int* y;
+    const int n;
+    RightFibTask( int* y_, int n_ ) : y(y_), n(n_) {}
+    task* execute() {
+        *y = Fib(n-1);
+        return 0;
+    } 
+};
+
+int Fib( int n ) {
+    if( n<2 ) {
+        return n;
+    } else {
+        // y actually does not need to be initialized.  It is initialized solely to suppress
+        // a gratuitous warning "potentially uninitialized local variable". 
+        int y=-1;
+        tbb::task* root_task = new( tbb::task::allocate_root() ) tbb::empty_task;
+        root_task->set_ref_count(2);
+        tbb::task::spawn( *new( root_task->allocate_child() ) RightFibTask(&y,n) );
+        int x = Fib(n-2);
+        root_task->wait_for_all();
+        tbb::task::destroy(*root_task);
+        return y+x;
+    }
+}
+
+void TestLeftRecursion( int p ) {
+    REMARK("testing non-spawned roots for %d threads\n",p);
+    tbb::task_scheduler_init init(p);
+    int sum = 0; 
+    for( int i=0; i<100; ++i )
+        sum +=Fib(10);
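+    // 100 iterations of Fib(10), with Fib(10) == 55, gives 5500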
+    ASSERT( sum==5500, NULL );
+}
+
+//------------------------------------------------------------------------
+// Test for computing with DAG of tasks.
+//------------------------------------------------------------------------
+
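+// Each DagTask at (i,j) adds the values received from its left and upper neighbors, with
+// cell (0,0) seeded to 1, so the value at (i,j) is the binomial coefficient C(i+j,j);
+// check_sum() verifies exactly that via the product formula.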
+class DagTask: public tbb::task {
+    typedef unsigned long long number_t;
+    const int i, j;
+    number_t sum_from_left, sum_from_above;
+    void check_sum( number_t sum ) {
+        number_t expected_sum = 1;
+        for( int k=i+1; k<=i+j; ++k ) 
+            expected_sum *= k;
+        for( int k=1; k<=j; ++k ) 
+            expected_sum /= k;
+        ASSERT(sum==expected_sum, NULL);
+    }
+public:
+    DagTask *successor_to_below, *successor_to_right;
+    DagTask( int i_, int j_ ) : i(i_), j(j_), sum_from_left(0), sum_from_above(0) {}
+    task* execute() {
+        __TBB_ASSERT( ref_count()==0, NULL );
+        number_t sum = i==0 && j==0 ? 1 : sum_from_left+sum_from_above;
+        check_sum(sum);
+        ++execution_count;
+        if( DagTask* t = successor_to_right ) {
+            t->sum_from_left = sum;
+            if( t->decrement_ref_count()==0 )
+                // Test using spawn to evaluate DAG
+                spawn( *t );
+        }
+        if( DagTask* t = successor_to_below ) {
+            t->sum_from_above = sum;
+            if( t->decrement_ref_count()==0 ) 
+                // Test using bypass to evaluate DAG
+                return t;
+        } 
+        return NULL;  
+    }
+    ~DagTask() {++destruction_count;}
+    static tbb::atomic<int> execution_count;
+    static tbb::atomic<int> destruction_count;
+};
+
+tbb::atomic<int> DagTask::execution_count;
+tbb::atomic<int> DagTask::destruction_count;
+
+void TestDag( int p ) {
+    REMARK("testing evaluation of DAG for %d threads\n",p);
+    tbb::task_scheduler_init init(p);
+    DagTask::execution_count=0;
+    DagTask::destruction_count=0;
+    const int n = 10;
+    DagTask* a[n][n];
+    for( int i=0; i<n; ++i ) 
+        for( int j=0; j<n; ++j )
+            a[i][j] = new( tbb::task::allocate_root() ) DagTask(i,j);
+    for( int i=0; i<n; ++i ) 
+        for( int j=0; j<n; ++j ) {
+            a[i][j]->successor_to_below = i+1<n ? a[i+1][j] : NULL;
+            a[i][j]->successor_to_right = j+1<n ? a[i][j+1] : NULL;
+            a[i][j]->set_ref_count((i>0)+(j>0));
+        }
+    a[n-1][n-1]->increment_ref_count();
+    a[n-1][n-1]->spawn_and_wait_for_all(*a[0][0]);
+    ASSERT( DagTask::execution_count == n*n - 1, NULL );
+    tbb::task::destroy(*a[n-1][n-1]);
+    ASSERT( DagTask::destruction_count > n*n - p, NULL );
+    while ( DagTask::destruction_count != n*n )
+        __TBB_Yield();
+}
+
+#include "harness_barrier.h"
+
+class RelaxedOwnershipTask: public tbb::task {
+    tbb::task &m_taskToSpawn,
+              &m_taskToDestroy,
+              &m_taskToExecute;
+    static Harness::SpinBarrier m_barrier;
+
+    tbb::task* execute () {
+        tbb::task &p = *parent();
+        tbb::task &r = *new( tbb::task::allocate_root() ) tbb::empty_task;
+        r.set_ref_count( 1 );
+        m_barrier.wait();
+        p.spawn( *new(p.allocate_child()) tbb::empty_task );
+        p.spawn( *new(task::allocate_additional_child_of(p)) tbb::empty_task );
+        p.spawn( m_taskToSpawn );
+        p.destroy( m_taskToDestroy );
+        r.spawn_and_wait_for_all( m_taskToExecute );
+        p.destroy( r );
+        return NULL;
+    }
+public:
+    RelaxedOwnershipTask ( tbb::task& toSpawn, tbb::task& toDestroy, tbb::task& toExecute )
+        : m_taskToSpawn(toSpawn)
+        , m_taskToDestroy(toDestroy)
+        , m_taskToExecute(toExecute)
+    {}
+    static void SetBarrier ( int numThreads ) { m_barrier.initialize( numThreads ); }
+};
+
+Harness::SpinBarrier RelaxedOwnershipTask::m_barrier;
+
+void TestRelaxedOwnership( int p ) {
+    if ( p < 2 )
+        return;
+
+    if( unsigned(p)>tbb::tbb_thread::hardware_concurrency() )
+        return;
+
+    REMARK("testing tasks exercising relaxed ownership freedom for %d threads\n", p);
+    tbb::task_scheduler_init init(p);
+    RelaxedOwnershipTask::SetBarrier(p);
+    tbb::task &r = *new( tbb::task::allocate_root() ) tbb::empty_task;
+    tbb::task_list tl;
+    for ( int i = 0; i < p; ++i ) {
+        tbb::task &tS = *new( r.allocate_child() ) tbb::empty_task,
+                  &tD = *new( r.allocate_child() ) tbb::empty_task,
+                  &tE = *new( r.allocate_child() ) tbb::empty_task;
+        tl.push_back( *new( r.allocate_child() ) RelaxedOwnershipTask(tS, tD, tE) );
+    }
+    r.set_ref_count( 5 * p + 1 );
+    int k=0;
+    GetTaskPtr(k)->spawn( tl );
+    ASSERT(k==1,NULL);
+    r.wait_for_all();
+    r.destroy( r );
+}
+
+//------------------------------------------------------------------------
+// Test for running TBB scheduler on user-created thread.
+//------------------------------------------------------------------------
+
+void RunSchedulerInstanceOnUserThread( int n_child ) {
+    tbb::task* e = new( tbb::task::allocate_root() ) tbb::empty_task;
+    e->set_ref_count(1+n_child);
+    for( int i=0; i<n_child; ++i )
+        tbb::task::spawn( *new(e->allocate_child()) tbb::empty_task );
+    e->wait_for_all();
+    e->destroy(*e);
+}
+
+void TestUserThread( int p ) {
+    tbb::task_scheduler_init init(p);
+    // Try with both 0 and 1 children.  Only the latter scenario permits stealing.
+    for( int n_child=0; n_child<2; ++n_child ) {
+        tbb::tbb_thread t( RunSchedulerInstanceOnUserThread, n_child );
+        t.join();
+    }
+}
+
+
+class TaskWithChildToSteal : public tbb::task {
+    const int m_Depth; 
+    volatile bool m_GoAhead;
+
+public:
+    TaskWithChildToSteal( int depth_ )
+        : m_Depth(depth_)
+        , m_GoAhead(false)
+    {}
+
+    /*override*/
+    tbb::task* execute() {
+        m_GoAhead = true;
+        if ( m_Depth > 0 ) {
+            TaskWithChildToSteal &t = *new( tbb::task::allocate_child() ) TaskWithChildToSteal(m_Depth - 1);
+            t.SpawnAndWaitOnParent();
+        }
+        else
+            Harness::Sleep(50); // The last task in the chain sleeps for 50 ms
+        return NULL;
+    }
+
+    void SpawnAndWaitOnParent() {
+        parent()->set_ref_count( 2 );
+        parent()->spawn( *this );
+        while (!this->m_GoAhead )
+            __TBB_Yield();
+        parent()->wait_for_all();
+    }
+}; // TaskWithChildToSteal
+
+void TestDispatchLoopResponsiveness() {
+    REMARK("testing that dispatch loops do not go into eternal sleep when all remaining children are stolen\n");
+    // Recursion depth values test the following sorts of dispatch loops
+    // 0 - master's outermost
+    // 1 - worker's nested
+    // 2 - master's nested
+    tbb::task_scheduler_init init(2);
+    tbb::task &r = *new( tbb::task::allocate_root() ) tbb::empty_task;
+    for ( int depth = 0; depth < 3; ++depth ) {
+        TaskWithChildToSteal &t = *new( r.allocate_child() ) TaskWithChildToSteal(depth);
+        t.SpawnAndWaitOnParent();
+    }
+    r.destroy(r);
+    // The success criterion of this test is that it does not hang
+}
+
+void TestWaitDiscriminativenessWithoutStealing() {
+    REMARK( "testing that task::wait_for_all is specific to the root it is called on (no workers)\n" );
+    // The test relies on the strict LIFO scheduling order in the absence of workers
+    tbb::task_scheduler_init init(1);
+    tbb::task &r1 = *new( tbb::task::allocate_root() ) tbb::empty_task;
+    tbb::task &r2 = *new( tbb::task::allocate_root() ) tbb::empty_task;
+    const int NumChildren = 10;
+    r1.set_ref_count( NumChildren + 1 );
+    r2.set_ref_count( NumChildren + 1 );
+    for( int i=0; i < NumChildren; ++i ) {
+        tbb::empty_task &t1 = *new( r1.allocate_child() ) tbb::empty_task;
+        tbb::empty_task &t2 = *new( r2.allocate_child() ) tbb::empty_task;
+        tbb::task::spawn(t1);
+        tbb::task::spawn(t2);
+    }
+    r2.wait_for_all();
+    ASSERT( r2.ref_count() <= 1, "Not all children of r2 executed" );
+    ASSERT( r1.ref_count() > 1, "All children of r1 prematurely executed" );
+    r1.wait_for_all();
+    ASSERT( r1.ref_count() <= 1, "Not all children of r1 executed" );
+    r1.destroy(r1);
+    r2.destroy(r2);
+}
+
+
+using tbb::internal::spin_wait_until_eq;
+
+//! Deterministic emulation of a long running task
+class LongRunningTask : public tbb::task {
+    volatile bool& m_CanProceed;
+
+    tbb::task* execute() {
+        spin_wait_until_eq( m_CanProceed, true );
+        return NULL;
+    }
+public:
+    LongRunningTask ( volatile bool& canProceed ) : m_CanProceed(canProceed) {}
+};
+
+void TestWaitDiscriminativenessWithStealing() {
+    if( tbb::tbb_thread::hardware_concurrency() < 2 )
+        return;
+    REMARK( "testing that task::wait_for_all is specific to the root it is called on (one worker)\n" );
+    volatile bool canProceed = false;
+    tbb::task_scheduler_init init(2);
+    tbb::task &r1 = *new( tbb::task::allocate_root() ) tbb::empty_task;
+    tbb::task &r2 = *new( tbb::task::allocate_root() ) tbb::empty_task;
+    r1.set_ref_count( 2 );
+    r2.set_ref_count( 2 );
+    tbb::task& t1 = *new( r1.allocate_child() ) tbb::empty_task;
+    tbb::task& t2 = *new( r2.allocate_child() ) LongRunningTask(canProceed);
+    tbb::task::spawn(t2);
+    tbb::task::spawn(t1);
+    r1.wait_for_all();
+    ASSERT( r1.ref_count() <= 1, "Not all children of r1 executed" );
+    ASSERT( r2.ref_count() == 2, "All children of r2 prematurely executed" );
+    canProceed = true;
+    r2.wait_for_all();
+    ASSERT( r2.ref_count() <= 1, "Not all children of r2 executed" );
+    r1.destroy(r1);
+    r2.destroy(r2);
+}
+
+struct MasterBody : NoAssign, Harness::NoAfterlife {
+    static Harness::SpinBarrier my_barrier;
+
+    class BarrenButLongTask : public tbb::task {
+        volatile bool& m_Started;
+        volatile bool& m_CanProceed;
+
+        tbb::task* execute() {
+            m_Started = true;
+            spin_wait_until_eq( m_CanProceed, true );
+            volatile int k = 0;
+            for ( int i = 0; i < 1000000; ++i ) ++k;
+            return NULL;
+        }
+    public:
+        BarrenButLongTask ( volatile bool& started, volatile bool& can_proceed )
+            : m_Started(started), m_CanProceed(can_proceed)
+        {}
+    };
+
+    class BinaryRecursiveTask : public tbb::task {
+        int m_Depth;
+
+        tbb::task* execute() {
+            if( !m_Depth )
+                return NULL;
+            set_ref_count(3);
+            spawn( *new( tbb::task::allocate_child() ) BinaryRecursiveTask(m_Depth - 1) );
+            spawn( *new( tbb::task::allocate_child() ) BinaryRecursiveTask(m_Depth - 1) );
+            wait_for_all();
+            return NULL;
+        }
+
+        void note_affinity( affinity_id ) {
+            __TBB_ASSERT( false, "These tasks cannot be stolen" );
+        }
+    public:
+        BinaryRecursiveTask ( int depth_ ) : m_Depth(depth_) {}
+    };
+
+    void operator() ( int id ) const {
+        if ( id ) {
+            tbb::task_scheduler_init init(2);
+            volatile bool child_started = false,
+                          can_proceed = false;
+            tbb::task& r = *new( tbb::task::allocate_root() ) tbb::empty_task;
+            r.set_ref_count(2);
+            r.spawn( *new(r.allocate_child()) BarrenButLongTask(child_started, can_proceed) );
+            spin_wait_until_eq( child_started, true );
+            my_barrier.wait();
+            can_proceed = true;
+            r.wait_for_all();
+            r.destroy(r);
+        }
+        else {
+            my_barrier.wait();
+            tbb::task_scheduler_init init(1);
+            Count = 0;
+            int depth = 16;
+            BinaryRecursiveTask& r = *new( tbb::task::allocate_root() ) BinaryRecursiveTask(depth);
+            tbb::task::spawn_root_and_wait(r);
+        }
+    }
+public:
+    MasterBody ( int num_masters ) { my_barrier.initialize(num_masters); }
+};
+
+Harness::SpinBarrier MasterBody::my_barrier;
+
+/** Ensures that tasks spawned by a master thread or one of the workers servicing
+    it cannot be stolen by another master thread. **/
+void TestMastersIsolation ( int p ) {
+    // The test requires at least 3-way parallelism to work correctly
+    if ( p > 2 && tbb::task_scheduler_init::default_num_threads() >= p ) {
+        tbb::task_scheduler_init init(p);
+        NativeParallelFor( p, MasterBody(p) );
+    }
+}
+
+//------------------------------------------------------------------------
+// Test for tbb::task::enqueue
+//------------------------------------------------------------------------
+
+const int PairsPerTrack = 100;
+
+class EnqueuedTask : public tbb::task {
+    task* my_successor;
+    int my_enqueue_order;
+    int* my_track;
+    tbb::task* execute() {
+        // Capture execution order in the very beginning
+        int execution_order = 2 - my_successor->decrement_ref_count();
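+        // (decrement_ref_count() returns the updated count, so the first of the
+        // two peer tasks to get here computes 1 and the second computes 2.)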
+        // Create some local work.
+        TaskGenerator& p = *new( tbb::task::allocate_root() ) TaskGenerator(2,2);
+        tbb::task::spawn_root_and_wait(p);
+        if( execution_order==2 ) { // the "slower" of two peer tasks
+            ++nCompletedPairs;
+            // Of course execution order can differ from dequeue order.
+            // But there is no better approximation at hand; and a single worker
+            // will execute in dequeue order, which is enough for our check.
+            if (my_enqueue_order==execution_order)
+                ++nOrderedPairs;
+            FireTwoTasks(my_track);
+            destroy(*my_successor);
+        }
+        return NULL;
+    }
+public:
+    EnqueuedTask( task* successor, int enq_order, int* track )
+    : my_successor(successor), my_enqueue_order(enq_order), my_track(track) {}
+
+    // Create and enqueue two tasks
+    static void FireTwoTasks( int* track ) {
+        int progress = ++*track;
+        if( progress < PairsPerTrack ) {
+            task* successor = new (tbb::task::allocate_root()) tbb::empty_task;
+            successor->set_ref_count(2);
+            enqueue( *new (tbb::task::allocate_root()) EnqueuedTask(successor, 1, track) );
+            enqueue( *new (tbb::task::allocate_root()) EnqueuedTask(successor, 2, track) );
+        }
+    }
+
+    static tbb::atomic<int> nCompletedPairs;
+    static tbb::atomic<int> nOrderedPairs;
+};
+
+tbb::atomic<int> EnqueuedTask::nCompletedPairs;
+tbb::atomic<int> EnqueuedTask::nOrderedPairs;
+
+const int nTracks = 10;
+static int TaskTracks[nTracks];
+const int stall_threshold = 100000;
+
+void TimedYield( double pause_time );
+
+class ProgressMonitor {
+public:
+    void operator() ( ) {
+        int track_snapshot[nTracks];
+        int stall_count = 0, uneven_progress_count = 0, last_progress_mask = 0;
+        for(int i=0; i<nTracks; ++i)
+            track_snapshot[i]=0;
+        bool completed;
+        do {
+            // Yield repeatedly for at least 1 usec
+            TimedYield( 1E-6 );
+            int overall_progress = 0, progress_mask = 0;
+            const int all_progressed = (1<<nTracks) - 1;
+            completed = true;
+            for(int i=0; i<nTracks; ++i) {
+                int ti = TaskTracks[i];
+                int pi = ti-track_snapshot[i];
+                if( pi ) progress_mask |= 1<<i;
+                overall_progress += pi;
+                completed = completed && ti==PairsPerTrack;
+                track_snapshot[i]=ti;
+            }
+            // The constants in the next asserts are subjective and may need correction.
+            if( overall_progress )
+                stall_count=0;
+            else {
+                ++stall_count;
+                // no progress for at least 0.1 s; consider it dead.
+                ASSERT(stall_count < stall_threshold, "no progress on enqueued tasks; deadlock?");
+            }
+            if( progress_mask==all_progressed || progress_mask^last_progress_mask ) {
+                uneven_progress_count = 0;
+                last_progress_mask = progress_mask;
+            }
+            else if ( overall_progress > 2 ) {
+                ++uneven_progress_count;
+                ASSERT(uneven_progress_count < 5, "some enqueued tasks seem to be stalling; no simultaneous progress?");
+            }
+        } while( !completed );
+    }
+};
+
+void TestEnqueue( int p ) {
+    REMARK("testing task::enqueue for %d threads\n", p);
+    for(int mode=0;mode<3;++mode) {
+        tbb::task_scheduler_init init(p);
+        EnqueuedTask::nCompletedPairs = EnqueuedTask::nOrderedPairs = 0;
+        for(int i=0; i<nTracks; ++i) {
+            TaskTracks[i] = -1; // to account for the initial FireTwoTasks call
+            EnqueuedTask::FireTwoTasks(TaskTracks+i);
+        }
+        ProgressMonitor pm;
+        tbb::tbb_thread thr( pm );
+        if(mode==1) {
+            // do some parallel work in the meantime
+            for(int i=0; i<10; i++) {
+                TaskGenerator& g = *new( tbb::task::allocate_root() ) TaskGenerator(2,5);
+                tbb::task::spawn_root_and_wait(g);
+                TimedYield( 1E-6 );
+            }
+        }
+        if( mode==2 ) {
+            // Additionally enqueue a bunch of empty tasks. The goal is to test that tasks
+            // allocated and enqueued by a thread are safe to use after the thread leaves TBB.
+            tbb::task* root = new (tbb::task::allocate_root()) tbb::empty_task;
+            root->set_ref_count(100);
+            for( int i=0; i<100; ++i )
+                tbb::task::enqueue( *new (root->allocate_child()) tbb::empty_task );
+            init.terminate(); // master thread deregistered
+        }
+        thr.join();
+        ASSERT(EnqueuedTask::nCompletedPairs==nTracks*PairsPerTrack, NULL);
+        ASSERT(EnqueuedTask::nOrderedPairs<EnqueuedTask::nCompletedPairs,
+            "all task pairs executed in enqueue order; de facto guarantee is too strong?");
+    }
+}
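+
+// For reference, a minimal sketch of the bare enqueue pattern the test above
+// builds on (illustrative only, not part of the original sources); it assumes
+// an active tbb::task_scheduler_init in the calling thread:
+//
+//     tbb::task* root = new( tbb::task::allocate_root() ) tbb::empty_task;
+//     root->set_ref_count( 10 + 1 );           // children + 1 for the wait below
+//     for( int i = 0; i < 10; ++i )
+//         tbb::task::enqueue( *new( root->allocate_child() ) tbb::empty_task );
+//     root->wait_for_all();                    // returns once all children have run
+//     root->destroy( *root );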
+
+//------------------------------------------------------------------------
+// Run all tests.
+//------------------------------------------------------------------------
+
+int TestMain () {
+#if TBB_USE_EXCEPTIONS
+    TestUnconstructibleTask<1>();
+    TestUnconstructibleTask<10000>();
+#endif
+    TestAlignment();
+    TestNoteAffinityContext();
+    TestDispatchLoopResponsiveness();
+    TestWaitDiscriminativenessWithoutStealing();
+    TestWaitDiscriminativenessWithStealing();
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        TestSpawnChildren( p );
+        TestSpawnRootList( p );
+        TestSafeContinuation( p );
+        TestEnqueue( p );
+        TestLeftRecursion( p );
+        TestDag( p );
+        TestAffinity( p );
+        TestUserThread( p );
+        TestStealLimit( p );
+        TestRelaxedOwnership( p );
+#if __TBB_ARENA_PER_MASTER
+        TestMastersIsolation( p );
+#endif /* __TBB_ARENA_PER_MASTER */
+    }
+    return Harness::Done;
+}
+
+#include "tbb/tick_count.h"
+void TimedYield( double pause_time ) {
+    tbb::tick_count start = tbb::tick_count::now();
+    while( (tbb::tick_count::now()-start).seconds() < pause_time )
+        __TBB_Yield();
+}
+
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_task_assertions.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_task_assertions.cpp
new file mode 100644 (file)
index 0000000..b5b9984
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Test correctness of forceful TBB initialization before any dynamic initialization
+// of static objects inside the library has taken place.
+namespace tbb { 
+namespace internal {
+    // Forward declaration of the TBB general initialization routine from task.cpp
+    void DoOneTimeInitializations();
+}}
+
+struct StaticInitializationChecker {
+    StaticInitializationChecker () { tbb::internal::DoOneTimeInitializations(); }
+} theChecker;
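+// Being a namespace-scope object, theChecker is constructed during dynamic
+// initialization of this translation unit. Since the initialization order across
+// translation units is unspecified, the call above may reach the library before
+// the library's own static objects have been constructed, which is precisely the
+// situation this test exercises.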
+
+//------------------------------------------------------------------------
+// Test that important assertions in class task fail as expected.
+//------------------------------------------------------------------------
+
+#include "harness_inject_scheduler.h"
+
+#define HARNESS_NO_PARSE_COMMAND_LINE 1
+#include "harness.h"
+#include "harness_bad_expr.h"
+
+#if TRY_BAD_EXPR_ENABLED
+//! Task that will be abused.
+tbb::task* volatile AbusedTask;
+
+//! Number of times that the body of AbuseOneTask has run
+int AbuseOneTaskRan;
+
+//! Body used to create task in thread 0 and abuse it in thread 1.
+struct AbuseOneTask {
+    void operator()( int ) const {
+        tbb::task_scheduler_init init;
+        // Thread 1 attempts to incorrectly use the task created by thread 0.
+        tbb::task_list list;
+        // spawn_root_and_wait over empty list should vacuously succeed.
+        tbb::task::spawn_root_and_wait(list);
+
+        // Check that spawn_root_and_wait fails on non-empty list. 
+        list.push_back(*AbusedTask);
+
+        // Try abusing recycle_as_continuation
+        TRY_BAD_EXPR(AbusedTask->recycle_as_continuation(), "execute" );
+        TRY_BAD_EXPR(AbusedTask->recycle_as_safe_continuation(), "execute" );
+        TRY_BAD_EXPR(AbusedTask->recycle_to_reexecute(), "execute" );
+        ++AbuseOneTaskRan;
+    }
+};
+
+//! Test various __TBB_ASSERT assertions related to class tbb::task.
+void TestTaskAssertions() {
+    // Catch assertion failures
+    tbb::set_assertion_handler( AssertionFailureHandler );
+    tbb::task_scheduler_init init;
+    // Create task to be abused
+    AbusedTask = new( tbb::task::allocate_root() ) tbb::empty_task;
+    NativeParallelFor( 1, AbuseOneTask() );
+    ASSERT( AbuseOneTaskRan==1, NULL );
+    AbusedTask->destroy(*AbusedTask);
+    // Restore normal assertion handling
+    tbb::set_assertion_handler( NULL );
+}
+
+int TestMain () {
+    TestTaskAssertions();
+    return Harness::Done;
+}
+
+#else /* !TRY_BAD_EXPR_ENABLED */
+
+int TestMain () {
+    return Harness::Skipped;
+}
+
+#endif /* !TRY_BAD_EXPR_ENABLED */
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_task_auto_init.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_task_auto_init.cpp
new file mode 100644 (file)
index 0000000..261b1df
--- /dev/null
@@ -0,0 +1,196 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Testing automatic initialization of TBB task scheduler, so do not use task_scheduler_init anywhere.
+
+#include "tbb/task.h"
+
+#define HARNESS_NO_PARSE_COMMAND_LINE 1
+#include "harness.h"
+#include "tbb/atomic.h"
+
+static tbb::atomic<int> g_NumTestsExecuted;
+
+#define TEST_PROLOGUE() ++g_NumTestsExecuted
+
+// Global data used in testing use cases with cross-thread usage of TBB objects
+static tbb::task *g_Root1 = NULL,
+                 *g_Root2 = NULL,
+                 *g_Root3 = NULL,
+                 *g_Task = NULL;
+static tbb::task_group_context* g_Ctx = NULL;
+
+
+void TestTaskSelf () {
+    TEST_PROLOGUE();
+    tbb::task& t = tbb::task::self();
+    ASSERT( !t.parent() && t.ref_count() == 1 && !t.affinity(), "Master's default task properties changed?" );
+}
+
+void TestRootAllocation () {
+    TEST_PROLOGUE();
+    tbb::task &r = *new( tbb::task::allocate_root() ) tbb::empty_task;
+    tbb::task::spawn_root_and_wait(r);
+}
+
+inline void ExecuteChildAndCleanup ( tbb::task &r, tbb::task &t ) {
+    r.set_ref_count(2);
+    r.spawn_and_wait_for_all(t);
+    r.destroy(r);
+}
+
+void TestChildAllocation () {
+    TEST_PROLOGUE();
+    tbb::task &t = *new( g_Root1->allocate_child() ) tbb::empty_task;
+    ExecuteChildAndCleanup( *g_Root1, t );
+}
+
+void TestAdditionalChildAllocation () {
+    TEST_PROLOGUE();
+    tbb::task &t = *new( g_Root2->allocate_additional_child_of(*g_Root2) ) tbb::empty_task;
+    ExecuteChildAndCleanup( *g_Root2, t );
+}
+
+void TestTaskGroupContextCreation () {
+    TEST_PROLOGUE();
+    tbb::task_group_context ctx;
+    tbb::task &r = *new( tbb::task::allocate_root(ctx) ) tbb::empty_task;
+    tbb::task::spawn_root_and_wait(r);
+}
+
+void TestRootAllocationWithContext () {
+    TEST_PROLOGUE();
+    tbb::task* root = new( tbb::task::allocate_root(*g_Ctx) ) tbb::empty_task;
+    tbb::task::spawn_root_and_wait(*root);
+}
+
+void TestSpawn () {
+    TEST_PROLOGUE();
+    g_Task->spawn(*g_Task);
+}
+
+void TestWaitForAll () {
+    TEST_PROLOGUE();
+    g_Root3->wait_for_all();
+    g_Root3->destroy( *g_Root3 );
+}
+
+typedef void (*TestFnPtr)();
+
+const TestFnPtr TestFuncsTable[] = {
+        TestTaskSelf, TestRootAllocation, TestChildAllocation, TestAdditionalChildAllocation, 
+        TestTaskGroupContextCreation, TestRootAllocationWithContext, TestSpawn, TestWaitForAll };
+
+const int NumTestFuncs = sizeof(TestFuncsTable) / sizeof(TestFnPtr);
+
+struct TestThreadBody : NoAssign, Harness::NoAfterlife {
+    // Each invocation of operator() happens in a fresh thread with zero-based ID 
+    // id, and checks a specific auto-initialization scenario.
+    void operator() ( int id ) const {
+        ASSERT( id >= 0 && id < NumTestFuncs, "Test driver: NativeParallelFor is used incorrectly" );
+        TestFuncsTable[id]();
+    }
+};
+
+
+#include "../tbb/tls.h"
+
+void UseAFewNewTlsKeys () {
+    tbb::internal::tls<intptr_t> tls1, tls2, tls3, tls4;
+    tls1 = tls2 = tls3 = tls4 = -1;
+}
+
+using tbb::internal::spin_wait_until_eq;
+
+volatile bool FafStarted   = false,
+              FafCanFinish = false,
+              FafCompleted = false;
+
+//! This task is supposed to be executed during termination of an auto-initialized master thread 
+class FireAndForgetTask : public tbb::task {
+    tbb::task* execute () {
+        // Let another master thread proceed requesting new TLS keys
+        FafStarted = true;
+        UseAFewNewTlsKeys();
+        // Wait until the other master thread has dirtied its new TLS slots
+        spin_wait_until_eq( FafCanFinish, true );
+        FafCompleted = true;
+        return NULL;
+    }
+public: // to make gcc 3.2.3 happy
+    ~FireAndForgetTask() {
+        ASSERT(FafCompleted, "FireAndForgetTask got erroneously cancelled?");
+    }
+};
+
+#include "harness_barrier.h"
+Harness::SpinBarrier driver_barrier(2);
+
+struct DriverThreadBody : NoAssign, Harness::NoAfterlife {
+    void operator() ( int id ) const {
+        ASSERT( id < 2, "Only two test driver threads are expected" );
+        // A barrier is required to ensure that both threads have started; otherwise the test may deadlock:
+        // the first thread would execute FireAndForgetTask at shutdown and wait for FafCanFinish,
+        // while the second thread wouldn't even start waiting for the loader lock held by the first one.
+        if ( id == 0 ) {
+            driver_barrier.wait();
+            // Prepare global data
+            g_Root1 = new( tbb::task::allocate_root() ) tbb::empty_task;
+            g_Root2 = new( tbb::task::allocate_root() ) tbb::empty_task;
+            g_Root3 = new( tbb::task::allocate_root() ) tbb::empty_task;
+            g_Task = new( g_Root3->allocate_child() ) tbb::empty_task;
+            g_Root3->set_ref_count(2);
+            // Run tests
+            NativeParallelFor( NumTestFuncs, TestThreadBody() );
+            ASSERT( g_NumTestsExecuted == NumTestFuncs, "Test driver: Wrong number of tests executed" );
+
+            // This test checks the validity of temporarily restoring the value of 
+            // the last TLS slot for a given key during the termination of an 
+            // auto-initialized master thread (in governor::auto_terminate). 
+            // If anything goes wrong, generic_scheduler::cleanup_master() will assert.
+            // The context for this task must be valid till the task completion.
+            tbb::task &r = *new( tbb::task::allocate_root(*g_Ctx) ) FireAndForgetTask;
+            r.spawn(r);
+        }
+        else {
+            tbb::task_group_context ctx;
+            g_Ctx = &ctx;
+            driver_barrier.wait();
+            spin_wait_until_eq( FafStarted, true );
+            UseAFewNewTlsKeys();
+            FafCanFinish = true;
+            spin_wait_until_eq( FafCompleted, true );
+        }
+    }
+};
+
+int TestMain () {
+    // Do not use any TBB functionality in the main thread!
+    NativeParallelFor( 2, DriverThreadBody() );
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_task_group.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_task_group.cpp
new file mode 100644 (file)
index 0000000..4d68737
--- /dev/null
@@ -0,0 +1,848 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+//! task_handle<T> cannot be instantiated with a function ptr without explicit cast
+#define __TBB_FUNC_PTR_AS_TEMPL_PARAM_BROKEN ((__linux__ || __APPLE__) && __INTEL_COMPILER && __INTEL_COMPILER < 1100) || __SUNPRO_CC
+#define __TBB_UNQUALIFIED_CALL_OF_DTOR_BROKEN (__GNUC__==3 && __GNUC_MINOR__<=3)
+
+#ifndef TBBTEST_USE_TBB
+    #define TBBTEST_USE_TBB 1
+#endif
+
+#if !TBBTEST_USE_TBB
+    #if defined(_MSC_VER) && _MSC_VER < 1600
+        #ifdef TBBTEST_USE_TBB
+            #undef TBBTEST_USE_TBB
+        #endif
+        #define TBBTEST_USE_TBB 1
+    #endif
+#endif
+
+#if TBBTEST_USE_TBB
+
+    #include "tbb/compat/ppl.h"
+    #include "tbb/task_scheduler_init.h"
+
+    #if _MSC_VER
+        typedef tbb::internal::uint32_t uint_t;
+    #else
+        typedef uint32_t uint_t;
+    #endif
+
+#else /* !TBBTEST_USE_TBB */
+
+    #if defined(_MSC_VER)
+    #pragma warning(disable: 4100 4180)
+    #endif
+
+    #include <ppl.h>
+
+    typedef unsigned int uint_t;
+
+    #define __TBB_SILENT_CANCELLATION_BROKEN  (_MSC_VER == 1600)
+
+#endif /* !TBBTEST_USE_TBB */
+
+
+#include "tbb/atomic.h"
+#include "harness_concurrency_tracker.h"
+
+unsigned g_MaxConcurrency = 0;
+
+typedef tbb::atomic<uint_t> atomic_t;
+typedef Concurrency::task_handle<void(*)()> handle_type;
+
+//------------------------------------------------------------------------
+// Tests for the thread safety of the task_group manipulations
+//------------------------------------------------------------------------
+
+#include "harness_barrier.h"
+
+enum SharingMode {
+    VagabondGroup = 1,
+    ParallelWait = 2
+};
+
+class  SharedGroupBodyImpl : NoCopy, Harness::NoAfterlife {
+    static const uint_t c_numTasks0 = 4096,
+                        c_numTasks1 = 1024;
+
+    const uint_t m_numThreads;
+    const uint_t m_sharingMode;
+
+    Concurrency::task_group *m_taskGroup;
+    atomic_t m_tasksSpawned,
+             m_threadsReady;
+    Harness::SpinBarrier m_barrier;
+
+    static atomic_t s_tasksExecuted;
+
+    struct TaskFunctor {
+        SharedGroupBodyImpl *m_pOwner;
+        void operator () () const {
+            if ( m_pOwner->m_sharingMode & ParallelWait ) {
+                while ( Harness::ConcurrencyTracker::PeakParallelism() < m_pOwner->m_numThreads )
+                    __TBB_Yield();
+            }
+            ++s_tasksExecuted;
+        }
+    };
+
+    TaskFunctor m_taskFunctor;
+
+    void Spawn ( uint_t numTasks ) {
+        for ( uint_t i = 0; i < numTasks; ++i ) {
+            ++m_tasksSpawned;
+            Harness::ConcurrencyTracker ct;
+            m_taskGroup->run( m_taskFunctor );
+        }
+        ++m_threadsReady;
+    }
+
+    void DeleteTaskGroup () {
+        delete m_taskGroup;
+        m_taskGroup = NULL;
+    }
+
+    void Wait () {
+        while ( m_threadsReady != m_numThreads )
+            __TBB_Yield();
+        const uint_t numSpawned = c_numTasks0 + c_numTasks1 * (m_numThreads - 1);
+        ASSERT ( m_tasksSpawned == numSpawned, "Wrong number of spawned tasks. The test is broken" );
+        REMARK("Max spawning parallelism is %u out of %u", Harness::ConcurrencyTracker::PeakParallelism(), g_MaxConcurrency);
+        if ( m_sharingMode & ParallelWait ) {
+            m_barrier.wait( &Harness::ConcurrencyTracker::Reset );
+            {
+                Harness::ConcurrencyTracker ct;
+                m_taskGroup->wait();
+            }
+            if ( Harness::ConcurrencyTracker::PeakParallelism() == 1 )
+                REPORT ( "Warning: No parallel waiting detected in TestParallelWait\n" );
+            m_barrier.wait();
+        }
+        else
+            m_taskGroup->wait();
+        ASSERT ( m_tasksSpawned == numSpawned, "No tasks should be spawned after wait starts. The test is broken" );
+        ASSERT ( s_tasksExecuted == numSpawned, "Not all spawned tasks were executed" );
+    }
+
+public:
+    SharedGroupBodyImpl ( uint_t numThreads, uint_t sharingMode = 0 )
+        : m_numThreads(numThreads)
+        , m_sharingMode(sharingMode)
+        , m_taskGroup(NULL)
+        , m_barrier(numThreads)
+    {
+        ASSERT ( m_numThreads > 1, "SharedGroupBody tests require concurrency" );
+        ASSERT ( !(m_sharingMode & VagabondGroup) || m_numThreads == 2, "In vagabond mode SharedGroupBody must be used with 2 threads only" );
+        Harness::ConcurrencyTracker::Reset();
+        s_tasksExecuted = 0;
+        m_tasksSpawned = 0;
+        m_threadsReady = 0;
+        m_taskFunctor.m_pOwner = this;
+    }
+
+    void Run ( uint_t idx ) {
+#if TBBTEST_USE_TBB
+        tbb::task_scheduler_init init;
+#endif
+        AssertLive();
+        if ( idx == 0 ) {
+            ASSERT ( !m_taskGroup && !m_tasksSpawned, "SharedGroupBody must be reset before reuse");
+            m_taskGroup = new Concurrency::task_group;
+            Spawn( c_numTasks0 );
+            Wait();
+            if ( m_sharingMode & VagabondGroup )
+                m_barrier.wait();
+            else
+                DeleteTaskGroup();
+        }
+        else {
+            while ( m_tasksSpawned == 0 )
+                __TBB_Yield();
+            ASSERT ( m_taskGroup, "Task group is not initialized");
+            Spawn (c_numTasks1);
+            if ( m_sharingMode & ParallelWait )
+                Wait();
+            if ( m_sharingMode & VagabondGroup ) {
+                ASSERT ( idx == 1, "In vagabond mode SharedGroupBody must be used with 2 threads only" );
+                m_barrier.wait();
+                DeleteTaskGroup();
+            }
+        }
+        AssertLive();
+    }
+};
+
+atomic_t SharedGroupBodyImpl::s_tasksExecuted;
+
+class  SharedGroupBody : NoAssign, Harness::NoAfterlife {
+    bool m_bOwner;
+    mutable SharedGroupBodyImpl *m_pImpl;
+public:
+    SharedGroupBody ( uint_t numThreads, uint_t sharingMode = 0 )
+        : m_bOwner(true)
+        , m_pImpl( new SharedGroupBodyImpl(numThreads, sharingMode) )
+    {}
+    SharedGroupBody ( const SharedGroupBody& src )
+        : NoAssign()
+        , Harness::NoAfterlife()
+        , m_bOwner(false)
+        , m_pImpl(src.m_pImpl)
+    {}
+    ~SharedGroupBody () {
+        if ( m_bOwner )
+            delete m_pImpl;
+    }
+    void operator() ( uint_t idx ) const { m_pImpl->Run(idx); }
+};
+
+void TestParallelSpawn () {
+    NativeParallelFor( g_MaxConcurrency, SharedGroupBody(g_MaxConcurrency) );
+}
+
+void TestParallelWait () {
+    NativeParallelFor( g_MaxConcurrency, SharedGroupBody(g_MaxConcurrency, ParallelWait) );
+}
+
+// Tests non-stack-bound task group (the group that is allocated by one thread and destroyed by the other)
+void TestVagabondGroup () {
+    NativeParallelFor( 2, SharedGroupBody(2, VagabondGroup) );
+}
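+
+// For reference, the basic task_group protocol the tests above stress
+// (illustrative only, not part of the original sources; Chore1 and Chore2 stand
+// for arbitrary void() functions, and the Concurrency names map to tbb ones via
+// tbb/compat/ppl.h when TBBTEST_USE_TBB is set):
+//
+//     Concurrency::task_group tg;
+//     tg.run( &Chore1 );    // chores may run concurrently on worker threads
+//     tg.run( &Chore2 );
+//     tg.wait();            // blocks until every chore in the group has finished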
+
+//------------------------------------------------------------------------
+// Common requisites of the Fibonacci tests
+//------------------------------------------------------------------------
+
+const uint_t N = 20;
+const uint_t F = 6765;
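+// F == fib(N): with fib(0) = 0 and fib(1) = 1, the 20th Fibonacci number is 6765.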
+
+atomic_t g_Sum;
+
+#define FIB_TEST_PROLOGUE() \
+    const unsigned numRepeats = g_MaxConcurrency * (TBB_USE_DEBUG ? 4 : 16);    \
+    Harness::ConcurrencyTracker::Reset()
+
+#define FIB_TEST_EPILOGUE(sum) \
+    ASSERT( sum == numRepeats * F, NULL ); \
+    REMARK("Realized parallelism in Fib test is %u out of %u", Harness::ConcurrencyTracker::PeakParallelism(), g_MaxConcurrency)
+
+//------------------------------------------------------------------------
+// Test for a complex tree of task groups
+//
+// The test executes a tree of task groups of the same sort with an asymmetric
+// distribution of descendant nodes at each level.
+//
+// The chores are specified as functor objects. Each task group contains only one chore.
+//------------------------------------------------------------------------
+
+template<uint_t Func(uint_t)>
+struct FibTask : NoAssign, Harness::NoAfterlife {
+    uint_t* m_pRes;
+    const uint_t m_Num;
+    FibTask( uint_t* y, uint_t n ) : m_pRes(y), m_Num(n) {}
+    void operator() () const {
+        *m_pRes = Func(m_Num);
+    } 
+};
+
+uint_t Fib_SpawnRightChildOnly ( uint_t n ) {
+    Harness::ConcurrencyTracker ct;
+    if( n<2 ) {
+        return n;
+    } else {
+        uint_t y = ~0u;
+        Concurrency::task_group tg;
+        tg.run( FibTask<Fib_SpawnRightChildOnly>(&y, n-1) );
+        uint_t x = Fib_SpawnRightChildOnly(n-2);
+        tg.wait();
+        return y+x;
+    }
+}
+
+void TestFib1 () {
+    FIB_TEST_PROLOGUE();
+    uint_t sum = 0; 
+    for( unsigned i = 0; i < numRepeats; ++i )
+        sum += Fib_SpawnRightChildOnly(N);
+    FIB_TEST_EPILOGUE(sum);
+}
+
+
+//------------------------------------------------------------------------
+// Test for a mixed tree of task groups.
+//
+// The test executes a tree with multiple tasks of one sort at the first level,
+// each of which in turn originates a binary tree of descendant task groups.
+//
+// The chores are specified both as functor objects and as function pointers
+//------------------------------------------------------------------------
+
+uint_t Fib_SpawnBothChildren( uint_t n ) {
+    Harness::ConcurrencyTracker ct;
+    if( n<2 ) {
+        return n;
+    } else {
+        uint_t  y = ~0u,
+                x = ~0u;
+        Concurrency::task_group tg;
+        tg.run( FibTask<Fib_SpawnBothChildren>(&x, n-2) );
+        tg.run( FibTask<Fib_SpawnBothChildren>(&y, n-1) );
+        tg.wait();
+        return y + x;
+    }
+}
+
+void RunFib2 () {
+    g_Sum += Fib_SpawnBothChildren(N);
+}
+
+void TestFib2 () {
+    FIB_TEST_PROLOGUE();
+    g_Sum = 0; 
+    Concurrency::task_group rg;
+    for( unsigned i = 0; i < numRepeats - 1; ++i )
+        rg.run( &RunFib2 );
+    rg.wait();
+    rg.run( &RunFib2 );
+    rg.wait();
+    FIB_TEST_EPILOGUE(g_Sum);
+}
+
+
+//------------------------------------------------------------------------
+// Test for a complex tree of task groups
+// The chores are specified as task handles for recursive functor objects.
+//------------------------------------------------------------------------
+
+class FibTask_SpawnRightChildOnly : NoAssign, Harness::NoAfterlife {
+    uint_t* m_pRes;
+    mutable uint_t m_Num;
+
+public:
+    FibTask_SpawnRightChildOnly( uint_t* y, uint_t n ) : m_pRes(y), m_Num(n) {}
+    void operator() () const {
+        Harness::ConcurrencyTracker ct;
+        AssertLive();
+        if( m_Num < 2 ) {
+            *m_pRes = m_Num;
+        } else {
+            uint_t y = ~0u;
+            Concurrency::task_group tg;
+            Concurrency::task_handle<FibTask_SpawnRightChildOnly> h = FibTask_SpawnRightChildOnly(&y, m_Num-1);
+            tg.run( h );
+            m_Num -= 2;
+            tg.run_and_wait( *this );
+            *m_pRes += y;
+        }
+    }
+};
+
+uint_t RunFib3 ( uint_t n ) {
+    uint_t res = ~0u;
+    FibTask_SpawnRightChildOnly func(&res, n);
+    func();
+    return res;
+}
+
+void TestTaskHandle () {
+    FIB_TEST_PROLOGUE();
+    uint_t sum = 0; 
+    for( unsigned i = 0; i < numRepeats; ++i )
+        sum += RunFib3(N);
+    FIB_TEST_EPILOGUE(sum);
+}
+
+//------------------------------------------------------------------------
+// Test for a mixed tree of task groups.
+// The chores are specified as task handles for both functor objects and function pointers
+//------------------------------------------------------------------------
+
+template<class task_group_type>
+class FibTask_SpawnBothChildren : NoAssign, Harness::NoAfterlife {
+    uint_t* m_pRes;
+    uint_t m_Num;
+public:
+    FibTask_SpawnBothChildren( uint_t* y, uint_t n ) : m_pRes(y), m_Num(n) {}
+    void operator() () const {
+        Harness::ConcurrencyTracker ct;
+        AssertLive();
+        if( m_Num < 2 ) {
+            *m_pRes = m_Num;
+        } else {
+            uint_t  x = ~0u, // initialized only to suppress warning 
+                    y = ~0u;
+            task_group_type tg;
+            Concurrency::task_handle<FibTask_SpawnBothChildren> h1 = FibTask_SpawnBothChildren(&y, m_Num-1),
+                                                                h2 = FibTask_SpawnBothChildren(&x, m_Num-2);
+            tg.run( h1 );
+            tg.run( h2 );
+            tg.wait();
+            *m_pRes = x + y;
+        }
+    }
+};
+
+template<class task_group_type>
+void RunFib4 () {
+    uint_t res = ~0u;
+    FibTask_SpawnBothChildren<task_group_type> func(&res, N);
+    func();
+    g_Sum += res;
+}
+
+template<class task_group_type>
+void TestTaskHandle2 () {
+    FIB_TEST_PROLOGUE();
+    g_Sum = 0; 
+    task_group_type rg;
+    const unsigned hSize = sizeof(handle_type);
+    char *handles = new char [numRepeats * hSize];
+    handle_type *h = NULL;
+    for( unsigned i = 0; ; ++i ) {
+        h = (handle_type*)(handles + i * hSize);
+#if __TBB_FUNC_PTR_AS_TEMPL_PARAM_BROKEN
+        new ( h ) handle_type((void(*)())RunFib4<task_group_type>);
+#else
+        new ( h ) handle_type(RunFib4<task_group_type>);
+#endif
+        if ( i == numRepeats - 1 )
+            break;
+        rg.run( *h );
+#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
+        bool caught = false;
+        try {
+            rg.run( *h );
+        }
+        catch ( Concurrency::invalid_multiple_scheduling& e ) {
+            ASSERT( e.what(), "Error message is absent" );
+            caught = true;
+        }
+        catch ( ... ) {
+            ASSERT ( __TBB_EXCEPTION_TYPE_INFO_BROKEN, "Unrecognized exception" );
+        }
+        ASSERT ( caught, "Expected invalid_multiple_scheduling exception is missing" );
+#endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN */
+    }
+    rg.run_and_wait( *h );
+    for( unsigned i = 0; i < numRepeats; ++i )
+#if __TBB_UNQUALIFIED_CALL_OF_DTOR_BROKEN
+        ((handle_type*)(handles + i * hSize))->Concurrency::task_handle<void(*)()>::~task_handle();
+#else
+        ((handle_type*)(handles + i * hSize))->~handle_type();
+#endif
+    delete []handles;
+    FIB_TEST_EPILOGUE(g_Sum);
+}
+
+#if __TBB_LAMBDAS_PRESENT
+//------------------------------------------------------------------------
+// Test for a mixed tree of task groups.
+// The chores are specified as lambdas
+//------------------------------------------------------------------------
+
+void TestFibWithLambdas () {
+    REMARK ("Lambdas test");
+    FIB_TEST_PROLOGUE();
+    atomic_t sum;
+    sum = 0;
+    Concurrency::task_group rg;
+    for( unsigned i = 0; i < numRepeats; ++i )
+        rg.run( [&](){sum += Fib_SpawnBothChildren(N);} );
+    rg.wait();
+    FIB_TEST_EPILOGUE(sum);
+}
+
+//------------------------------------------------------------------------
+// Test for make_task.
+// The chores are specified as lambdas converted to task_handles.
+//------------------------------------------------------------------------
+
+void TestFibWithMakeTask () {
+    REMARK ("Make_task test");
+    atomic_t sum;
+    sum = 0;
+    Concurrency::task_group rg;
+    const auto &h1 = Concurrency::make_task( [&](){sum += Fib_SpawnBothChildren(N);} );
+    const auto &h2 = Concurrency::make_task( [&](){sum += Fib_SpawnBothChildren(N);} );
+    rg.run( h1 );
+    rg.run_and_wait( h2 );
+    ASSERT( sum == 2 * F, NULL );
+}
+#endif /* __TBB_LAMBDAS_PRESENT */
+
+
+//------------------------------------------------------------------------
+// Tests for exception handling and cancellation behavior.
+//------------------------------------------------------------------------
+
+class test_exception : public std::exception
+{
+    const char* m_strDescription;
+public:
+    test_exception ( const char* descr ) : m_strDescription(descr) {}
+
+    const char* what() const throw() { return m_strDescription; }
+};
+
+#if TBB_USE_CAPTURED_EXCEPTION
+    #include "tbb/tbb_exception.h"
+    typedef tbb::captured_exception TestException;
+#else
+    typedef test_exception TestException;
+#endif
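+// With TBB_USE_CAPTURED_EXCEPTION set, exceptions cross task boundaries as
+// tbb::captured_exception objects carrying only the name and what() strings rather
+// than being rethrown exactly; the TestException typedef lets the checks below
+// match whichever type the current configuration delivers.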
+
+#include <string.h>
+
+#define NUM_CHORES      512
+#define NUM_GROUPS      64
+#define SKIP_CHORES     (NUM_CHORES/4)
+#define SKIP_GROUPS     (NUM_GROUPS/4)
+#define EXCEPTION_DESCR1 "Test exception 1"
+#define EXCEPTION_DESCR2 "Test exception 2"
+
+atomic_t g_ExceptionCount;
+atomic_t g_TaskCount;
+unsigned g_ExecutedAtCancellation;
+bool g_Rethrow;
+bool g_Throw;
+#if __TBB_SILENT_CANCELLATION_BROKEN
+    volatile bool g_CancellationPropagationInProgress;
+    #define CATCH_ANY()                                     \
+        __TBB_CATCH( ... ) {                                \
+            if ( g_CancellationPropagationInProgress ) {    \
+                if ( g_Throw ) {                            \
+                    exceptionCaught = true;                 \
+                    ++g_ExceptionCount;                     \
+                }                                           \
+            } else                                          \
+                ASSERT( false, "Unknown exception" );       \
+        }
+#else
+    #define CATCH_ANY()  __TBB_CATCH( ... ) { ASSERT( __TBB_EXCEPTION_TYPE_INFO_BROKEN, "Unknown exception" ); }
+#endif
+
+inline
+void ResetGlobals ( bool bThrow, bool bRethrow ) {
+    g_Throw = bThrow;
+    g_Rethrow = bRethrow;
+#if __TBB_SILENT_CANCELLATION_BROKEN
+    g_CancellationPropagationInProgress = false;
+#endif
+    g_ExceptionCount = 0;
+    g_TaskCount = 0;
+    Harness::ConcurrencyTracker::Reset();
+}
+
+class ThrowingTask : NoAssign, Harness::NoAfterlife {
+    atomic_t &m_TaskCount;
+public:
+    ThrowingTask( atomic_t& counter ) : m_TaskCount(counter) {}
+    void operator() () const {
+        Harness::ConcurrencyTracker ct;
+        AssertLive();
+        if ( g_Throw ) {
+            if ( ++m_TaskCount == SKIP_CHORES ) 
+                __TBB_THROW( test_exception(EXCEPTION_DESCR1) );
+            __TBB_Yield();
+        }
+        else {
+            ++g_TaskCount;
+            while( !Concurrency::is_current_task_group_canceling() )
+                __TBB_Yield();
+        }
+    }
+};
+
+void LaunchChildren () {
+    atomic_t count;
+    count = 0;
+    Concurrency::task_group g;
+    bool exceptionCaught = false;
+    for( unsigned i = 0; i < NUM_CHORES; ++i )
+        g.run( ThrowingTask(count) );
+    Concurrency::task_group_status status = Concurrency::not_complete;
+    __TBB_TRY {
+        status = g.wait();
+    } __TBB_CATCH ( TestException& e ) {
+#if TBB_USE_EXCEPTIONS
+        ASSERT( e.what(), "Empty what() string" );
+        ASSERT( __TBB_EXCEPTION_TYPE_INFO_BROKEN || strcmp(e.what(), EXCEPTION_DESCR1) == 0, "Unknown exception" );
+#endif /* TBB_USE_EXCEPTIONS */
+        exceptionCaught = true;
+        ++g_ExceptionCount;
+    } CATCH_ANY();
+    ASSERT( !g_Throw || exceptionCaught || status == Concurrency::canceled, "No exception in the child task group" );
+    if ( g_Rethrow && g_ExceptionCount > SKIP_GROUPS ) {
+#if __TBB_SILENT_CANCELLATION_BROKEN
+        g_CancellationPropagationInProgress = true;
+#endif
+        __TBB_THROW( test_exception(EXCEPTION_DESCR2) );
+    }
+}
+
+#if TBB_USE_EXCEPTIONS
+void TestEh1 () {
+    ResetGlobals( true, false );
+    Concurrency::task_group rg;
+    for( unsigned i = 0; i < NUM_GROUPS; ++i )
+        // TBB version does not require taking function address 
+        rg.run( &LaunchChildren );
+    try {
+        rg.wait();
+    } catch ( ... ) {
+        ASSERT( false, "Unexpected exception" );
+    }
+    ASSERT( g_ExceptionCount <= NUM_GROUPS, "Too many exceptions from the child groups. The test is broken" );
+    ASSERT( g_ExceptionCount == NUM_GROUPS, "Not all child groups threw the exception" );
+}
+
+void TestEh2 () {
+    ResetGlobals( true, true );
+    Concurrency::task_group rg;
+    bool exceptionCaught = false;
+    for( unsigned i = 0; i < NUM_GROUPS; ++i )
+        // TBB version does not require taking function address 
+        rg.run( &LaunchChildren );
+    try {
+        rg.wait();
+    } catch ( TestException& e ) {
+        ASSERT( e.what(), "Empty what() string" );
+        ASSERT( __TBB_EXCEPTION_TYPE_INFO_BROKEN || strcmp(e.what(), EXCEPTION_DESCR2) == 0, "Unknown exception" );
+        ASSERT ( !rg.is_canceling(), "wait() has not reset cancellation state" );
+        exceptionCaught = true;
+    } CATCH_ANY();
+    ASSERT( exceptionCaught, "No exception thrown from the root task group" );
+    ASSERT( g_ExceptionCount >= SKIP_GROUPS, "Too few exceptions from the child groups. The test is broken" );
+    ASSERT( g_ExceptionCount <= NUM_GROUPS - SKIP_GROUPS, "Too many exceptions from the child groups. The test is broken" );
+    ASSERT( g_ExceptionCount < NUM_GROUPS - SKIP_GROUPS, "None of the child groups was cancelled" );
+}
+#endif /* TBB_USE_EXCEPTIONS */
+
+//------------------------------------------------------------------------
+// Tests for manual cancellation of the task_group hierarchy
+//------------------------------------------------------------------------
+
+void TestCancellation1 () {
+    ResetGlobals( false, false );
+    Concurrency::task_group rg;
+    for( unsigned i = 0; i < NUM_GROUPS; ++i )
+        // TBB version does not require taking function address 
+        rg.run( &LaunchChildren );
+    ASSERT ( !Concurrency::is_current_task_group_canceling(), "Unexpected cancellation" );
+    ASSERT ( !rg.is_canceling(), "Unexpected cancellation" );
+#if __TBB_SILENT_CANCELLATION_BROKEN
+    g_CancellationPropagationInProgress = true;
+#endif
+    while ( g_MaxConcurrency > 1 && g_TaskCount == 0 )
+        __TBB_Yield();
+    rg.cancel();
+    g_ExecutedAtCancellation = g_TaskCount;
+    ASSERT ( rg.is_canceling(), "No cancellation reported" );
+    rg.wait();
+    ASSERT( g_TaskCount <= NUM_GROUPS * NUM_CHORES, "Too many tasks reported. The test is broken" );
+    ASSERT( g_TaskCount < NUM_GROUPS * NUM_CHORES, "No tasks were cancelled. Cancellation model changed?" );
+    ASSERT( g_TaskCount <= g_ExecutedAtCancellation + Harness::ConcurrencyTracker::PeakParallelism(), "Too many tasks survived cancellation" );
+}
+
+//------------------------------------------------------------------------
+// Tests for manual cancellation of the structured_task_group hierarchy
+//------------------------------------------------------------------------
+
+void StructuredLaunchChildren () {
+    atomic_t count;
+    count = 0;
+    Concurrency::structured_task_group g;
+    bool exceptionCaught = false;
+    typedef Concurrency::task_handle<ThrowingTask> handle_type;
+    static const unsigned hSize = sizeof(handle_type);
+    char handles[NUM_CHORES * hSize];
+    for( unsigned i = 0; i < NUM_CHORES; ++i ) {
+        handle_type *h = (handle_type*)(handles + i * hSize);
+        new ( h ) handle_type( ThrowingTask(count) );
+        g.run( *h );
+    }
+    __TBB_TRY {
+        g.wait();
+    } __TBB_CATCH( TestException& e ) {
+#if TBB_USE_EXCEPTIONS
+        ASSERT( e.what(), "Empty what() string" );
+        ASSERT( __TBB_EXCEPTION_TYPE_INFO_BROKEN || strcmp(e.what(), EXCEPTION_DESCR1) == 0, "Unknown exception" );
+#endif /* TBB_USE_EXCEPTIONS */
+#if __TBB_SILENT_CANCELLATION_BROKEN
+        ASSERT ( !g.is_canceling() || g_CancellationPropagationInProgress, "wait() has not reset cancellation state" );
+#else
+        ASSERT ( !g.is_canceling(), "wait() has not reset cancellation state" );
+#endif
+        exceptionCaught = true;
+        ++g_ExceptionCount;
+    } CATCH_ANY();
+    ASSERT( !g_Throw || exceptionCaught, "No exception in the child task group" );
+    for( unsigned i = 0; i < NUM_CHORES; ++i )
+        ((handle_type*)(handles + i * hSize))->~handle_type();
+    if ( g_Rethrow && g_ExceptionCount > SKIP_GROUPS ) {
+#if __TBB_SILENT_CANCELLATION_BROKEN
+        g_CancellationPropagationInProgress = true;
+#endif
+        __TBB_THROW( test_exception(EXCEPTION_DESCR2) );
+    }
+}
+
+class StructuredCancellationTestDriver {
+    static const unsigned hSize = sizeof(handle_type);
+    char m_handles[NUM_CHORES * hSize];
+
+public:
+    void Launch ( Concurrency::structured_task_group& rg ) {
+        ResetGlobals( false, false );
+        for( unsigned i = 0; i < NUM_GROUPS; ++i ) {
+            handle_type *h = (handle_type*)(m_handles + i * hSize);
+            new ( h ) handle_type( StructuredLaunchChildren );
+            rg.run( *h );
+        }
+        ASSERT ( !Concurrency::is_current_task_group_canceling(), "Unexpected cancellation" );
+        ASSERT ( !rg.is_canceling(), "Unexpected cancellation" );
+#if __TBB_SILENT_CANCELLATION_BROKEN
+        g_CancellationPropagationInProgress = true;
+#endif
+        while ( g_MaxConcurrency > 1 && g_TaskCount == 0 )
+            __TBB_Yield();
+    }
+
+    void Finish () {
+        for( unsigned i = 0; i < NUM_GROUPS; ++i )
+            ((handle_type*)(m_handles + i * hSize))->~handle_type();
+        ASSERT( g_TaskCount <= NUM_GROUPS * NUM_CHORES, "Too many tasks reported. The test is broken" );
+        ASSERT( g_TaskCount < NUM_GROUPS * NUM_CHORES, "No tasks were cancelled. Cancellation model changed?" );
+        ASSERT( g_TaskCount <= g_ExecutedAtCancellation + g_MaxConcurrency, "Too many tasks survived cancellation" );
+    }
+}; // StructuredCancellationTestDriver
+
+void TestStructuredCancellation1 () {
+    StructuredCancellationTestDriver driver;
+    Concurrency::structured_task_group sg;
+    driver.Launch( sg );
+    sg.cancel();
+    g_ExecutedAtCancellation = g_TaskCount;
+    ASSERT ( sg.is_canceling(), "No cancellation reported" );
+    sg.wait();
+    driver.Finish();
+}
+
+#if TBB_USE_EXCEPTIONS
+#if defined(_MSC_VER)
+    #pragma warning (disable: 4127)
+#endif
+
+template<bool Throw>
+void TestStructuredCancellation2 () {
+    bool exception_occurred = false,
+         unexpected_exception = false;
+    StructuredCancellationTestDriver driver;
+    try {
+        Concurrency::structured_task_group tg;
+        driver.Launch( tg );
+        if ( Throw )
+            throw int(); // Initiate stack unwinding
+    }
+    catch ( const Concurrency::missing_wait& e ) {
+        ASSERT( e.what(), "Error message is absent" );
+        exception_occurred = true;
+        unexpected_exception = Throw;
+    }
+    catch ( int ) {
+        exception_occurred = true;
+        unexpected_exception = !Throw;
+    }
+    catch ( ... ) {
+        exception_occurred = unexpected_exception = true;
+    }
+    ASSERT( exception_occurred, NULL );
+    ASSERT( !unexpected_exception, NULL );
+    driver.Finish();
+}
+#endif /* TBB_USE_EXCEPTIONS */
+
+void EmptyFunction () {}
+
+void TestStructuredWait () {
+    Concurrency::structured_task_group sg;
+    handle_type h(EmptyFunction);
+    sg.run(h);
+    sg.wait();
+    handle_type h2(EmptyFunction);
+    sg.run(h2);
+    sg.wait();
+}
+
+int TestMain () {
+    REMARK ("Testing %s task_group functionality\n", TBBTEST_USE_TBB ? "TBB" : "PPL");
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        g_MaxConcurrency = p;
+#if TBBTEST_USE_TBB
+        tbb::task_scheduler_init init(p);
+#else
+        Concurrency::SchedulerPolicy sp( 4,
+                                Concurrency::SchedulerKind, Concurrency::ThreadScheduler,
+                                Concurrency::MinConcurrency, 1,
+                                Concurrency::MaxConcurrency    , p,
+                                Concurrency::TargetOversubscriptionFactor, 1);
+        Concurrency::Scheduler  *s = Concurrency::Scheduler::Create( sp );
+#endif /* !TBBTEST_USE_TBB */
+        if ( p > 1 ) {
+            TestParallelSpawn();
+            TestParallelWait();
+            TestVagabondGroup();
+        }
+        TestFib1();
+        TestFib2();
+        TestTaskHandle();
+        TestTaskHandle2<Concurrency::task_group>();
+        TestTaskHandle2<Concurrency::structured_task_group>();
+#if __TBB_LAMBDAS_PRESENT
+        TestFibWithLambdas();
+        TestFibWithMakeTask();
+#endif
+        TestCancellation1();
+        TestStructuredCancellation1();
+#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
+        TestEh1();
+        TestEh2();
+        TestStructuredWait();
+        TestStructuredCancellation2<true>();
+        TestStructuredCancellation2<false>();
+#endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN */
+#if !TBBTEST_USE_TBB
+        s->Release();
+#endif
+    }
+#if __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
+    REPORT("Known issue: exception handling tests are skipped.\n");
+#endif
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_task_leaks.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_task_leaks.cpp
new file mode 100644 (file)
index 0000000..8d5c012
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+/*  The test uses the "single producer, multiple consumers" (SPMC) pattern to check
+    whether the memory of the tasks stolen by consumer threads is returned to the
+    producer thread and reused.
+
+    The test consists of a series of iterations, each of which executes a task tree.
+    The test fails if the memory consumption does not stabilize within some
+    number of iterations.
+
+    After the memory consumption has stabilized, the memory state is perturbed by
+    switching the producer thread, and the check is repeated.
+*/
+
+#define  __TBB_COUNT_TASK_NODES 1
+#include "harness_inject_scheduler.h"
+
+#include "tbb/atomic.h"
+#include "harness_assert.h"
+#include <cstdlib>
+
+
+// Test configuration parameters
+
+//! Maximal number of test iterations
+const int MaxIterations = 600;
+//! Number of iterations during which the memory consumption must stabilize
+const int AsymptoticRange = 100;
+//! Number of times the memory state is perturbed to repeat the check
+const int NumProducerSwitches = 2;
+//! Number of iterations after which the success of producer switch is checked
+const int ProducerCheckTimeout = 10;
+//! Number of initial iterations used to collect statistics for later checks
+const int InitialStatsIterations = 20;
+
+tbb::atomic<int> Count;
+tbb::atomic<tbb::task*> Exchanger;
+tbb::internal::scheduler* Producer;
+
+#include "tbb/task_scheduler_init.h"
+
+#define HARNESS_DEFAULT_MIN_THREADS -1
+#include "harness.h"
+
+using namespace tbb;
+using namespace tbb::internal;
+
+class ChangeProducer: public tbb::task {
+public:
+    /*override*/ tbb::task* execute() {
+        if( is_stolen_task() ) {
+            Producer = internal::governor::local_scheduler();
+        }
+        return NULL;
+    }
+};
+
+class TaskGenerator: public tbb::task {
+    const int my_child_count;
+    int my_depth;
+public:
+    TaskGenerator(int child_count, int depth) : my_child_count(child_count), my_depth(depth) {
+        ASSERT(my_child_count>1, "The TaskGenerator should produce at least two children");
+    }
+    /*override*/ tbb::task* execute() {
+        if( my_depth>0 ) {
+            int child_count = my_child_count;
+            scheduler* my_sched = internal::governor::local_scheduler();
+            tbb::task& c  = *new( tbb::task::allocate_continuation() ) tbb::empty_task;
+            c.set_ref_count( child_count );
+            recycle_as_child_of(c);
+            --child_count;
+            if( Producer==my_sched ) {
+                // produce a task and put it into Exchanger
+                tbb::task* t = new( c.allocate_child() ) tbb::empty_task;
+                --child_count;
+                t = Exchanger.fetch_and_store(t);
+                if( t ) spawn(*t);
+            } else {
+                tbb::task* t = Exchanger.fetch_and_store(NULL);
+                if( t ) spawn(*t);
+            }
+            while( child_count ) {
+                tbb::task* t = new( c.allocate_child() ) TaskGenerator(my_child_count, my_depth-1);
+                if( my_depth >4 ) enqueue(*t);
+                else              spawn(*t);
+                --child_count;
+            }
+            --my_depth;
+            return this;
+        } else {
+            tbb::task* t = Exchanger.fetch_and_store(NULL);
+            if( t ) spawn(*t);
+            return NULL;
+        }
+    }
+};
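+
+// The Exchanger slot hands tasks allocated by the producer thread to whatever
+// thread runs a TaskGenerator next, so task objects are routinely freed on a
+// thread other than the one that allocated them; TestTaskReclamation() below then
+// verifies that this memory is returned to the producer and reused rather than
+// accumulating.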
+
+#include "harness_memory.h"
+#if _MSC_VER==1500 && !defined(__INTEL_COMPILER)
+    // VS2008/VC9 seems to have an issue
+    #pragma warning( push )
+    #pragma warning( disable: 4985 )
+#endif
+#include <math.h>
+#if _MSC_VER==1500 && !defined(__INTEL_COMPILER)
+    #pragma warning( pop )
+#endif
+
+void RunTaskGenerators( bool switchProducer = false, bool checkProducer = false ) {
+    if( switchProducer )
+        Producer = NULL;
+    tbb::task* dummy_root = new( tbb::task::allocate_root() ) tbb::empty_task;
+    dummy_root->set_ref_count( 2 );
+    // If no producer, start elections; some worker will take the role
+    if( Producer )
+        dummy_root->spawn( *new( dummy_root->allocate_child() ) tbb::empty_task );
+    else
+        dummy_root->spawn( *new( dummy_root->allocate_child() ) ChangeProducer );
+    if( checkProducer && !Producer )
+        REPORT("Warning: producer has not changed after 10 attempts; running on a single core?\n");
+    for( int j=0; j<100; ++j ) {
+        if( j&1 ) {
+            tbb::task& t = *new( tbb::task::allocate_root() ) TaskGenerator(/*child_count=*/4, /*depth=*/6);
+            tbb::task::spawn_root_and_wait(t);
+        } else {
+            tbb::task& t = *new (dummy_root->allocate_additional_child_of(*dummy_root))
+                                TaskGenerator(/*child_count=*/4, /*depth=*/6);
+            tbb::task::enqueue(t);
+        }
+    }
+    dummy_root->wait_for_all();
+    dummy_root->destroy( *dummy_root );
+}
+
+//! Tests whether task scheduler allows thieves to hoard task objects.
+/** The test takes a while to run, so we run it only with the default
+    number of threads. */
+void TestTaskReclamation() {
+    REMARK("testing task reclamation\n");
+
+    size_t initial_amount_of_memory = 0;
+    double task_count_sum = 0;
+    double task_count_sum_square = 0;
+    double average, sigma;
+
+    tbb::task_scheduler_init init (MinThread);
+    REMARK("Starting with %d threads\n", MinThread);
+    // For now, the master will produce "additional" tasks; later a worker will replace it;
+    Producer  = internal::governor::local_scheduler();
+    int N = InitialStatsIterations;
+    // First N iterations fill internal buffers and collect initial statistics
+    for( int i=0; i<N; ++i ) {
+        RunTaskGenerators();
+
+        size_t m = GetMemoryUsage();
+        if( m-initial_amount_of_memory > 0)
+            initial_amount_of_memory = m;
+
+        intptr_t n = internal::governor::local_scheduler()->get_task_node_count( /*count_arena_workers=*/true );
+        task_count_sum += n;
+        task_count_sum_square += n*n;
+
+        REMARK( "Consumed %ld bytes and %ld objects (iteration=%d)\n", long(m), long(n), i );
+    }
+    // Calculate statistical values
+    average = task_count_sum / N;
+    sigma   = sqrt( (task_count_sum_square - task_count_sum*task_count_sum/N)/N );
+    REMARK("Average task count: %g, sigma: %g, sum: %g, square sum:%g \n", average, sigma, task_count_sum, task_count_sum_square);
+
+    int     last_error_iteration = 0,
+            producer_switch_iteration = 0,
+            producer_switches = 0;
+    bool    switchProducer = false, 
+            checkProducer = false;
+    for( int i=0; i < MaxIterations; ++i ) {
+        // These iterations check for excessive memory use and unreasonable task count
+        RunTaskGenerators( switchProducer, checkProducer );
+
+        intptr_t n = internal::governor::local_scheduler()->get_task_node_count( /*count_arena_workers=*/true );
+        size_t m = GetMemoryUsage();
+
+        if( (m-initial_amount_of_memory > 0) && (n > average+4*sigma) ) {
+            // Use 4*sigma interval (for normal distribution, 3*sigma contains ~99% of values).
+            REMARK( "Warning: possible leak of up to %ld bytes; currently %ld cached task objects (iteration=%d)\n",
+                    static_cast<unsigned long>(m-initial_amount_of_memory), long(n), i );
+            last_error_iteration = i;
+            initial_amount_of_memory = m;
+        } else {
+            REMARK( "Consumed %ld bytes and %ld objects (iteration=%d)\n", long(m), long(n), i );
+        }
+        if ( i == last_error_iteration + AsymptoticRange ) {
+            if ( producer_switches++ == NumProducerSwitches )
+                break;
+            else {
+                last_error_iteration = producer_switch_iteration = i;
+                switchProducer = true;
+            }
+        }
+        else {
+            switchProducer = false;
+            checkProducer = producer_switch_iteration && (i == producer_switch_iteration + ProducerCheckTimeout);
+        }
+    }
+    ASSERT( last_error_iteration < MaxIterations - AsymptoticRange, "The number of allocated tasks keeps growing; a leak is possible." );
+}
+
+int TestMain () {
+    if( !GetMemoryUsage() ) {
+        REMARK("GetMemoryUsage is not implemented for this platform\n");
+        return Harness::Skipped;
+    }
+    TestTaskReclamation();
+    return Harness::Done;
+}
+
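For reference, the file above drives TBB's low-level blocking-style task interface (allocate_root, allocate_child, set_ref_count, spawn_and_wait_for_all). The following standalone sketch shows that same pattern outside the test harness; FibTask and its names are illustrative only, and the sketch assumes just the TBB 3.0 headers added by this commit.

    #include "tbb/task.h"
    #include "tbb/task_scheduler_init.h"
    #include <cstdio>

    // Blocking-style recursive task: each task allocates children, spawns them,
    // and waits for them to finish before combining the results.
    class FibTask : public tbb::task {
        const long n;
        long* const result;
    public:
        FibTask( long n_, long* result_ ) : n(n_), result(result_) {}
        /*override*/ tbb::task* execute() {
            if( n < 2 ) {
                *result = n;
            } else {
                long x = 0, y = 0;
                FibTask& a = *new( allocate_child() ) FibTask( n-1, &x );
                FibTask& b = *new( allocate_child() ) FibTask( n-2, &y );
                set_ref_count( 3 );              // two children plus one for the wait
                spawn( b );
                spawn_and_wait_for_all( a );
                *result = x + y;
            }
            return NULL;
        }
    };

    int main() {
        tbb::task_scheduler_init init;           // default number of worker threads
        long fib = 0;
        FibTask& root = *new( tbb::task::allocate_root() ) FibTask( 20, &fib );
        tbb::task::spawn_root_and_wait( root );
        std::printf( "fib(20) = %ld\n", fib );
        return 0;
    }

The reclamation test above uses the same calls, plus allocate_continuation() and task::enqueue(), to keep a steady stream of task objects flowing between threads.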
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_task_scheduler_init.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_task_scheduler_init.cpp
new file mode 100644 (file)
index 0000000..1b953c5
--- /dev/null
@@ -0,0 +1,151 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/task_scheduler_init.h"
+#include <cstdlib>
+#include "harness_assert.h"
+
+#include <cstdio>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <stdexcept>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+#include "harness.h"
+
+//! Test that task_scheduler_init's initialize and terminate work when doing nothing else.
+/** maxthread is treated as the "maximum" number of worker threads. */
+void InitializeAndTerminate( int maxthread ) {
+    __TBB_TRY {
+        for( int i=0; i<200; ++i ) {
+            switch( i&3 ) {
+                default: {
+                    tbb::task_scheduler_init init( std::rand() % maxthread + 1 );
+                    ASSERT(init.is_active(), NULL);
+                    break;
+                }
+                case 0: {   
+                    tbb::task_scheduler_init init;
+                    ASSERT(init.is_active(), NULL);
+                    break;
+                }
+                case 1: {
+                    tbb::task_scheduler_init init( tbb::task_scheduler_init::automatic );
+                    ASSERT(init.is_active(), NULL);
+                    break;
+                }
+                case 2: {
+                    tbb::task_scheduler_init init( tbb::task_scheduler_init::deferred );
+                    ASSERT(!init.is_active(), "init should not be active; initialization was deferred");
+                    init.initialize( std::rand() % maxthread + 1 );
+                    ASSERT(init.is_active(), NULL);
+                    init.terminate();
+                    ASSERT(!init.is_active(), "init should not be active; it was terminated");
+                    break;
+                }
+            }
+        }
+    } __TBB_CATCH( std::runtime_error& error ) {
+#if TBB_USE_EXCEPTIONS
+        REPORT("ERROR: %s\n", error.what() );
+#endif /* TBB_USE_EXCEPTIONS */
+    }
+}
+
+#if _WIN64
+namespace std {      // 64-bit Windows compilers have not caught up with the 1998 ISO C++ standard
+    using ::srand;
+}
+#endif /* _WIN64 */
+
+struct ThreadedInit {
+    void operator()( int ) const {
+        InitializeAndTerminate(MaxThread);
+    }
+};
+
+#if _MSC_VER
+#include "tbb/machine/windows_api.h"
+#include <tchar.h>
+#endif /* _MSC_VER */
+
+#include "harness_concurrency_tracker.h"
+#include "tbb/parallel_for.h"
+#include "tbb/blocked_range.h"
+
+typedef tbb::blocked_range<int> Range;
+
+class ConcurrencyTrackingBody {
+public:
+    void operator() ( const Range& ) const {
+        Harness::ConcurrencyTracker ct;
+        for ( volatile int i = 0; i < 1000000; ++i )
+            ;
+    }
+};
+
+/** The test will fail in particular if task_scheduler_init mistakenly hooks up 
+    the auto-initialization mechanism. **/
+void AssertExplicitInitIsNotSupplanted () {
+    int hardwareConcurrency = tbb::task_scheduler_init::default_num_threads();
+    tbb::task_scheduler_init init(1);
+    Harness::ConcurrencyTracker::Reset();
+    tbb::parallel_for( Range(0, hardwareConcurrency * 2, 1), ConcurrencyTrackingBody(), tbb::simple_partitioner() );
+    ASSERT( Harness::ConcurrencyTracker::PeakParallelism() == 1, 
+            "Manual init provided more threads than requested. See also the comment at the beginning of main()." );
+}
+
+int TestMain () {
+    // Do not use tbb::task_scheduler_init directly in the scope of main's body,
+    // as a static variable, or as a member of a static variable.
+#if _MSC_VER && !__TBB_NO_IMPLICIT_LINKAGE
+    #ifdef _DEBUG
+        ASSERT(!GetModuleHandle(_T("tbb.dll")) && GetModuleHandle(_T("tbb_debug.dll")),
+            "test linked with wrong (non-debug) tbb library");
+    #else
+        ASSERT(!GetModuleHandle(_T("tbb_debug.dll")) && GetModuleHandle(_T("tbb.dll")),
+            "test linked with wrong (debug) tbb library");
+    #endif
+#endif /* _MSC_VER && !__TBB_NO_IMPLICIT_LINKAGE */
+    std::srand(2);
+    InitializeAndTerminate(MaxThread);
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        REMARK("testing with %d threads\n", p );
+        NativeParallelFor( p, ThreadedInit() );
+    }
+    AssertExplicitInitIsNotSupplanted();
+    return Harness::Done;
+}
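The test above cycles through the three construction modes of tbb::task_scheduler_init (default, automatic, deferred). A minimal standalone sketch of the deferred mode, which is the one that requires explicit initialize()/terminate() calls; the Work functor and thread count are illustrative only.

    #include "tbb/task_scheduler_init.h"
    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"

    struct Work {
        void operator()( const tbb::blocked_range<int>& r ) const {
            for( int i = r.begin(); i != r.end(); ++i )
                ;   // per-element work would go here
        }
    };

    int main() {
        // Deferred construction: no scheduler exists until initialize() is called.
        tbb::task_scheduler_init init( tbb::task_scheduler_init::deferred );
        init.initialize( 2 );                                   // request two threads
        tbb::parallel_for( tbb::blocked_range<int>(0, 1000), Work() );
        init.terminate();                                       // explicit shutdown
        return 0;
    }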
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_task_scheduler_observer.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_task_scheduler_observer.cpp
new file mode 100644 (file)
index 0000000..99936f8
--- /dev/null
@@ -0,0 +1,131 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/task_scheduler_observer.h"
+
+typedef uintptr_t FlagType;
+const int MaxFlagIndex = sizeof(FlagType)*8-1;
+
+class MyObserver: public tbb::task_scheduler_observer {
+    FlagType flags;
+    /*override*/ void on_scheduler_entry( bool is_worker );
+    /*override*/ void on_scheduler_exit( bool is_worker );
+public:
+    MyObserver( FlagType flags_ ) : flags(flags_) {
+        observe(true);
+    }
+};
+
+#include "harness_assert.h"
+#include "tbb/atomic.h"
+
+tbb::atomic<int> EntryCount;
+tbb::atomic<int> ExitCount;
+
+struct State {
+    FlagType MyFlags;
+    bool IsMaster;
+    State() : MyFlags(), IsMaster() {}
+};
+
+#include "../tbb/tls.h"
+tbb::internal::tls<State*> LocalState;
+
+void MyObserver::on_scheduler_entry( bool is_worker ) {
+    State& state = *LocalState;
+    ASSERT( is_worker==!state.IsMaster, NULL );
+#if !__TBB_ARENA_PER_MASTER
+    ASSERT( (state.MyFlags & flags)==0, NULL );
+#endif /* !__TBB_ARENA_PER_MASTER */
+    ++EntryCount;
+    state.MyFlags |= flags;
+}
+
+void MyObserver::on_scheduler_exit( bool is_worker ) {
+    State& state = *LocalState;
+    ASSERT( is_worker==!state.IsMaster, NULL );
+    ++ExitCount;
+    state.MyFlags &= ~flags;
+}
+
+#include "tbb/task.h"
+
+class FibTask: public tbb::task {
+    const int n;
+    FlagType flags;
+public:
+    FibTask( int n_, FlagType flags_ ) : n(n_), flags(flags_) {}
+    /*override*/ tbb::task* execute() {
+        ASSERT( !(~LocalState->MyFlags & flags), NULL );
+        if( n>=2 ) {
+            set_ref_count(3);
+            spawn(*new( tbb::task::allocate_child() ) FibTask(n-1,flags));
+            spawn_and_wait_for_all(*new( tbb::task::allocate_child() ) FibTask(n-2,flags));
+        }
+        return NULL;
+    }
+};
+
+void DoFib( FlagType flags ) {
+    tbb::task* t = new( tbb::task::allocate_root() ) FibTask(10,flags);
+    tbb::task::spawn_root_and_wait(*t);
+}
+
+#include "tbb/task_scheduler_init.h"
+#include "harness.h"
+
+class DoTest {
+    int nthread;
+public:
+    DoTest( int n ) : nthread(n) {}
+    void operator()( int i ) const {
+        LocalState->IsMaster = true;
+        if( i==0 ) {   
+            tbb::task_scheduler_init init(nthread);
+            DoFib(0);
+        } else {
+            FlagType f = i<=MaxFlagIndex? 1<<i : 0;
+            MyObserver w(f);
+            tbb::task_scheduler_init init(nthread);
+            DoFib(f);
+        }
+    }
+};
+
+void TestObserver( int p, int q ) {
+    NativeParallelFor( p, DoTest(q) );
+}
+
+int TestMain () {
+    for( int p=MinThread; p<=MaxThread; ++p ) 
+        for( int q=MinThread; q<=MaxThread; ++q ) 
+            TestObserver(p,q);
+    ASSERT( EntryCount>0, "on_scheduler_entry not exercised" );
+    ASSERT( ExitCount>0, "on_scheduler_exit not exercised" );
+    return Harness::Done;
+}
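The observer test above checks that on_scheduler_entry/on_scheduler_exit fire for both masters and workers. As a sketch of how the same API is typically used by client code, the observer below simply counts threads entering the scheduler; EntryCounter and DummyWork are illustrative names, not part of the committed sources.

    #include "tbb/task_scheduler_observer.h"
    #include "tbb/task_scheduler_init.h"
    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"
    #include "tbb/atomic.h"
    #include <cstdio>

    // Counts every thread (master or worker) that enters the scheduler.
    class EntryCounter : public tbb::task_scheduler_observer {
        tbb::atomic<int> entries;
        /*override*/ void on_scheduler_entry( bool /*is_worker*/ ) { ++entries; }
        /*override*/ void on_scheduler_exit( bool /*is_worker*/ ) {}
    public:
        EntryCounter() { entries = 0; observe( true ); }   // start observing immediately
        int count() const { return entries; }
    };

    struct DummyWork {
        void operator()( const tbb::blocked_range<int>& ) const {}
    };

    int main() {
        EntryCounter counter;                 // created before the scheduler, as in the test
        tbb::task_scheduler_init init;
        tbb::parallel_for( tbb::blocked_range<int>(0, 1<<20), DummyWork() );
        std::printf( "threads that entered the scheduler: %d\n", counter.count() );
        return 0;
    }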
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_tbb_condition_variable.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_tbb_condition_variable.cpp
new file mode 100644 (file)
index 0000000..74f2d6d
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "test_condition_variable.h"
+
+int TestMain() {
+    REMARK( "testing with tbb condvar\n" );
+    DoCondVarTest<tbb::mutex,tbb::recursive_mutex>();
+    return Harness::Done;
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_tbb_header.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_tbb_header.cpp
new file mode 100644 (file)
index 0000000..ffffdc6
--- /dev/null
@@ -0,0 +1,185 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+/**
+    This test ensures that tbb.h brings in all the public TBB interface definitions,
+    and that all the necessary symbols are exported from the library.
+
+    Most of the checks happen at the compilation or link phases.
+**/
+
+#include "tbb/tbb.h"
+
+static volatile size_t g_sink;
+
+#define TestTypeDefinitionPresence( Type ) g_sink = sizeof(tbb::Type);
+#define TestTypeDefinitionPresence2(TypeStart, TypeEnd) g_sink = sizeof(tbb::TypeStart,TypeEnd);
+#define TestFuncDefinitionPresence(Fn, Args, ReturnType) { ReturnType (*pfn)Args = &tbb::Fn; (void)pfn; }
+
+struct Body {
+    void operator() () const {}
+};
+struct Body1 {
+    void operator() ( int ) const {}
+};
+struct Body1a {
+    int operator() ( const tbb::blocked_range<int>&, const int ) const { return 0; }
+};
+struct Body1b {
+    int operator() ( const int, const int ) const { return 0; }
+};
+struct Body2 {
+    Body2 () {}
+    Body2 ( const Body2&, tbb::split ) {}
+    void operator() ( const tbb::blocked_range<int>& ) const {}
+    void join( const Body2& ) {}
+};
+struct Body3 {
+    Body3 () {}
+    Body3 ( const Body3&, tbb::split ) {}
+    void operator() ( const tbb::blocked_range2d<int>&, tbb::pre_scan_tag ) const {}
+    void operator() ( const tbb::blocked_range2d<int>&, tbb::final_scan_tag ) const {}
+    void reverse_join( Body3& ) {}
+    void assign( const Body3& ) {}
+};
+
+#if !__TBB_TEST_SECONDARY
+
+#define HARNESS_NO_PARSE_COMMAND_LINE 1
+#include "harness.h"
+
+// Test if all the necessary symbols are exported for the exceptions thrown by TBB.
+// Missing exports result either in link error or in runtime assertion failure.
+#include "tbb/tbb_exception.h"
+
+template <typename E>
+void TestExceptionClassExports ( const E& exc, tbb::internal::exception_id eid ) {
+    // The assertion here serves to shut up warnings about "eid not used". 
+    ASSERT( eid<tbb::internal::eid_max, NULL );
+#if TBB_USE_EXCEPTIONS
+    for ( int i = 0; i < 2; ++i ) {
+        try {
+            if ( i == 0 )
+                throw exc;
+#if !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
+            else
+                tbb::internal::throw_exception( eid );
+#endif
+        }
+        catch ( E& e ) {
+            ASSERT ( e.what(), "Missing what() string" );
+        }
+        catch ( ... ) {
+            ASSERT ( __TBB_EXCEPTION_TYPE_INFO_BROKEN, "Unrecognized exception. Likely RTTI related exports are missing" );
+        }
+    }
+#else /* !TBB_USE_EXCEPTIONS */
+    (void)exc;
+#endif /* !TBB_USE_EXCEPTIONS */
+}
+
+void TestExceptionClassesExports () {
+    TestExceptionClassExports( std::bad_alloc(), tbb::internal::eid_bad_alloc );
+    TestExceptionClassExports( tbb::bad_last_alloc(), tbb::internal::eid_bad_last_alloc );
+    TestExceptionClassExports( std::invalid_argument("test"), tbb::internal::eid_nonpositive_step );
+    TestExceptionClassExports( std::out_of_range("test"), tbb::internal::eid_out_of_range );
+    TestExceptionClassExports( std::range_error("test"), tbb::internal::eid_segment_range_error );
+    TestExceptionClassExports( std::range_error("test"), tbb::internal::eid_index_range_error );
+    TestExceptionClassExports( tbb::missing_wait(), tbb::internal::eid_missing_wait );
+    TestExceptionClassExports( tbb::invalid_multiple_scheduling(), tbb::internal::eid_invalid_multiple_scheduling );
+    TestExceptionClassExports( tbb::improper_lock(), tbb::internal::eid_improper_lock );
+}
+#endif /* !__TBB_TEST_SECONDARY */
+
+
+#if __TBB_TEST_SECONDARY
+/* This mode is used to produce a secondary object file that is linked with 
+   the main one in order to detect a "multiple definition" linker error.
+*/
+void secondary()
+#else
+int TestMain ()
+#endif
+{
+    TestTypeDefinitionPresence2(aligned_space<int, 1> );
+    TestTypeDefinitionPresence( atomic<int> );
+    TestTypeDefinitionPresence( cache_aligned_allocator<int> );
+    TestTypeDefinitionPresence( tbb_hash_compare<int> );
+    TestTypeDefinitionPresence2(concurrent_hash_map<int, int> );
+    TestTypeDefinitionPresence2(concurrent_unordered_map<int, int> );
+    TestTypeDefinitionPresence( concurrent_bounded_queue<int> );
+    TestTypeDefinitionPresence( deprecated::concurrent_queue<int> );
+    TestTypeDefinitionPresence( strict_ppl::concurrent_queue<int> );
+    TestTypeDefinitionPresence( combinable<int> );
+    TestTypeDefinitionPresence( concurrent_vector<int> );
+    TestTypeDefinitionPresence( enumerable_thread_specific<int> );
+    TestTypeDefinitionPresence( mutex );
+    TestTypeDefinitionPresence( null_mutex );
+    TestTypeDefinitionPresence( null_rw_mutex );
+    TestTypeDefinitionPresence( queuing_mutex );
+    TestTypeDefinitionPresence( queuing_rw_mutex );
+    TestTypeDefinitionPresence( recursive_mutex );
+    TestTypeDefinitionPresence( spin_mutex );
+    TestTypeDefinitionPresence( spin_rw_mutex );
+    TestTypeDefinitionPresence( critical_section );
+    TestTypeDefinitionPresence( reader_writer_lock );
+    TestTypeDefinitionPresence( tbb_exception );
+    TestTypeDefinitionPresence( captured_exception );
+    TestTypeDefinitionPresence( movable_exception<int> );
+#if !TBB_USE_CAPTURED_EXCEPTION
+    TestTypeDefinitionPresence( internal::tbb_exception_ptr );
+#endif /* !TBB_USE_CAPTURED_EXCEPTION */
+    TestTypeDefinitionPresence( blocked_range3d<int> );
+    TestFuncDefinitionPresence( parallel_invoke, (const Body&, const Body&), void );
+    TestFuncDefinitionPresence( parallel_do, (int*, int*, const Body1&), void );
+    TestFuncDefinitionPresence( parallel_for_each, (int*, int*, const Body1&), void );
+    TestFuncDefinitionPresence( parallel_for, (int, int, int, const Body1&), void );
+    TestFuncDefinitionPresence( parallel_for, (const tbb::blocked_range<int>&, const Body2&, const tbb::simple_partitioner&), void );
+    TestFuncDefinitionPresence( parallel_reduce, (const tbb::blocked_range<int>&, const int&, const Body1a&, const Body1b&, const tbb::auto_partitioner&), int );
+    TestFuncDefinitionPresence( parallel_reduce, (const tbb::blocked_range<int>&, Body2&, tbb::affinity_partitioner&), void );
+    TestFuncDefinitionPresence( parallel_scan, (const tbb::blocked_range2d<int>&, Body3&, const tbb::auto_partitioner&), void );
+    TestFuncDefinitionPresence( parallel_sort, (int*, int*), void );
+    TestTypeDefinitionPresence( pipeline );
+    TestFuncDefinitionPresence( parallel_pipeline, (size_t, const tbb::filter_t<void,void>&), void );
+    TestTypeDefinitionPresence( task );
+    TestTypeDefinitionPresence( empty_task );
+    TestTypeDefinitionPresence( task_list );
+    TestTypeDefinitionPresence( task_group_context );
+    TestTypeDefinitionPresence( task_group );
+    TestTypeDefinitionPresence( task_handle<Body> );
+    TestTypeDefinitionPresence( task_scheduler_init );
+    TestTypeDefinitionPresence( task_scheduler_observer );
+    TestTypeDefinitionPresence( tbb_thread );
+    TestTypeDefinitionPresence( tbb_allocator<int> );
+    TestTypeDefinitionPresence( zero_allocator<int> );
+    TestTypeDefinitionPresence( tick_count );
+#if !__TBB_TEST_SECONDARY
+    TestExceptionClassesExports();
+    return Harness::Done;
+#endif
+}
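The header test above only verifies that tbb/tbb.h declares every public facility. As an illustrative companion, the sketch below actually uses a couple of those facilities through the single umbrella header; SumBody is a hypothetical example body, not something defined by the library.

    #include "tbb/tbb.h"     // the umbrella header whose completeness is checked above
    #include <cstdio>

    // Imperative-form reduction body: splitting constructor plus join(),
    // as required by tbb::parallel_reduce.
    struct SumBody {
        long sum;
        SumBody() : sum(0) {}
        SumBody( SumBody&, tbb::split ) : sum(0) {}
        void operator()( const tbb::blocked_range<long>& r ) {
            for( long i = r.begin(); i != r.end(); ++i )
                sum += i;
        }
        void join( const SumBody& rhs ) { sum += rhs.sum; }
    };

    int main() {
        tbb::task_scheduler_init init;
        SumBody body;
        tbb::parallel_reduce( tbb::blocked_range<long>(0, 100000), body );
        std::printf( "sum = %ld\n", body.sum );
        return 0;
    }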
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_tbb_thread.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_tbb_thread.cpp
new file mode 100644 (file)
index 0000000..962dd40
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#define THREAD tbb::tbb_thread
+#define THIS_THREAD tbb::this_tbb_thread
+#define THIS_THREAD_SLEEP THIS_THREAD::sleep
+#include "test_thread.h"
+#include "harness.h"
+
+/* we want to test tbb::tbb_thread */
+int TestMain () {
+    CheckSignatures();
+    RunTests();
+    return Harness::Done;
+}
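test_thread.h (included above) exercises construction, move, swap, join and detach of tbb::tbb_thread. A minimal standalone usage sketch of the same class follows; the Greeter functor and sleep interval are illustrative only.

    #include "tbb/tbb_thread.h"
    #include "tbb/tick_count.h"
    #include <cstdio>

    struct Greeter {
        void operator()( int id ) const {
            tbb::this_tbb_thread::sleep( tbb::tick_count::interval_t(0.01) );
            std::printf( "hello from thread %d\n", id );
        }
    };

    int main() {
        tbb::tbb_thread t1( Greeter(), 1 );   // functor plus one argument, as in RunTests()
        tbb::tbb_thread t2( Greeter(), 2 );
        t1.join();
        t2.join();
        std::printf( "hardware_concurrency() = %u\n",
                     tbb::tbb_thread::hardware_concurrency() );
        return 0;
    }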
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_tbb_version.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_tbb_version.cpp
new file mode 100644 (file)
index 0000000..d6b13f0
--- /dev/null
@@ -0,0 +1,253 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/tbb_stddef.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
+    #pragma warning (push)
+    #pragma warning (disable: 4530)
+#endif
+
+#include <vector>
+#include <string>
+#include <utility>
+
+#if !TBB_USE_EXCEPTIONS && _MSC_VER
+    #pragma warning (pop)
+#endif
+
+#include "tbb/task_scheduler_init.h"
+
+#define HARNESS_CUSTOM_MAIN 1
+#define HARNESS_NO_PARSE_COMMAND_LINE 1
+#define HARNESS_NO_MAIN_ARGS 0
+#include "harness.h"
+
+#if defined (_WIN32) || defined (_WIN64)
+#define TEST_SYSTEM_COMMAND "test_tbb_version.exe @"
+#define putenv _putenv
+#else
+#define TEST_SYSTEM_COMMAND "./test_tbb_version.exe @"
+#endif
+
+enum string_required {
+    required,
+    not_required
+    };
+
+typedef std::pair <std::string, string_required> string_pair;
+
+void initialize_strings_vector(std::vector <string_pair>* vector);
+
+const char stderr_stream[] = "version_test.err";
+const char stdout_stream[] = "version_test.out";
+
+HARNESS_EXPORT
+int main(int argc, char *argv[] ) {
+/* We first introduced runtime version identification in 3014 */
+#if TBB_INTERFACE_VERSION>=3014 
+    // For now, just test that run-time TBB version matches the compile-time version,
+    // since otherwise the subsequent test of "TBB: INTERFACE VERSION" string will fail anyway.
+    // We will need something more clever in the future.
+    ASSERT(tbb::TBB_runtime_interface_version()==TBB_INTERFACE_VERSION,
+           "Running with the library of different version than the test was compiled against");
+#endif
+    __TBB_TRY {
+        FILE *stream_out;
+        FILE *stream_err;   
+        char psBuffer[512];
+        
+        if(argc>1 && argv[1][0] == '@' ) {
+            stream_err = freopen( stderr_stream, "w", stderr );
+            if( stream_err == NULL ){
+                REPORT( "Internal test error (freopen)\n" );
+                exit( 1 );
+            }
+            stream_out = freopen( stdout_stream, "w", stdout );
+            if( stream_out == NULL ){
+                REPORT( "Internal test error (freopen)\n" );
+                exit( 1 );
+            }
+            {
+                tbb::task_scheduler_init init(1);
+            }
+            fclose( stream_out );
+            fclose( stream_err );
+            exit(0);
+        }
+        // Step 1: check that the output is empty if TBB_VERSION is not defined.
+        if ( getenv("TBB_VERSION") ){
+            REPORT( "TBB_VERSION defined, skipping step 1 (empty output check)\n" );
+        }else{
+            if( ( system(TEST_SYSTEM_COMMAND) ) != 0 ){
+                REPORT( "Error (step 1): Internal test error\n" );
+                exit( 1 );
+            }
+            //Checking output streams - they should be empty
+            stream_err = fopen( stderr_stream, "r" );
+            if( stream_err == NULL ){
+                REPORT( "Error (step 1):Internal test error (stderr open)\n" );
+                exit( 1 );
+            }
+            while( !feof( stream_err ) ) {
+                if( fgets( psBuffer, 512, stream_err ) != NULL ){
+                    REPORT( "Error (step 1): stderr should be empty\n" );
+                    exit( 1 );
+                }
+            }
+            fclose( stream_err );
+            stream_out = fopen( stdout_stream, "r" );
+            if( stream_out == NULL ){
+                REPORT( "Error (step 1):Internal test error (stdout open)\n" );
+                exit( 1 );
+            }
+            while( !feof( stream_out ) ) {
+                if( fgets( psBuffer, 512, stream_out ) != NULL ){
+                    REPORT( "Error (step 1): stdout should be empty\n" );
+                    exit( 1 );
+                }
+            }
+            fclose( stream_out );
+        }
+
+        //Setting TBB_VERSION in case it is not set
+        if ( !getenv("TBB_VERSION") ){
+            putenv(const_cast<char*>("TBB_VERSION=1"));
+        }
+
+        if( ( system(TEST_SYSTEM_COMMAND) ) != 0 ){
+            REPORT( "Error (step 2):Internal test error\n" );
+            exit( 1 );
+        }
+        // Check the captured output: stdout must stay empty, stderr must contain the version strings
+        std::vector <string_pair> strings_vector;
+        std::vector <string_pair>::iterator strings_iterator;
+
+        initialize_strings_vector( &strings_vector );
+        strings_iterator = strings_vector.begin();
+
+        stream_out = fopen( stdout_stream, "r" );
+        if( stream_out == NULL ){
+            REPORT( "Error (step 2):Internal test error (stdout open)\n" );
+            exit( 1 );
+        }
+        while( !feof( stream_out ) ) {
+            if( fgets( psBuffer, 512, stream_out ) != NULL ){
+                REPORT( "Error (step 2): stdout should be empty\n" );
+                exit( 1 );
+            }
+        }
+        fclose( stream_out );
+
+        stream_err = fopen( stderr_stream, "r" );
+        if( stream_err == NULL ){
+            REPORT( "Error (step 1):Internal test error (stderr open)\n" );
+            exit( 1 );
+        }
+        
+        int skip_line = 0;
+        
+        while( !feof( stream_err ) ) {
+            if( fgets( psBuffer, 512, stream_err ) != NULL ){
+                do{
+                    if ( strings_iterator == strings_vector.end() ){
+                        REPORT( "Error: version string dictionary ended prematurely.\n" );
+                        REPORT( "No match for: \t%s", psBuffer );
+                        exit( 1 );
+                    }
+                    if ( strstr( psBuffer, strings_iterator->first.c_str() ) == NULL ){
+                        if( strings_iterator->second == required ){
+                            REPORT( "Error: version strings do not match.\n" );
+                            REPORT( "Expected \"%s\" not found in:\n\t%s", strings_iterator->first.c_str(), psBuffer );
+                            exit( 1 );
+                        }else{
+                            //Do we need to print in case there is no non-required string?
+                            skip_line = 1;
+                        }
+                    }else{
+                           skip_line = 0;
+                    }
+                    if ( strings_iterator != strings_vector.end() ) strings_iterator ++;
+                }while( skip_line );
+            }
+        }
+        fclose( stream_err );
+    } __TBB_CATCH(...) {
+        ASSERT( 0,"unexpected exception" );
+    }
+    REPORT("done\n");
+    return 0;
+}
+
+
+// Fill dictionary with version strings for platforms 
+void initialize_strings_vector(std::vector <string_pair>* vector)
+{
+    vector->push_back(string_pair("TBB: VERSION\t\t3.0", required));          // check TBB_VERSION
+    vector->push_back(string_pair("TBB: INTERFACE VERSION\t5003", required)); // check TBB_INTERFACE_VERSION
+    vector->push_back(string_pair("TBB: BUILD_DATE", required));
+    vector->push_back(string_pair("TBB: BUILD_HOST", required));
+    vector->push_back(string_pair("TBB: BUILD_OS", required));
+#if _WIN32||_WIN64
+#if !__MINGW32__
+    vector->push_back(string_pair("TBB: BUILD_CL", required));
+#endif
+    vector->push_back(string_pair("TBB: BUILD_COMPILER", required));
+#elif __APPLE__
+    vector->push_back(string_pair("TBB: BUILD_KERNEL", required));
+    vector->push_back(string_pair("TBB: BUILD_GCC", required));
+    vector->push_back(string_pair("TBB: BUILD_COMPILER", not_required)); //if( getenv("COMPILER_VERSION") )
+#elif __sun
+    vector->push_back(string_pair("TBB: BUILD_KERNEL", required));
+    vector->push_back(string_pair("TBB: BUILD_SUNCC", required));
+    vector->push_back(string_pair("TBB: BUILD_COMPILER", not_required)); //if( getenv("COMPILER_VERSION") )
+#else //We use version_info_linux.sh for unsupported OSes
+    vector->push_back(string_pair("TBB: BUILD_KERNEL", required));
+    vector->push_back(string_pair("TBB: BUILD_GCC", required));
+    vector->push_back(string_pair("TBB: BUILD_COMPILER", not_required)); //if( getenv("COMPILER_VERSION") )
+    vector->push_back(string_pair("TBB: BUILD_GLIBC", required));
+    vector->push_back(string_pair("TBB: BUILD_LD", required));
+#endif
+    vector->push_back(string_pair("TBB: BUILD_TARGET", required));
+    vector->push_back(string_pair("TBB: BUILD_COMMAND", required));
+    vector->push_back(string_pair("TBB: TBB_USE_DEBUG", required));
+    vector->push_back(string_pair("TBB: TBB_USE_ASSERT", required));
+    vector->push_back(string_pair("TBB: DO_ITT_NOTIFY", required));
+    vector->push_back(string_pair("TBB: ITT", not_required)); //#ifdef DO_ITT_NOTIFY
+    vector->push_back(string_pair("TBB: ALLOCATOR", required));
+    vector->push_back(string_pair("TBB: RML", not_required));
+    vector->push_back(string_pair("TBB: Intel(R) RML library built:", not_required));
+    vector->push_back(string_pair("TBB: Intel(R) RML library version:", not_required));
+    vector->push_back(string_pair("TBB: SCHEDULER", required));
+
+    return;
+}
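The test above relies on two things: setting TBB_VERSION=1 in the environment makes the library print its banner (the strings listed in initialize_strings_vector) to stderr when the first task_scheduler_init is created, and tbb::TBB_runtime_interface_version() reports the version of the library actually loaded. A tiny sketch of the programmatic check, with only facts taken from the test itself:

    #include "tbb/tbb_stddef.h"
    #include <cstdio>

    int main() {
        // Version of the headers this program was compiled against.
        std::printf( "compile-time TBB_INTERFACE_VERSION: %d\n", TBB_INTERFACE_VERSION );
        // Version reported by the tbb library loaded at run time; the test above
        // asserts these two values match before doing anything else.
        std::printf( "run-time interface version: %d\n",
                     tbb::TBB_runtime_interface_version() );
        return 0;
    }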
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_thread.h b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_thread.h
new file mode 100644 (file)
index 0000000..888eeef
--- /dev/null
@@ -0,0 +1,290 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/tbb_thread.h"
+#include "tbb/atomic.h"
+
+#define HARNESS_NO_PARSE_COMMAND_LINE 1
+#include "harness_report.h"
+#include "harness_assert.h"
+
+static const int THRDS = 3;
+static const int THRDS_DETACH = 2;
+static tbb::atomic<int> sum;
+static tbb::atomic<int> BaseCount;
+static THREAD::id real_ids[THRDS+THRDS_DETACH];
+
+class Base {
+    mutable int copy_throws;
+    friend void RunTests();
+    friend void CheckExceptionSafety();
+    void operator=( const Base& );   // Deny access
+protected:
+    Base() : copy_throws(100) {++BaseCount;}
+    Base( const Base& c ) : copy_throws(c.copy_throws) {
+        if( --copy_throws<=0 ) 
+            __TBB_THROW(0);
+        ++BaseCount; 
+    }
+    ~Base() {--BaseCount;}
+};
+
+template<int N>
+class Data: Base {
+    Data();                          // Deny access
+    explicit Data(int v) : value(v) {}
+
+    friend void RunTests();
+    friend void CheckExceptionSafety();
+public:
+    int value;
+};
+
+
+#include "harness_barrier.h"
+
+class ThreadFunc: Base {
+    ThreadFunc() {}
+
+    static Harness::SpinBarrier init_barrier;
+
+    friend void RunTests();
+public:
+    void operator()(){
+        real_ids[0] = THIS_THREAD::get_id();
+        init_barrier.wait();
+        
+        sum.fetch_and_add(1);
+    }
+    void operator()(int num){
+        real_ids[num] = THIS_THREAD::get_id();
+        init_barrier.wait();
+
+        sum.fetch_and_add(num);
+    }
+    void operator()(int num, Data<0> dx) { 
+        real_ids[num] = THIS_THREAD::get_id();
+
+        const double WAIT = .1;
+        const double SHORT_TOLERANCE = 1e-8;
+#if _WIN32 || _WIN64
+        const double LONG_TOLERANCE = 0.120;  // maximal scheduling quantum for Windows Server
+#else
+        const double LONG_TOLERANCE = 0.200;  // reasonable upper bound
+#endif
+        tbb::tick_count t0 = tbb::tick_count::now();
+        tbb::this_tbb_thread::sleep( tbb::tick_count::interval_t(WAIT) );
+        tbb::tick_count t1 = tbb::tick_count::now();
+        double delta = (t1-t0).seconds() - WAIT;
+        if(delta+SHORT_TOLERANCE <= 0.0)
+            REPORT("ERROR: Sleep interval too short (%g outside short tolerance(%g))\n", (t1-t0).seconds(), WAIT - SHORT_TOLERANCE);
+        if(delta > LONG_TOLERANCE)
+            REPORT("WARNING: Sleep interval too long (%g outside long tolerance(%g))\n", (t1-t0).seconds(), WAIT + LONG_TOLERANCE);
+
+        init_barrier.wait();
+
+        sum.fetch_and_add(num);
+        sum.fetch_and_add(dx.value);
+    }
+    void operator()(Data<0> d) {
+        tbb::this_tbb_thread::sleep( tbb::tick_count::interval_t(d.value*1.) );
+    }
+};
+
+Harness::SpinBarrier ThreadFunc::init_barrier(THRDS);
+
+void CheckRelations( const THREAD::id ids[], int n, bool duplicates_allowed ) {
+    for( int i=0; i<n; ++i ) {
+        const THREAD::id x = ids[i];
+        for( int j=0; j<n; ++j ) {
+            const THREAD::id y = ids[j];
+            ASSERT( (x==y)==!(x!=y), NULL );
+            ASSERT( (x<y)==!(x>=y), NULL );
+            ASSERT( (x>y)==!(x<=y), NULL );
+            ASSERT( (x<y)+(x==y)+(x>y)==1, NULL );
+            ASSERT( x!=y || i==j || duplicates_allowed, NULL );
+            for( int k=0; k<n; ++k ) {
+                const THREAD::id z = ids[j];
+                ASSERT( !(x<y && y<z) || x<z, "< is not transitive" );
+            }    
+        }
+    }
+}
+
+class AnotherThreadFunc: Base {
+public:
+    void operator()() {}
+    void operator()(const Data<1>&) {}
+    void operator()(const Data<1>&, const Data<2>&) {}
+    friend void CheckExceptionSafety();
+};
+
+#if TBB_USE_EXCEPTIONS
+void CheckExceptionSafety() { 
+    int original_count = BaseCount;
+    // d loops over number of copies before throw occurs 
+    for( int d=1; d<=3; ++d ) {
+        // Try all combinations of throw/nothrow for f, x, and y's copy constructor.
+        for( int i=0; i<8; ++i ) {
+            { 
+                const AnotherThreadFunc f = AnotherThreadFunc();
+                if( i&1 ) f.copy_throws = d;
+                const Data<1> x(0);
+                if( i&2 ) x.copy_throws = d;
+                const Data<2> y(0);
+                if( i&4 ) y.copy_throws = d;
+                bool exception_caught = false;
+                for( int j=0; j<3; ++j ) {
+                    try { 
+                        switch(j) {
+                            case 0: {THREAD t(f); t.join();} break;
+                            case 1: {THREAD t(f,x); t.join();} break;
+                            case 2: {THREAD t(f,x,y); t.join();} break;
+                        }
+                    } catch(...) {
+                        exception_caught = true;
+                    } 
+                    ASSERT( !exception_caught||(i&((1<<(j+1))-1))!=0, NULL );
+                }
+            }
+// Intel Compiler sometimes fails to destroy all implicitly generated copies 
+// of an object when a copy constructor throws an exception.
+// Problem was reported as Quad issue 482935.
+// This #if should be removed or tightened when the bug is fixed.
+#if !((_WIN32 || _WIN64) && defined(__INTEL_COMPILER))
+            ASSERT( BaseCount==original_count, "object leak detected" );
+#endif
+        }
+    }
+}
+#endif /* TBB_USE_EXCEPTIONS */
+
+#include <cstdio>
+
+void RunTests() {
+
+    ThreadFunc t;
+    Data<0> d100(100), d1(1), d0(0);
+    THREAD::id id;
+    THREAD::id id0, uniq_ids[THRDS];
+    
+    THREAD thrs[THRDS];
+    THREAD thr;
+    THREAD thr0(t);
+    THREAD thr1(t, 2);
+    THREAD thr2(t, 1, d100);
+    
+    ASSERT( thr0.get_id() != id, NULL );
+    id0 = thr0.get_id();
+    tbb::move(thrs[0], thr0);
+    ASSERT( thr0.get_id() == id, NULL );
+    ASSERT( thrs[0].get_id() == id0, NULL );
+
+    THREAD::native_handle_type h1 = thr1.native_handle();
+    THREAD::native_handle_type h2 = thr2.native_handle();
+    THREAD::id id1 = thr1.get_id();
+    THREAD::id id2 = thr2.get_id();
+    tbb::swap(thr1, thr2);
+    ASSERT( thr1.native_handle() == h2, NULL );
+    ASSERT( thr2.native_handle() == h1, NULL );
+    ASSERT( thr1.get_id() == id2, NULL );
+    ASSERT( thr2.get_id() == id1, NULL );
+    thr1.swap(thr2);
+    ASSERT( thr1.native_handle() == h1, NULL );
+    ASSERT( thr2.native_handle() == h2, NULL );
+    ASSERT( thr1.get_id() == id1, NULL );
+    ASSERT( thr2.get_id() == id2, NULL );
+    thr1.swap(thr2);
+
+    tbb::move(thrs[1], thr1);
+    ASSERT( thr1.get_id() == id, NULL );
+
+    tbb::move(thrs[2], thr2);
+    ASSERT( thr2.get_id() == id, NULL );
+
+    for (int i=0; i<THRDS; i++)
+        uniq_ids[i] = thrs[i].get_id();
+
+    ASSERT( thrs[2].joinable(), NULL );
+
+    for (int i=0; i<THRDS; i++)
+        thrs[i].join();
+    for (int i=0; i<THRDS; i++)
+        ASSERT(  real_ids[i] == uniq_ids[i], NULL );
+
+    int current_sum = sum;
+    ASSERT( current_sum == 104, NULL );
+    ASSERT( ! thrs[2].joinable(), NULL );
+    ASSERT( BaseCount==4, "object leak detected" );
+
+#if TBB_USE_EXCEPTIONS
+    CheckExceptionSafety(); 
+#endif
+
+    // Note: all tests involving BaseCount should be put before the tests
+    // involving detached threads, because there is no way of knowing when
+    // a detached thread destroys its arguments.
+
+    THREAD thr_detach_0(t, d0);
+    real_ids[THRDS] = thr_detach_0.get_id();
+    thr_detach_0.detach();
+    ASSERT( thr_detach_0.get_id() == id, NULL );
+
+    THREAD thr_detach_1(t, d1);
+    real_ids[THRDS+1] = thr_detach_1.get_id();
+    thr_detach_1.detach();
+    ASSERT( thr_detach_1.get_id() == id, NULL );
+
+    CheckRelations(real_ids, THRDS+THRDS_DETACH, true);
+
+    CheckRelations(uniq_ids, THRDS, false);
+
+    for (int i=0; i<2; i++) {
+        AnotherThreadFunc empty_func;
+        THREAD thr_to(empty_func), thr_from(empty_func);
+        THREAD::id from_id = thr_from.get_id();
+        if (i) thr_to.join(); 
+        thr_to = thr_from;
+        ASSERT( thr_from.get_id() == THREAD::id(), NULL );
+        ASSERT( thr_to.get_id() == from_id, NULL );
+    }
+
+    ASSERT( THREAD::hardware_concurrency() > 0, NULL);
+}
+
+typedef bool (*id_relation)( THREAD::id, THREAD::id );
+
+id_relation CheckSignatures() {
+    id_relation r[6] = {&tbb::operator==,
+                        &tbb::operator!=,
+                        &tbb::operator<,
+                        &tbb::operator>,
+                        &tbb::operator<=,
+                        &tbb::operator>=};
+    return r[1];
+}
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_tick_count.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_tick_count.cpp
new file mode 100644 (file)
index 0000000..c4360a0
--- /dev/null
@@ -0,0 +1,148 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+#include "tbb/tick_count.h"
+#include "harness.h"
+#include <cstdio>
+
+//! Assert that two times in seconds are very close.
+void AssertNear( double x, double y ) {
+    ASSERT( -1.0E-10 <= x-y && x-y <=1.0E-10, NULL );
+}
+
+//! Test arithmetic operators on tick_count::interval_t
+void TestArithmetic( const tbb::tick_count& t0, const tbb::tick_count& t1, const tbb::tick_count& t2 ) {
+    tbb::tick_count::interval_t i= t1-t0;
+    tbb::tick_count::interval_t j = t2-t1;
+    tbb::tick_count::interval_t k = t2-t0;
+    AssertSameType( tbb::tick_count::interval_t(), i-j );
+    AssertSameType( tbb::tick_count::interval_t(), i+j );
+    ASSERT( i.seconds()>1E-9, NULL );
+    ASSERT( j.seconds()>1E-9, NULL );
+    ASSERT( k.seconds()>2E-9, NULL );
+    AssertNear( (i+j).seconds(), k.seconds() );
+    AssertNear( (k-j).seconds(), i.seconds() );
+    AssertNear( ((k-j)+(j-i)).seconds(), k.seconds()-i.seconds() );
+    tbb::tick_count::interval_t sum;
+    sum += i;
+    sum += j;
+    AssertNear( sum.seconds(), k.seconds() );
+    sum -= i;
+    AssertNear( sum.seconds(), j.seconds() );
+    sum -= j;
+    AssertNear( sum.seconds(), 0.0 );
+}
+
+//------------------------------------------------------------------------
+// Test for overhead in calls to tick_count 
+//------------------------------------------------------------------------
+
+//! Wait for given duration.
+/** The duration parameter is in units of seconds. */
+static void WaitForDuration( double duration ) {
+    tbb::tick_count start = tbb::tick_count::now();
+    while( (tbb::tick_count::now()-start).seconds() < duration )
+        continue;
+}
+
+//! Test that average timer overhead is within acceptable limit.
+/** The 'tolerance' value inside the test specifies the limit. */
+void TestSimpleDelay( int ntrial, double duration, double tolerance ) {
+    double total_worktime = 0;
+    // Iteration -1 warms up the code cache.
+    for( int trial=-1; trial<ntrial; ++trial ) {
+        tbb::tick_count t0 = tbb::tick_count::now();
+        if( duration ) WaitForDuration(duration);
+        tbb::tick_count t1 = tbb::tick_count::now();
+        if( trial>=0 ) {
+            total_worktime += (t1-t0).seconds(); 
+        }
+    }
+    // Compute average worktime and average delta
+    double worktime = total_worktime/ntrial;
+    double delta = worktime-duration;
+    REMARK("worktime=%g delta=%g tolerance=%g\n", worktime, delta, tolerance);
+
+    // Check that delta is acceptable
+    if( delta<0 ) 
+        REPORT("ERROR: delta=%g < 0\n",delta); 
+    if( delta>tolerance )
+        REPORT("%s: delta=%g > %g=tolerance where duration=%g\n",delta>3*tolerance?"ERROR":"Warning",delta,tolerance,duration);
+}
+
+//------------------------------------------------------------------------
+// Test for subtracting tick_count values obtained on different threads.
+//------------------------------------------------------------------------
+
+#include "tbb/atomic.h"
+const int MAX_NTHREAD = 1000;
+static tbb::atomic<int> Counter;
+static volatile bool Flag;
+static tbb::tick_count tick_count_array[MAX_NTHREAD];
+
+struct TickCountDifferenceBody {
+    void operator()( int id ) const {
+        if( --Counter==0 ) Flag = true;
+        while( !Flag ) continue;
+        tick_count_array[id] = tbb::tick_count::now();
+    }
+};
+
+//! Test that two tick_count values recorded on different threads can be meaningfully subtracted.
+void TestTickCountDifference( int n ) {
+    double tolerance = 3E-4;
+    for( int trial=0; trial<10; ++trial ) {
+        Counter = n;
+        Flag = false;
+        NativeParallelFor( n, TickCountDifferenceBody() ); 
+        ASSERT( Counter==0, NULL ); 
+        for( int i=0; i<n; ++i )
+            for( int j=0; j<i; ++j ) {
+                double diff = (tick_count_array[i]-tick_count_array[j]).seconds();
+                if( diff<0 ) diff = -diff;
+                if( diff>tolerance ) {
+                    REPORT("%s: cross-thread tick_count difference = %g > %g = tolerance\n",
+                           diff>3*tolerance?"ERROR":"Warning",diff,tolerance);
+                }
+            }
+    }
+}
+
+int TestMain () {
+    tbb::tick_count t0 = tbb::tick_count::now();
+    TestSimpleDelay(/*ntrial=*/1000000,/*duration=*/0,    /*tolerance=*/2E-6);
+    tbb::tick_count t1 = tbb::tick_count::now();
+    TestSimpleDelay(/*ntrial=*/10,     /*duration=*/0.125,/*tolerance=*/5E-6);
+    tbb::tick_count t2 = tbb::tick_count::now();
+    TestArithmetic(t0,t1,t2);
+
+    for( int n=MinThread; n<=MaxThread; ++n ) {
+        TestTickCountDifference(n);
+    }
+    return Harness::Done;
+}
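The timing test above validates tbb::tick_count arithmetic and its cross-thread consistency. The everyday usage pattern it protects is simply "now() twice, subtract, read seconds()", sketched below with an arbitrary illustrative workload.

    #include "tbb/tick_count.h"
    #include <cstdio>

    int main() {
        tbb::tick_count t0 = tbb::tick_count::now();
        volatile double x = 0;
        for( int i = 0; i < 10000000; ++i )
            x = x + 1.0;                     // some work to time
        tbb::tick_count t1 = tbb::tick_count::now();
        tbb::tick_count::interval_t elapsed = t1 - t0;
        std::printf( "elapsed: %g seconds\n", elapsed.seconds() );
        return 0;
    }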
diff --git a/deal.II/contrib/tbb/tbb30_104oss/src/test/test_yield.cpp b/deal.II/contrib/tbb/tbb30_104oss/src/test/test_yield.cpp
new file mode 100644 (file)
index 0000000..301e485
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
+
+    This file is part of Threading Building Blocks.
+
+    Threading Building Blocks is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    Threading Building Blocks is distributed in the hope that it will be
+    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with Threading Building Blocks; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+*/
+
+// Test that __TBB_Yield works.
+// On Red Hat EL4 U1, it does not work, because sched_yield is broken.
+
+#include "tbb/tbb_machine.h"
+#include "tbb/tick_count.h"
+#include "harness.h"
+
+static volatile long CyclicCounter;
+static volatile bool Quit;
+double SingleThreadTime;
+
+struct RoundRobin: NoAssign {
+    const int number_of_threads;
+    RoundRobin( long p ) : number_of_threads(p) {}
+    void operator()( long k ) const {
+        tbb::tick_count t0 = tbb::tick_count::now();
+        for( long i=0; i<10000; ++i ) {
+            // Wait for previous thread to notify us 
+            for( int j=0; CyclicCounter!=k && !Quit; ++j ) {
+                __TBB_Yield();
+                if( j%100==0 ) {
+                    tbb::tick_count t1 = tbb::tick_count::now();
+                    if( (t1-t0).seconds()>=1.0*number_of_threads ) {
+                        REPORT("Warning: __TBB_Yield failing to yield with %d threads (or system is heavily loaded)\n",number_of_threads);
+                        Quit = true;
+                        return;
+                    }
+                }
+            }
+            // Notify next thread that it can run            
+            CyclicCounter = (k+1)%number_of_threads;
+        }
+    }
+};
+
+int TestMain () {
+    for( int p=MinThread; p<=MaxThread; ++p ) {
+        REMARK("testing with %d threads\n", p );
+        CyclicCounter = 0;
+        Quit = false;
+        NativeParallelFor( long(p), RoundRobin(p) );
+    }
+    return Harness::Done;
+}
+
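The round-robin test above uses __TBB_Yield() (an internal macro from tbb/tbb_machine.h, which the test includes directly) to relinquish the processor while spinning. A minimal sketch of that polite spin-wait pattern between two threads; the Setter functor and flag are illustrative only.

    #include "tbb/tbb_machine.h"     // defines __TBB_Yield()
    #include "tbb/atomic.h"
    #include "tbb/tbb_thread.h"
    #include <cstdio>

    static tbb::atomic<int> ready;

    struct Setter {
        void operator()() const { ready = 1; }
    };

    int main() {
        ready = 0;
        tbb::tbb_thread t( ( Setter() ) );   // extra parentheses avoid a declaration ambiguity
        while( !ready )
            __TBB_Yield();                   // yield while waiting for the other thread
        t.join();
        std::printf( "flag observed\n" );
        return 0;
    }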
